Data Persistence (Saving Data)
1. Excel
Creating a workbook
import openpyxl

work_book = openpyxl.Workbook()
sheet1 = work_book.create_sheet('表1')   # add a new worksheet named '表1'
sheet1 = work_book.active                # rebind to the default active worksheet
sheet1['A1'] = 'A1'                      # write by cell coordinate
sheet1['B7'] = 'B7'
sheet1.cell(row=1, column=1).value = '111111'   # write by row/column index
sheet1.cell(row=2, column=2).value = '222222'
data1 = (1, 2, 3, 4, 5)
sheet1.append(data1)                     # append a whole row after the last used row
work_book.save('实例.xlsx')
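Because sheet1 is rebound to the active sheet right after create_sheet(), everything above lands in the default sheet and '表1' stays empty. A minimal sketch (the 'sheets.xlsx' file name is illustrative) that keeps a separate handle to each sheet:

import openpyxl

wb = openpyxl.Workbook()
extra = wb.create_sheet('表1')    # new sheet, appended after the default one
default = wb.active               # the sheet created with the workbook
extra['A1'] = 'written to 表1'
default['A1'] = 'written to the default sheet'
print(wb.sheetnames)              # ['Sheet', '表1']
wb.save('sheets.xlsx')            # hypothetical file name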
Writing data in bulk
import openpyxl

work = openpyxl.Workbook()
sheet1 = work.active
for i in range(1, 10):
    for j in range(1, i + 1):
        print(f'{j}x{i}={j * i}', end='\t')
        sheet1.cell(row=i, column=j).value = f'{j}x{i}={j * i}'
    print()
work.save('实例.xlsx')
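Addressing cells with cell(row=..., column=...) is explicit but verbose; since each row here is written left to right, an equivalent sketch builds the row as a list and hands it to append() in one call:

import openpyxl

work = openpyxl.Workbook()
sheet1 = work.active
for i in range(1, 10):
    # collect the whole row first, then append it as one unit
    row = [f'{j}x{i}={j * i}' for j in range(1, i + 1)]
    sheet1.append(row)
work.save('实例.xlsx')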
Reading data from a workbook
import openpyxl

workbook = openpyxl.load_workbook('实例.xlsx')
print(workbook.sheetnames)     # all worksheet names in the file
sheet = workbook['Sheet']
print(sheet.max_row)           # number of used rows
print(sheet.max_column)        # number of used columns
# read the first row, cell by cell
for i in range(1, sheet.max_column + 1):
    print(sheet.cell(row=1, column=i).value)
# read the first column, cell by cell
for j in range(1, sheet.max_row + 1):
    print(sheet.cell(row=j, column=1).value)
# read every cell, row by row
for i in range(1, sheet.max_row + 1):
    for j in range(1, sheet.max_column + 1):
        print(sheet.cell(row=i, column=j).value)
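For whole-sheet reads, openpyxl's iter_rows() avoids the index arithmetic; with values_only=True it yields each row as a plain tuple of cell values. A minimal sketch over the same file:

import openpyxl

workbook = openpyxl.load_workbook('实例.xlsx')
sheet = workbook.active
for row in sheet.iter_rows(values_only=True):
    print(row)    # one tuple per row, e.g. ('1x1=1', None, ...)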
Case study - saving Douban Top 250 to Excel
import parsel
import requests
import openpyxl

work = openpyxl.Workbook()
sheet1 = work.active
sheet1.append(['标题', '简介', '评分', '评价人数'])   # header row
for page in range(0, 226, 25):   # the Top 250 list is paged 25 items at a time
    url = f'https://movie.douban.com/top250?start={page}&filter='
    headers = {
        'Cookie': 'll="118267"; bid=VrC8tT1GWz8; __yadk_uid=iHqVKZD4ZHIVREbOrlu9k4uWFSsAdZtO; _pk_id.100001.4cf6=b39d476add4f5658.1683638062.; __utmz=30149280.1687782730.8.7.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmz=223695111.1687782730.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1687952054%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DqdlD_RZvrHI0sXUZ08wSSKbkKLAWA_R84aALUkbWwp__yA2hUL-2C_Ej15saTpe7%26wd%3D%26eqid%3Dfdfaeaeb0001b3f60000000664998548%22%5D; _pk_ses.100001.4cf6=1; ap_v=0,6.0; __utma=30149280.1169382564.1682168622.1687782730.1687952054.9; __utmb=30149280.0.10.1687952054; __utmc=30149280; __utma=223695111.1640817040.1683638062.1687782730.1687952054.5; __utmb=223695111.0.10.1687952054; __utmc=223695111; __gads=ID=744f53c3cb2ebb52-22841ef3a4e00021:T=1683638065:RT=1687952056:S=ALNI_MZhRKuML1OBDnNRafe3qd6-ndhaiQ; __gpi=UID=00000c03bafcda5c:T=1683638065:RT=1687952056:S=ALNI_MbkLLsUm467wiS6ZZ6Mn2ohKIWBZw',
        'Host': 'movie.douban.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
    }
    response = requests.get(url=url, headers=headers)
    html_data = response.text
    # parse the data
    selector = parsel.Selector(html_data)
    lis = selector.css('.grid_view>li')
    for li in lis:
        title = li.css('.hd>a>span:nth-child(1)::text').get()
        info = li.css('.bd>p:nth-child(1)::text').getall()
        info = '//'.join([i.strip() for i in info])
        score = li.css('.rating_num::text').get()
        follow = li.css('.star>span:nth-child(4)::text').get()
        print(title, info, score, follow)
        sheet1.append([title, info, score, follow])
    print('=' * 100 + '\n')
work.save('douban.xlsx')
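One hedged refinement, not part of the original code: the loop fires ten requests back to back with no error handling. A small helper (fetch_page and the one-second pause are illustrative choices) makes failures loud and throttles the crawl:

import time
import requests

def fetch_page(url, headers):
    """Hypothetical helper: GET a page, fail loudly on non-200, then pause."""
    response = requests.get(url=url, headers=headers)
    response.raise_for_status()   # raises for 4xx/5xx instead of parsing an error page
    time.sleep(1)                 # be gentle with the server between pages
    return response.text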
Case study - saving NetEase News to Excel
"""
目标站点:https://news.163.com/
往下翻有 "要闻" 这个新闻类目, 找不到可以 Ctrl + F 搜索下
需求:
爬取网易新闻 "要闻" 类目第一页数据,将数据保存为 Excel 表格
保存字段需要以下内容
title
channelname
docurl
imgurl
source
tlink
"""
import json
import re
import requests
import openpyxl

url = 'https://news.163.com/special/cm_yaowen20200213/?callback=data_callback'
response = requests.get(url=url)
json_data = response.text
# the response is JSONP: data_callback([...]); extract the JSON payload
result = re.findall(r'data_callback\((.*?)\)', json_data, re.S)
item_json = json.loads(result[0])
work = openpyxl.Workbook()
sheet1 = work.active
sheet1.append(['title', 'channelname', 'docurl', 'imgurl', 'source', 'tlink'])
for item in item_json:
    title = item['title']
    channelname = item['channelname']
    docurl = item['docurl']
    imgurl = item['imgurl']
    source = item['source']
    tlink = item['tlink']
    print(title, channelname, docurl, imgurl, source, tlink, sep=' | ')
    sheet1.append([title, channelname, docurl, imgurl, source, tlink])
work.save('网易新闻.xlsx')
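The regex works because the body is JSONP, i.e. the JSON array wrapped in data_callback(...). For illustration, the same payload can be cut out without re by slicing between the first '(' and the last ')'; this is an alternative sketch, not what the original code does:

import json

text = 'data_callback([{"title": "demo"}])'        # stand-in for response.text
payload = text[text.find('(') + 1:text.rfind(')')]  # strip the callback wrapper
print(json.loads(payload))                          # [{'title': 'demo'}]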
2. JSON
Serialization and deserialization
import json

data = {
    'name': 'ACME',
    'shares': 100,
    'price': 542.23
}
"""
JSON serialization: convert an object into a JSON string
    dumps() serializes an object to a JSON string
"""
json_str = json.dumps(data)
print(json_str)
print(type(json_str))    # <class 'str'>
"""
JSON deserialization: convert a JSON string back into an object
    loads() deserializes a JSON string
"""
json_obj = json.loads(json_str)
print(json_obj)
print(type(json_obj))    # <class 'dict'>
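dumps() and loads() work on strings in memory; their file counterparts dump() and load() talk to a file object directly. A minimal round-trip sketch:

import json

data = {'name': 'ACME', 'shares': 100, 'price': 542.23}
with open('data.json', mode='w', encoding='utf-8') as f:
    json.dump(data, f)        # serialize straight into the file
with open('data.json', mode='r', encoding='utf-8') as f:
    restored = json.load(f)   # deserialize straight from the file
print(restored == data)       # True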
Keeping Chinese characters readable
import json

data = {
    'name': '青灯',
    'shares': 100,
    'price': 542.23
}
# ensure_ascii=False keeps Chinese characters as-is instead of \uXXXX escapes
json_str = json.dumps(data, ensure_ascii=False)
with open('data.json', mode='w', encoding='utf-8') as f:
    f.write(json_str)
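The write-out can also be done in one step with json.dump(); indent=2 is an optional readability choice:

import json

data = {'name': '青灯', 'shares': 100, 'price': 542.23}
with open('data.json', mode='w', encoding='utf-8') as f:
    json.dump(data, f, ensure_ascii=False, indent=2)   # pretty-printed, Chinese intact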
Case study - saving Douban Top 250 to JSON
import json
import parsel
import requests

data = []
for page in range(0, 226, 25):
    url = f'https://movie.douban.com/top250?start={page}&filter='
    headers = {
        'Cookie': 'll="118267"; bid=VrC8tT1GWz8; __yadk_uid=iHqVKZD4ZHIVREbOrlu9k4uWFSsAdZtO; _pk_id.100001.4cf6=b39d476add4f5658.1683638062.; __utmz=30149280.1687782730.8.7.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmz=223695111.1687782730.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1687952054%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DqdlD_RZvrHI0sXUZ08wSSKbkKLAWA_R84aALUkbWwp__yA2hUL-2C_Ej15saTpe7%26wd%3D%26eqid%3Dfdfaeaeb0001b3f60000000664998548%22%5D; _pk_ses.100001.4cf6=1; ap_v=0,6.0; __utma=30149280.1169382564.1682168622.1687782730.1687952054.9; __utmb=30149280.0.10.1687952054; __utmc=30149280; __utma=223695111.1640817040.1683638062.1687782730.1687952054.5; __utmb=223695111.0.10.1687952054; __utmc=223695111; __gads=ID=744f53c3cb2ebb52-22841ef3a4e00021:T=1683638065:RT=1687952056:S=ALNI_MZhRKuML1OBDnNRafe3qd6-ndhaiQ; __gpi=UID=00000c03bafcda5c:T=1683638065:RT=1687952056:S=ALNI_MbkLLsUm467wiS6ZZ6Mn2ohKIWBZw',
        'Host': 'movie.douban.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
    }
    response = requests.get(url=url, headers=headers)
    html_data = response.text
    # parse the data
    selector = parsel.Selector(html_data)
    lis = selector.css('.grid_view>li')
    for li in lis:
        title = li.css('.hd>a>span:nth-child(1)::text').get()
        info = li.css('.bd>p:nth-child(1)::text').getall()
        info = '//'.join([i.strip() for i in info])
        score = li.css('.rating_num::text').get()
        follow = li.css('.star>span:nth-child(4)::text').get()
        d = {'title': title, 'info': info, 'score': score, 'follow': follow}
        data.append(d)
print(data)
json_str = json.dumps(data, ensure_ascii=False)
with open('douban.json', mode='w', encoding='utf-8') as f:
    f.write(json_str)
Case study - saving an API response as JSON
"""
目标网址:https://www.ku6.com/video/feed?pageNo=0&pageSize=40&subjectId=76
请求方式: GET
要求:
1、请求上述网址的数据
2、把获取到的数据保存到json文件中
文件命名: data.json
需要在文件中看到json字符串
请在下方编写代码
"""
import requests

url = 'https://www.ku6.com/video/feed?pageNo=0&pageSize=40&subjectId=76'
response = requests.get(url=url)
json_data = response.text
print(json_data)
with open('data.json', mode='w', encoding='utf-8') as f:
    f.write(json_data)
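Writing response.text saves whatever the server returned, even an error page. A hedged variant parses first, so a malformed response fails before the file is touched (indent=2 is optional pretty-printing):

import json
import requests

url = 'https://www.ku6.com/video/feed?pageNo=0&pageSize=40&subjectId=76'
response = requests.get(url=url)
data = response.json()    # raises if the body is not valid JSON
with open('data.json', mode='w', encoding='utf-8') as f:
    json.dump(data, f, ensure_ascii=False, indent=2)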
3. CSV
Writing list rows to CSV
"""
csv数据格式:
每一行是一条数据
每一行中每个数据字段有分隔符号, 默认为逗号
"""
import csv
data = [
[ 1 , 2 , 3 , 4 ] ,
[ 1 , 2 , 3 , 4 ] ,
[ 5 , 6 , 7 , 8 ] ,
[ 5 , 6 , 7 , 8 ]
]
with open ( 'data.csv' , mode= 'a' , encoding= 'utf-8' , newline= '' ) as f:
csv_write = csv. writer( f)
for i in data:
csv_write. writerow( i)
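writer objects also expose writerows(), which replaces the explicit loop:

import csv

data = [[1, 2, 3, 4], [5, 6, 7, 8]]
with open('data.csv', mode='a', encoding='utf-8', newline='') as f:
    csv.writer(f).writerows(data)   # all rows in one call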
Case study - saving Douban list rows to CSV
import csv
import parsel
import requests

with open('douban-list.csv', mode='a', encoding='utf-8', newline='') as f:
    csv_write = csv.writer(f)
    csv_write.writerow(['标题', '简介', '评分', '评价人数'])   # header row
    for page in range(0, 226, 25):
        url = f'https://movie.douban.com/top250?start={page}&filter='
        headers = {
            'Cookie': 'll="118267"; bid=VrC8tT1GWz8; __yadk_uid=iHqVKZD4ZHIVREbOrlu9k4uWFSsAdZtO; _pk_id.100001.4cf6=b39d476add4f5658.1683638062.; __utmz=30149280.1687782730.8.7.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmz=223695111.1687782730.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1687952054%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DqdlD_RZvrHI0sXUZ08wSSKbkKLAWA_R84aALUkbWwp__yA2hUL-2C_Ej15saTpe7%26wd%3D%26eqid%3Dfdfaeaeb0001b3f60000000664998548%22%5D; _pk_ses.100001.4cf6=1; ap_v=0,6.0; __utma=30149280.1169382564.1682168622.1687782730.1687952054.9; __utmb=30149280.0.10.1687952054; __utmc=30149280; __utma=223695111.1640817040.1683638062.1687782730.1687952054.5; __utmb=223695111.0.10.1687952054; __utmc=223695111; __gads=ID=744f53c3cb2ebb52-22841ef3a4e00021:T=1683638065:RT=1687952056:S=ALNI_MZhRKuML1OBDnNRafe3qd6-ndhaiQ; __gpi=UID=00000c03bafcda5c:T=1683638065:RT=1687952056:S=ALNI_MbkLLsUm467wiS6ZZ6Mn2ohKIWBZw',
            'Host': 'movie.douban.com',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
        }
        response = requests.get(url=url, headers=headers)
        html_data = response.text
        # parse the data
        selector = parsel.Selector(html_data)
        lis = selector.css('.grid_view>li')
        for li in lis:
            title = li.css('.hd>a>span:nth-child(1)::text').get()
            info = li.css('.bd>p:nth-child(1)::text').getall()
            info = '//'.join([i.strip() for i in info])
            score = li.css('.rating_num::text').get()
            follow = li.css('.star>span:nth-child(4)::text').get()
            print(title, info, score, follow)
            csv_write.writerow([title, info, score, follow])
        print('=' * 100 + '\n')
Writing dict rows to CSV
"""
csv数据格式:
每一行是一条数据
每一行中每个数据字段有分隔符号, 默认为逗号
"""
import csv

list_dict = [{'first_name': 'Baked', 'last_name': 'Beans'},
             {'first_name': 'Lovely'},
             {'first_name': 'Wonderful', 'last_name': 'Spam'}]
with open('data.csv', mode='a', encoding='utf-8', newline='') as f:
    csv_write = csv.DictWriter(f, fieldnames=['first_name', 'last_name'])
    csv_write.writeheader()
    for i in list_dict:
        csv_write.writerow(i)
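DictWriter likewise has writerows(), and rows missing a field (like {'first_name': 'Lovely'} above) are padded with the restval parameter, which defaults to an empty string. A sketch making that default explicit:

import csv

list_dict = [{'first_name': 'Baked', 'last_name': 'Beans'},
             {'first_name': 'Lovely'}]
with open('data.csv', mode='a', encoding='utf-8', newline='') as f:
    csv_write = csv.DictWriter(f, fieldnames=['first_name', 'last_name'],
                               restval='N/A')   # fill missing fields explicitly
    csv_write.writeheader()
    csv_write.writerows(list_dict)              # all rows in one call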
Case study - saving Douban dict rows to CSV
import csv
import parsel
import requests

with open('douban-dict.csv', mode='a', encoding='utf-8', newline='') as f:
    csv_write = csv.DictWriter(f, fieldnames=['title', 'info', 'score', 'follow'])
    csv_write.writeheader()
    for page in range(0, 226, 25):
        url = f'https://movie.douban.com/top250?start={page}&filter='
        headers = {
            'Cookie': 'll="118267"; bid=VrC8tT1GWz8; __yadk_uid=iHqVKZD4ZHIVREbOrlu9k4uWFSsAdZtO; _pk_id.100001.4cf6=b39d476add4f5658.1683638062.; __utmz=30149280.1687782730.8.7.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmz=223695111.1687782730.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1687952054%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DqdlD_RZvrHI0sXUZ08wSSKbkKLAWA_R84aALUkbWwp__yA2hUL-2C_Ej15saTpe7%26wd%3D%26eqid%3Dfdfaeaeb0001b3f60000000664998548%22%5D; _pk_ses.100001.4cf6=1; ap_v=0,6.0; __utma=30149280.1169382564.1682168622.1687782730.1687952054.9; __utmb=30149280.0.10.1687952054; __utmc=30149280; __utma=223695111.1640817040.1683638062.1687782730.1687952054.5; __utmb=223695111.0.10.1687952054; __utmc=223695111; __gads=ID=744f53c3cb2ebb52-22841ef3a4e00021:T=1683638065:RT=1687952056:S=ALNI_MZhRKuML1OBDnNRafe3qd6-ndhaiQ; __gpi=UID=00000c03bafcda5c:T=1683638065:RT=1687952056:S=ALNI_MbkLLsUm467wiS6ZZ6Mn2ohKIWBZw',
            'Host': 'movie.douban.com',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
        }
        response = requests.get(url=url, headers=headers)
        html_data = response.text
        # parse the data
        selector = parsel.Selector(html_data)
        lis = selector.css('.grid_view>li')
        for li in lis:
            title = li.css('.hd>a>span:nth-child(1)::text').get()
            info = li.css('.bd>p:nth-child(1)::text').getall()
            info = '//'.join([i.strip() for i in info])
            score = li.css('.rating_num::text').get()
            follow = li.css('.star>span:nth-child(4)::text').get()
            print(title, info, score, follow)
            d = {'title': title, 'info': info, 'score': score, 'follow': follow}
            csv_write.writerow(d)
        print('=' * 100 + '\n')
Reading CSV data
import csv

"""
csv reads directly from any iterable of text lines:
    csv.reader() returns each row as a list
    csv.DictReader() returns each row as a dict keyed by the header row
"""
with open('douban-list.csv', mode='r', encoding='utf-8') as f:
    csv_read = csv.DictReader(f)
    print(csv_read)    # the reader object itself, not the rows
    for i in csv_read:
        print(i)       # one dict per data row
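For the list-per-row style mentioned in the comment block, csv.reader() is the counterpart of DictReader: each row, header included, comes back as a list of strings:

import csv

with open('douban-list.csv', mode='r', encoding='utf-8') as f:
    for row in csv.reader(f):
        print(row)    # e.g. ['标题', '简介', '评分', '评价人数'] for the header line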
Case study - saving NetEase News to CSV
"""
目标站点:https://news.163.com/
往下翻有 要闻 这个新闻类目
需求:
爬取网易新闻 要闻 类目第一页数据,将数据保存为csv格式
保存字段需要以下内容
title
channelname
docurl
imgurl
source
tlink
"""
import csv
import json
import re
import requests

url = 'https://news.163.com/special/cm_yaowen20200213/?callback=data_callback'
response = requests.get(url=url)
json_data = response.text
# the response is JSONP: data_callback([...]); extract the JSON payload
result = re.findall(r'data_callback\((.*?)\)', json_data, re.S)
item_json = json.loads(result[0])
with open('网易新闻.csv', mode='a', encoding='utf-8', newline='') as f:
    write = csv.writer(f)
    write.writerow(['title', 'channelname', 'docurl', 'imgurl', 'source', 'tlink'])
    for item in item_json:
        title = item['title']
        channelname = item['channelname']
        docurl = item['docurl']
        imgurl = item['imgurl']
        source = item['source']
        tlink = item['tlink']
        print(title, channelname, docurl, imgurl, source, tlink, sep=' | ')
        write.writerow([title, channelname, docurl, imgurl, source, tlink])
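Since each item is already a dict, csv.DictWriter is a natural fit here; extrasaction='ignore' silently drops any extra keys the API returns instead of raising ValueError. A sketch reusing item_json from the snippet above:

import csv

fields = ['title', 'channelname', 'docurl', 'imgurl', 'source', 'tlink']
with open('网易新闻.csv', mode='a', encoding='utf-8', newline='') as f:
    write = csv.DictWriter(f, fieldnames=fields, extrasaction='ignore')
    write.writeheader()
    write.writerows(item_json)   # item_json as parsed in the snippet above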