chenluhe 2019-09-05
Common string methods

# strip leading/trailing whitespace
' hello world '.strip()          # 'hello world'
# split on a given delimiter
'hello world'.split(' ')         # ['hello', 'world']
# replace a substring
'hello world'.replace(' ', '#')  # 'hello#world'
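These methods come up constantly when cleaning scraped fields. For example, the Maoyan case below combines strip() with slicing to turn a raw release-time field into a bare date; a quick illustration (the sample string imitates what the site returns):

raw = '\n    上映时间:1993-01-01\n  '
cleaned = raw.strip()   # '上映时间:1993-01-01'
print(cleaned[5:15])    # '1993-01-01' -- drop the 5-character label, keep the 10-character date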
The csv module
Purpose: save the scraped data to a local CSV file.
Basic usage

import csv

with open('test.csv', 'w') as f:
    writer = csv.writer(f)  # initialize the writer object
    # write a single row at a time
    writer.writerow(['超哥哥', 20])
    writer.writerow(['步惊云', 22])

with open('test.csv', 'a') as f:
    writer = csv.writer(f)
    # write multiple rows at once
    data_list = [('聂风', 23), ('秦霜', 30)]
    writer.writerows(data_list)

On Windows the csv module adds a blank line after every row by default; pass newline='' to open() to avoid it:
with open('xxx.csv','w',newline='') as f:
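If each row maps naturally to named fields, csv.DictWriter is a handy alternative to csv.writer; a minimal sketch (the field names here are chosen purely for illustration):

import csv

with open('test.csv', 'w', newline='') as f:
    # fieldnames fixes both the header row and the column order
    writer = csv.DictWriter(f, fieldnames=['name', 'age'])
    writer.writeheader()
    writer.writerow({'name': '超哥哥', 'age': 20})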
Case study: scraping the Maoyan Top 100 movie chart
Identify the URL
Maoyan Movies - Charts - Top 100
Targets
Movie name, starring cast, release date
Steps
1. Check whether the content is dynamically loaded
Right-click - View page source - search for the target keywords (confirm they appear in the raw source)
2. Find the URL pattern
3. Write the regular expression
<div class="movie-item-info">.*?title="(.*?)".*?class="star">(.*?)</p>.*?releasetime">(.*?)</p>
4. Build the program skeleton, then flesh it out
from urllib import request
import time
import re
import csv
import random


class MaoyanSpider(object):
    def __init__(self):
        self.page = 1  # page counter
        self.url = 'https://maoyan.com/board/4?offset={}'
        self.agent = [
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
            'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
            'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; InfoPath.3)',
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1']

    # request
    def get_page(self, url):
        headers = {'User-Agent': random.choice(self.agent)}  # pick a random User-Agent each time
        req = request.Request(url=url, headers=headers)  # build the request object
        res = request.urlopen(req)  # send the request
        html = res.read().decode('utf-8')  # read the response body
        self.parse_page(html)  # hand off to the parser

    # parse
    def parse_page(self, html):
        pattern = re.compile(
            '<div class="movie-item-info">.*?title="(.*?)".*?class="star">(.*?)</p>.*?releasetime">(.*?)</p>',
            re.S)
        r_list = pattern.findall(html)
        # r_list: [('霸王别姬', '\n 主演:张国荣,张丰毅,巩俐\n ', '上映时间:1993-01-01'), (...), (...)]
        self.write_page(r_list)  # write to the csv file

    # # save and print
    # def write_page(self, r_list):
    #     one_film_dict = {}
    #     for rt in r_list:
    #         one_film_dict['name'] = rt[0].strip()
    #         one_film_dict['star'] = rt[1].strip()
    #         one_film_dict['time'] = rt[2].strip()[5:15]
    #         print(one_film_dict)

    # save to a csv file with writerows() -- the recommended approach
    def write_page(self, r_list):
        # empty list, the eventual writerows() argument: [(), (), ()]
        film_list = []
        with open('maoyan.csv', 'a', newline='') as f:
            writer = csv.writer(f)
            for rt in r_list:
                # pack the cleaned fields into a tuple
                t = (rt[0], rt[1].strip(), rt[2].strip()[5:15])
                film_list.append(t)
            writer.writerows(film_list)

    def main(self):
        for offset in range(0, 31, 10):
            url = self.url.format(offset)
            self.get_page(url)
            time.sleep(random.randint(1, 3))
            print('Page %d done' % self.page)
            self.page += 1


if __name__ == '__main__':
    start = time.time()
    spider = MaoyanSpider()
    spider.main()
    end = time.time()
    print('Elapsed time: %.2f' % (end - start))
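Before wiring a regex like this into the spider it pays to test it against a fragment of the page; a minimal sketch (the HTML below is a trimmed-down imitation of Maoyan's markup, not a verbatim copy):

import re

html = '''<div class="movie-item-info">
<p class="name"><a href="/films/1203" title="霸王别姬">霸王别姬</a></p>
<p class="star">主演:张国荣,张丰毅,巩俐</p>
<p class="releasetime">上映时间:1993-01-01</p>'''

pattern = re.compile(
    '<div class="movie-item-info">.*?title="(.*?)".*?class="star">(.*?)</p>.*?releasetime">(.*?)</p>',
    re.S)
print(pattern.findall(html))
# [('霸王别姬', '主演:张国荣,张丰毅,巩俐', '上映时间:1993-01-01')]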
Persistent storage (MySQL)
A quick review of basic pymysql usage
import pymysql

db = pymysql.connect('localhost', 'root', '123456', 'maoyandb', charset='utf8')
cursor = db.cursor()  # create a cursor object
# the second argument of execute() is a list whose values fill the %s placeholders
cursor.execute('insert into film values(%s,%s,%s)', ['霸王别姬', '张国荣', '1993'])
db.commit()     # commit so the insert takes effect
cursor.close()  # close the cursor
db.close()      # close the connection
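The snippet assumes a film table already exists; a minimal sketch of creating one from Python (the column names and varchar sizes are an assumption, matched to the three inserted values):

import pymysql

db = pymysql.connect('localhost', 'root', '123456', 'maoyandb', charset='utf8')
cursor = db.cursor()
# hypothetical schema: three varchar columns matching the insert above
cursor.execute('''create table if not exists film(
    name varchar(100),
    star varchar(300),
    time varchar(50))''')
db.commit()
cursor.close()
db.close()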
A quick review of executemany() in pymysql

import pymysql

# database connection object
db = pymysql.connect('localhost', 'root', '123456', 'maoyandb', charset='utf8')
cursor = db.cursor()  # cursor object
ins_list = []  # big list collecting all the rows
for i in range(2):
    name = input('Enter the name of student %d: ' % (i + 1))
    age = input('Enter the age of student %d: ' % (i + 1))
    ins_list.append([name, age])
ins = 'insert into t3 values(%s,%s)'  # the insert statement
cursor.executemany(ins, ins_list)  # one database round trip inserts every row, which boosts performance
db.commit()     # commit to the database
cursor.close()  # close the cursor
db.close()      # close the connection

# For comparison -- execute() takes a single row, executemany() takes a list of rows:
# ins = 'insert into maoyanfilm values(%s,%s,%s)'
# cursor.execute(ins, ['霸王', '国荣', '1991'])
# cursor.executemany(ins, [['月光宝盒', '周星驰', '1993'],
#                          ['大圣娶亲', '周星驰', '1993']])
Exercise: store the movie data from the Maoyan case in MySQL (prefer executemany()).

from urllib import request
import time
import re
import pymysql
import random


class MaoyanSpider(object):
    def __init__(self):
        self.page = 1  # page counter
        self.url = 'https://maoyan.com/board/4?offset={}'
        self.ua_list = [
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
            'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
            'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; InfoPath.3)']
        # create the connection and cursor objects
        self.db = pymysql.connect('localhost', 'root', '123456', 'maoyandb', charset='utf8')
        self.cursor = self.db.cursor()

    # fetch
    def get_page(self, url):
        # pick a random User-Agent each time
        headers = {'User-Agent': random.choice(self.ua_list)}
        req = request.Request(url=url, headers=headers)
        res = request.urlopen(req)
        html = res.read().decode('utf-8')
        self.parse_page(html)  # hand off to the parser

    # parse
    def parse_page(self, html):
        pattern = re.compile(
            '<div class="movie-item-info">.*?title="(.*?)".*?class="star">(.*?)</p>.*?releasetime">(.*?)</p>',
            re.S)
        # r_list: [('霸王别姬', '张国荣', '1993'), (), ()]
        r_list = pattern.findall(html)
        print(r_list)
        self.write_page(r_list)

    # store in MySQL with executemany([ [], [], [] ])
    def write_page(self, r_list):
        film_list = []
        ins = 'insert into filmtab values(%s,%s,%s)'  # the insert statement
        # clean the rows and collect them in the big list film_list
        for rt in r_list:
            one_film = [rt[0], rt[1].strip(), rt[2].strip()[5:15]]
            film_list.append(one_film)
        # one database round trip stores a whole page of rows
        self.cursor.executemany(ins, film_list)
        # commit to the database
        self.db.commit()

    def main(self):
        for offset in range(0, 31, 10):
            url = self.url.format(offset)
            self.get_page(url)
            time.sleep(random.randint(1, 3))
            print('Page %d done' % self.page)
            self.page += 1
        # disconnect once every page has been crawled
        self.cursor.close()
        self.db.close()


if __name__ == '__main__':
    start = time.time()
    spider = MaoyanSpider()
    spider.main()
    end = time.time()
    print('Elapsed time: %.2f' % (end - start))
Let's run a couple of SQL queries
1. Movies released more than 20 years ago (name and release date)
select name,time from filmtab where time<(now()-interval 20 year);
2. Movies released between 1990 and 2000 (name and release date)
select name,time from filmtab where time>='1990-01-01' and time<='2000-12-31';
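The same queries can be run from Python as well; a minimal sketch against the filmtab table (this relies on the time column holding ISO-style date strings, which compare correctly as text):

import pymysql

db = pymysql.connect('localhost', 'root', '123456', 'maoyandb', charset='utf8')
cursor = db.cursor()
cursor.execute('select name,time from filmtab where time>=%s and time<=%s',
               ['1990-01-01', '2000-12-31'])
for name, show_time in cursor.fetchall():  # fetchall() returns a tuple of row tuples
    print(name, show_time)
cursor.close()
db.close()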
A quick review of MongoDB with pymongo

import pymongo

# 1. connection object
conn = pymongo.MongoClient(host='127.0.0.1', port=27017)
# 2. database object
db = conn['maoyandb']
# 3. collection object
myset = db['filmtab']
# 4. insert a document
myset.insert_one({'name': '赵敏'})
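pymongo also provides insert_many() for batch writes, the MongoDB counterpart of executemany(); a minimal sketch:

import pymongo

conn = pymongo.MongoClient(host='127.0.0.1', port=27017)
myset = conn['maoyandb']['filmtab']
# one round trip inserts every document in the list
myset.insert_many([{'name': '霸王别姬'}, {'name': '月光宝盒'}])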
Exercise: store the movie data from the Maoyan case in MongoDB.

from urllib import request
import re
import time
import random
import pymongo


class MaoyanSpider(object):
    def __init__(self):
        self.url = 'https://maoyan.com/board/4?offset={}'
        self.num = 0  # counter
        # create the three pymongo objects
        self.conn = pymongo.MongoClient('localhost', 27017)
        self.db = self.conn['maoyandb']
        self.myset = self.db['filmset']
        self.ua_list = [
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
            'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
            'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; InfoPath.3)',
        ]

    def get_html(self, url):
        headers = {
            'User-Agent': random.choice(self.ua_list)
        }
        req = request.Request(url=url, headers=headers)
        res = request.urlopen(req)
        html = res.read().decode('utf-8')
        # hand off to the parser
        self.parse_html(html)

    def parse_html(self, html):
        re_bds = r'<div class="movie-item-info">.*?title="(.*?)".*?class="star">(.*?)</p>.*?releasetime">(.*?)</p>'
        pattern = re.compile(re_bds, re.S)
        # film_list: [('霸王别姬', '张国荣', '1993'), ()]
        film_list = pattern.findall(html)
        # hand off to the writer
        self.write_html(film_list)

    # store in MongoDB
    def write_html(self, film_list):
        for film in film_list:
            film_dict = {
                'name': film[0].strip(),
                'star': film[1].strip(),
                'time': film[2].strip()[5:15]
            }
            # insert the document into MongoDB
            self.myset.insert_one(film_dict)

    def main(self):
        for offset in range(0, 31, 10):
            url = self.url.format(offset)
            self.get_html(url)
            time.sleep(random.randint(1, 2))


if __name__ == '__main__':
    start = time.time()
    spider = MaoyanSpider()
    spider.main()
    end = time.time()
    print('Elapsed time: %.2f' % (end - start))
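To verify the result, read the documents straight back out of the collection; a minimal sketch:

import pymongo

conn = pymongo.MongoClient('localhost', 27017)
myset = conn['maoyandb']['filmset']
print(myset.count_documents({}))                 # how many films were stored
for doc in myset.find({}, {'_id': 0}).limit(3):  # hide the ObjectId, show 3 samples
    print(doc)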
Dianying Tiantang (dytt8.net) case study: two-level page scraping
1. Check whether the page is static or dynamically loaded
Right-click - View page source
2. Identify the URL
Baidu search: 电影天堂 - 2019 new releases - More
3. Targets
********* first-level page *********
1. Movie name
2. Movie detail link
********* second-level page *********
1. Download link
4. Steps
Find the URL pattern
Page 1: https://www.dytt8.net/html/gndy/dyzz/list_23_1.html
Page 2: https://www.dytt8.net/html/gndy/dyzz/list_23_2.html
Page n: https://www.dytt8.net/html/gndy/dyzz/list_23_n.html
Write the regular expressions
1. First-level page regex (movie name, detail link)
<table width="100%".*?<td height="26">.*?<a href="(.*?)".*?>(.*?)</a>
2. Second-level page regex
<td style="WORD-WRAP.*?>.*?>(.*?)</a>
Implementation
# decode('gbk', 'ignore') -- note the ignore argument
# mind the structure and code readability (don't let any one function grow bloated)
from urllib import request
import re
import time
import random
from useragents import *  # local helper module providing ua_list
import pymysql


class FilmSky(object):
    def __init__(self):
        self.url = 'https://www.dytt8.net/html/gndy/dyzz/list_23_{}.html'
        # create the connection and cursor objects
        self.db = pymysql.connect('127.0.0.1', 'root', '123456', 'maoyandb', charset='utf8')
        self.cursor = self.db.cursor()

    # fetch html (both page levels need requests, so share one function)
    def get_page(self, url):
        req = request.Request(url=url, headers={'User-Agent': random.choice(ua_list)})
        res = request.urlopen(req)
        # the page source declares gb2312, not utf-8;
        # the ignore argument drops bytes that cannot be decoded
        html = res.read().decode('gbk', 'ignore')
        return html

    # parse and extract data (get the name and download link in one pass)
    # html is the first-level page response
    def parse_page(self, html):
        # 1. parse the first-level page (movie name and detail link)
        pattern = re.compile(
            '<table width="100%".*?<td height="26">.*?<a href="(.*?)".*?>(.*?)</a>',
            re.S)
        # film_list: [('detail link', 'name'), ()]
        film_list = pattern.findall(html)
        # e.g. [('/html/gndy/dyzz/20190806/58956.html', '019年惊悚动作《报仇雪恨/血债血偿》BD中英双字幕'), (), ()]
        ins = 'insert into filmsky values(%s,%s)'
        for film in film_list:
            film_name = film[1]
            film_link = 'https://www.dytt8.net' + film[0]
            # 2. with the detail link in hand, fetch the second-level page and extract the download link
            download_link = self.parse_two_html(film_link)
            self.cursor.execute(ins, [film_name, film_link])
            self.db.commit()
            # print for testing
            d = {'movie name': film_name, 'download link': download_link}
            print(d)
            # {'movie name': '019年惊悚动作《报仇雪恨/血债血偿》BD中英双字幕', 'download link': 'ftp://ygdy8:[email protected]:8590/阳光电影www.ygdy8.com.报仇雪恨.BD.720p.中英双字幕.mkv'}

    # parse the second-level page to get the download link
    def parse_two_html(self, film_link):
        two_html = self.get_page(film_link)
        pattern = re.compile('<td style="WORD-WRAP.*?>.*?>(.*?)</a>', re.S)
        download_link = pattern.findall(two_html)[0]
        return download_link

    # entry point
    def main(self):
        for page in range(1, 11):
            url = self.url.format(page)
            html = self.get_page(url)
            self.parse_page(html)
            time.sleep(random.randint(1, 3))
            print('Page %d done' % page)


if __name__ == '__main__':
    start = time.time()
    spider = FilmSky()
    spider.main()
    end = time.time()
    print('Elapsed time: %.2f' % (end - start))
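The script imports ua_list from a local useragents module that is not shown in these notes; a minimal sketch of what it might contain (hypothetical file, its entries simply reuse the User-Agent strings from the earlier cases):

# useragents.py -- assumed helper module for the script above
ua_list = [
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
]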