Crawler 2: Using BeautifulSoup
Key points:
Step 1: Parse the data
Step 2: Traverse tags with find_all
Step 3: Extract content with .text, ['src'], and ['href']
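A minimal sketch of the three steps, run against a hypothetical inline HTML string (the tag names and classes here are made up for illustration):

from bs4 import BeautifulSoup

html = '<div class="item"><a href="/movie/1"><img src="/poster1.jpg">Title One</a></div>'

# Step 1: parse the data
soup = BeautifulSoup(html, 'html.parser')
# Step 2: traverse the tags with find_all
for item in soup.find_all('div', class_='item'):
    # Step 3: extract content with .text, ['src'], and ['href']
    print(item.find('a')['href'])   # /movie/1
    print(item.find('img')['src'])  # /poster1.jpg
    print(item.text)                # Title One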
# Import the requests library
import requests
# Import the BeautifulSoup library
from bs4 import BeautifulSoup

# Fetch the data
res_foods = requests.get('http://lvnvl.cn')
# Parse the data
bs_foods = BeautifulSoup(res_foods.text, 'html.parser')
movieLists = bs_foods.find_all('div', class_='mi_btcon')
moviesList = []

def main():
    for lis in movieLists:
        li = lis.find_all('li')
        # Pull the link, thumbnail, and title out of each <li>
        for i in li:
            href = i.find('a')['href']
            img = i.find('img', class_='thumb')['data-original']
            tit = i.find('h3').text
            movie = {}
            movie['href'] = href
            movie['img'] = img
            movie['tit'] = tit
            moviesList.append(movie)
    return moviesList

main()
print(moviesList)
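One robustness tweak worth noting (a sketch, not part of the original code): requests does not raise on 4xx/5xx responses by default, so it is safer to check the response before parsing.

# Fail fast on network trouble instead of parsing an error page
res_foods = requests.get('http://lvnvl.cn', timeout=10)
res_foods.raise_for_status()  # raises requests.HTTPError on a 4xx/5xx status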
Advanced usage: disguise the request as a browser and scrape data across pages
import requests, bs4

# Disguise the request headers as a browser's to dodge anti-scraping checks
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'}

for x in range(10):
    url = 'https://movie.douban.com/top250?start=' + str(x*25) + '&filter='
    res = requests.get(url, headers=headers)
    bs = bs4.BeautifulSoup(res.text, 'html.parser')
    bs = bs.find('ol', class_="grid_view")
    for titles in bs.find_all('div', class_='item'):
        num = titles.find('em', class_="").text                   # rank number
        title = titles.find('span', class_="title").text          # movie title
        if titles.find('span', class_="inq"):
            tes = titles.find('span', class_="inq").text          # tagline
        else:
            tes = 'No tagline'
        comment = titles.find('span', class_="rating_num").text   # rating
        url_movie = titles.find('a')['href']
        print(num + '.' + title + ' -- ' + comment + '\n' + 'Tagline: ' + tes + '\n' + url_movie)
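As a design note, building the query string by concatenation works but is easy to get wrong; requests can assemble and URL-encode it itself via the params argument. A sketch of the same pagination loop written that way:

import requests, bs4

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'}

for x in range(10):
    # requests appends these as ?start=...&filter= for us
    params = {'start': x * 25, 'filter': ''}
    res = requests.get('https://movie.douban.com/top250', headers=headers, params=params)
    bs = bs4.BeautifulSoup(res.text, 'html.parser')
    # ...same parsing as above...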
import requests
from bs4 import BeautifulSoup

# Disguise the request headers as a browser's to dodge anti-scraping checks
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'}

# Fetch the data
res_foods = requests.get('http://www.xiachufang.com/explore/', headers=headers)
# Parse the data
bs_foods = BeautifulSoup(res_foods.text, 'html.parser')

# Find the <p> tags that hold the dish names and URLs
tag_name = bs_foods.find_all('p', class_='name')
# Find the <p> tags that hold the ingredients
tag_ingredients = bs_foods.find_all('p', class_='ing ellipsis')
# Create an empty list to store the results
list_all = []
# Loop once per dish name
for x in range(len(tag_name)):
    # Pack the fields into a list; the [18:-14] slice trims the surrounding whitespace
    list_food = [tag_name[x].text[18:-14], tag_name[x].find('a')['href'], tag_ingredients[x].text[1:-1]]
    # Append to list_all
    list_all.append(list_food)
print(list_all)

# An alternative solution:
# find the smallest parent tag that wraps each dish
list_foods = bs_foods.find_all('div', class_='info pure-u')
# Create an empty list to store the results
list_all = []
for food in list_foods:
    # Extract the <a> tag inside this parent
    tag_a = food.find('a')
    # Dish name; [17:-13] trims the extra whitespace
    name = tag_a.text[17:-13]
    # Build the full URL
    URL = 'http://www.xiachufang.com' + tag_a['href']
    # Extract the <p> tag inside this parent
    tag_p = food.find('p', class_='ing ellipsis')
    # Ingredients; [1:-1] trims the extra whitespace
    ingredients = tag_p.text[1:-1]
    # Pack the name, URL, and ingredients into a list and append
    list_all.append([name, URL, ingredients])
print(list_all)
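The hard-coded slices such as [18:-14] depend on the exact whitespace the page happens to emit; a more tolerant alternative (a sketch assuming the same tag_name and tag_ingredients results as above) is to strip whitespace instead of slicing:

for x in range(len(tag_name)):
    name = tag_name[x].find('a').text.strip()     # trim whitespace instead of slicing
    url = tag_name[x].find('a')['href']
    ingredients = tag_ingredients[x].text.strip()
    list_all.append([name, url, ingredients])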
Two common ways to use find() and find_all()
from bs4 import BeautifulSoup

# A minimal sample document, added so the snippet runs on its own
soup = BeautifulSoup('<ul class="primaryconsumers"><li>rabbit</li></ul>', 'html.parser')

# Method 1: pass the attribute as a dictionary via the attrs parameter
css_class = soup.find(attrs={'class': 'primaryconsumers'})
print(css_class)
# Method 2: use BeautifulSoup's special keyword argument class_
css_class = soup.find(class_='primaryconsumers')
print(css_class)
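A related option worth knowing: BeautifulSoup also accepts CSS selectors through select() and select_one(), which often reads more naturally for class lookups:

# Equivalent lookups written as CSS selectors
css_class = soup.select_one('.primaryconsumers')  # first match, or None
all_matches = soup.select('.primaryconsumers')    # list of every match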