# 一、div标签文本提取
# 将学习视频中xpath.html文件中div标签下文本值
from lxml import etree

# Section 1: extract the text values under <div> tags in xpath.html.
# Use a context manager so the file handle is closed even if read() raises.
with open('xpath.html', 'r', encoding='utf-8') as file:
    html = file.read()

# Parse the document into an lxml element tree for XPath queries.
selector = etree.HTML(html)

# All text nodes that are direct children of any <div>.
div = selector.xpath('//div/text()')
print(div)

# "first div" / "second div": indices 0 and 3 in the text-node list
# (the intervening entries are presumably whitespace-only nodes between
# child tags — verify against xpath.html).
div_1 = div[0].strip()
div_2 = div[3].strip()
print(div_1, div_2)
# Section 2: extract the "流程", "xpath学习", "流程2" text values under
# <ul> tags in xpath.html and print them.
# Hoisted: run the identical '//ul/text()' query once instead of three
# times — same result, one tree traversal.
ul_texts = selector.xpath('//ul/text()')
ul1 = ul_texts[0].strip()
ul2 = ul_texts[6].strip()
ul3 = ul_texts[8].strip()
print(ul1, ul2, ul3)
# Section 3: structured extraction of the text and hyperlink of the
# first three <a> tags inside the first <div class="works">.
anchors = selector.xpath('//div[@class="works"][1]/ul[@class="title"][1]/li[position()<4]/a')
for anchor in anchors:
    # First text node and href attribute of each matched <a>.
    link_text, link_href = anchor.xpath('text()')[0], anchor.xpath('@href')[0]
    print(link_text, link_href)
# Section 4: requests module combined with lxml & XPath — extract the
# text and hyperlinks of the YGDY movie site's navigation bar.
import requests

url = 'http://www.ygdy8.com/'
# Browser-like request headers captured from a real session.
# NOTE(review): the original also carried 'If-Modified-Since' and
# 'If-None-Match' entries; sending those stale conditional headers can
# make the server answer 304 Not Modified with an empty body, so they
# are dropped here.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    'Cookie': '37cs_pidx=1; 37cs_user=37cs78691413268; 37cs_show=69; UM_distinctid=15e36b74d82561-042fa83480e0e9-8383667-1fa400-15e36b74d836c6; CNZZDATA5783118=cnzz_eid%3D1135589022-1504151834-%26ntime%3D1504151834; cscpvrich4016_fidx=3',
    'Host': 'www.ygdy8.com',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
}
# BUG FIX: the headers dict was built but never passed to the request.
req = requests.get(url, headers=headers)
status_code = req.status_code
# The site serves GB2312-encoded Chinese pages; decode accordingly.
req.encoding = 'gb2312'
html = req.text

selector = etree.HTML(html)
# Every <a> that is a child of an <li> — presumably the nav bar links.
infos = selector.xpath('//li/a')
for info in infos:
    title = info.xpath('text()')   # list; empty when the <a> has no direct text
    u1 = info.xpath('@href')[0]
    if len(title) >= 1:
        print(title, u1)
    else:
        # Stop at the first text-less anchor — presumably the end of the
        # nav bar. NOTE(review): 'continue' may have been intended here
        # if later anchors should still be printed; confirm.
        break