Python3学习urllib的使用方法示例
urllib是python用于获取url(Uniform Resource Locators,统一资源定址符)的标准库,可以利用它来抓取远程的数据并进行保存。本文整理了一些关于urllib使用中有关header、代理、超时、认证、异常处理等的处理方法。 1.基本方法 urllib.request.urlopen(url,data=None,[timeout,]*,cafile=None,capath=None,cadefault=False,context=None)
直接用urllib.request模块的urlopen()获取页面,page的数据格式为bytes类型,需要decode()解码,转换成str类型。 from urllib import request response = request.urlopen(r'http://python.org/') # <http.client.HTTPResponse object at 0x00000000048BC908> HTTPResponse类型 page = response.read() page = page.decode('utf-8') urlopen返回对象提供的常用方法包括 read()、readline()、readlines()、info()、getcode()、geturl() 等。
"""urllib usage examples 1-4 from the article, reformatted as runnable code.

Covers: (1) a plain urlopen() read, (2) wrapping the URL in a Request
object, (3) a cookie/gzip-aware login POST to zhihu.com, and (4) catching
HTTPError.  All network calls live inside functions or the __main__ guard
so importing this module has no side effects (the original ran the login
at import time with placeholder credentials).
"""
import gzip
import re
import urllib.error
import urllib.parse
import urllib.request
import http.cookiejar


# Example 1: simple read of a page.
def simple_read(url='http://python.org/'):
    """Return the raw response bytes of *url* via urlopen()."""
    with urllib.request.urlopen(url) as response:
        return response.read()


# Example 2: go through a Request object, then urlopen() it.
def read_with_request(url='http://python.org/'):
    """Return the raw response bytes of *url*, built from a Request."""
    req = urllib.request.Request(url)
    with urllib.request.urlopen(req) as response:
        return response.read()


# Example 3 helpers -----------------------------------------------------------

def ungzip(data):
    """Gzip-decompress *data* if possible; return it unchanged otherwise.

    The original used a bare ``except:``; narrowed here to the errors
    gzip.decompress actually raises on non-gzip / truncated input.
    """
    try:
        print("尝试解压缩...")
        data = gzip.decompress(data)
        print("解压完毕")
    except (OSError, EOFError):  # not gzip-compressed: pass bytes through
        print("未经压缩,无需解压")
    return data


def getXSRF(data):
    """Extract the hidden ``_xsrf`` form value from HTML text *data*.

    Raises IndexError when no ``_xsrf`` field is present.  The original
    pattern used greedy ``(.*)``, which over-matches up to the LAST quote
    on the line when other attributes follow; ``(.*?)`` stops at the
    closing quote of the value itself.
    """
    cer = re.compile('name="_xsrf" value="(.*?)"', flags=0)
    strlist = cer.findall(data)
    return strlist[0]


def getOpener(head):
    """Build an opener with in-memory cookie handling and *head* headers.

    *head* is a dict of header-name -> value pairs.
    """
    cj = http.cookiejar.CookieJar()
    pro = urllib.request.HTTPCookieProcessor(cj)
    opener = urllib.request.build_opener(pro)
    opener.addheaders = list(head.items())
    return opener


# Header values captured from a real browser session (e.g. via firebug).
HEADER = {
    'Connection': 'Keep-Alive',
    'Accept': 'text/html,application/xhtml+xml,*/*',
    'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
    'User-Agent': ('Mozilla/5.0 (Windows NT 6.1; WOW64; rv:46.0) '
                   'Gecko/20100101 Firefox/46.0'),
    'Accept-Encoding': 'gzip,deflate',
    'Host': 'www.zhihu.com',
    'DNT': '1',
}


def login_zhihu(email, password):
    """Example 3 driver: fetch the _xsrf token, then POST the credentials.

    Performs two network round-trips and prints the decoded response body.
    """
    url = 'http://www.zhihu.com/'
    opener = getOpener(HEADER)
    op = opener.open(url)
    data = ungzip(op.read())
    _xsrf = getXSRF(data.decode())
    url += "login/email"
    postDict = {
        '_xsrf': _xsrf,
        'email': email,
        'password': password,
        'rememberme': 'y',
    }
    postData = urllib.parse.urlencode(postDict).encode()
    op = opener.open(url, postData)
    data = ungzip(op.read())
    print(data.decode())


# Example 4: an HTTP error carries a status code and a readable body.
def show_http_error(url='http://www.lz881228.blog.163.com'):
    """Fetch *url* and print code/body when the server returns an error."""
    req = urllib.request.Request(url)
    try:
        urllib.request.urlopen(req)
    except urllib.error.HTTPError as e:
        print(e.code)
        print(e.read().decode("utf8"))


if __name__ == "__main__":
    print(simple_read())
    print(read_with_request())
    login_zhihu("登录账号", "登录密码")
    show_http_error()
"""urllib usage examples 5-8 from the article, reformatted as runnable code.

Covers: (5) distinguishing HTTPError from URLError, (6) HTTP basic
authentication, (7) routing through a proxy, and (8) a global socket
timeout.  The original ran everything at import time and contained a
SyntaxError (an unescaped apostrophe inside a single-quoted string) plus
URLs with embedded spaces; both are fixed here.  All demos are functions,
so importing this module performs no network I/O.
"""
import socket
import urllib.request
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError


def fetch_with_error_handling(url="http://www.abc.com/"):
    """Example 5: print a diagnosis for HTTP vs. reachability failures.

    HTTPError must be caught before URLError because it subclasses it.
    """
    req = Request(url)
    try:
        response = urlopen(req)
    except HTTPError as e:
        # Original article used '...couldn't...' in single quotes -> SyntaxError.
        print("The server couldn't fulfill the request.")
        print('Error code: ', e.code)
    except URLError as e:
        print('We failed to reach a server.')
        print('Reason: ', e.reason)
    else:
        print("good!")
        print(response.read().decode("utf8"))


def fetch_with_basic_auth(top_level_url="https://www.aspzz.cn/",
                          username='rekfan', password='xxxxxx'):
    """Example 6: HTTP basic auth via a password manager + auth handler."""
    # create a password manager and add the username and password.
    # If we knew the realm, we could use it instead of None.
    password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(None, top_level_url, username, password)
    handler = urllib.request.HTTPBasicAuthHandler(password_mgr)
    # create "opener" (OpenerDirector instance) and fetch a URL with it.
    opener = urllib.request.build_opener(handler)
    x = opener.open(top_level_url)
    print(x.read())
    # Install the opener: all later urllib.request.urlopen calls use it.
    urllib.request.install_opener(opener)
    a = urllib.request.urlopen(top_level_url).read().decode('utf8')
    print(a)


def fetch_via_proxy(url="http://www.baidu.com"):
    """Example 7: route requests through a proxy handler.

    NOTE(review): 'sock5' is not a scheme ProxyHandler recognises, and the
    standard library has no SOCKS support at all; for a plain HTTP proxy
    the mapping should be {'http': 'host:port'}.  Kept as in the article.
    """
    proxy_support = urllib.request.ProxyHandler({'sock5': 'localhost:1080'})
    opener = urllib.request.build_opener(proxy_support)
    urllib.request.install_opener(opener)
    a = urllib.request.urlopen(url).read().decode("utf8")
    print(a)


def fetch_with_timeout(url='http://www.aspzz.cn/', timeout=2):
    """Example 8: set a process-wide default socket timeout (in seconds).

    Every subsequent urlopen() call inherits the default set here.
    """
    socket.setdefaulttimeout(timeout)
    req = urllib.request.Request(url)
    a = urllib.request.urlopen(req).read()
    print(a)


# -- end of article; the original closed with thanks and an editorial
#    disclaimer stating that the content was collected from the web and
#    represents only the author's views --