Here is a very simple Python spider script for scraping data from a WordPress site in bulk — a crawler that collects the full content of a site running WordPress. It starts from the homepage, grabs the href links it finds, and on each sub-page keeps looking for more href links, working through them level by level with a URL set as a work queue. Let's go straight to the code!
import re
import bs4
import urllib.request

url_home = 'https://www.56admin.com/'  # the site to scrape
url_pattern = url_home + r'([\s\S]*)\.html'  # regex matching article pages; this pattern could still be tightened
url_set = set()      # URLs waiting to be crawled
url_cache = set()    # URLs already visited
url_count = 0
url_maxCount = 1000  # maximum number of pages to collect

# Collect href links on a page that match the article pattern
def spiderURL(url, pattern):
    html = urllib.request.urlopen(url).read().decode('utf8')
    soup = bs4.BeautifulSoup(html, 'html.parser')
    links = soup.find_all('a', href=re.compile(pattern))
    for link in links:
        if link['href'] not in url_cache:
            url_set.add(link['href'])
    return soup

# Error handling during the crawl still needs work; for sites with anti-scraping
# measures you also need to set request headers -- more on that next time.
spiderURL(url_home, url_pattern)

while len(url_set) != 0:
    try:
        url = url_set.pop()
        url_cache.add(url)
        soup = spiderURL(url, url_pattern)
        page = soup.find('div', {'class': 'content'})
        title = page.find('h1').get_text()
        author = page.find('h4').get_text()
        content = page.find('article').get_text()
        print(title, author, url)
    except Exception as e:
        print(url, e)
        continue
    else:
        url_count += 1
    finally:
        if url_count == url_maxCount:
            break

print('Collected ' + str(url_count) + ' pages in total')
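As noted in the comment above, the script sends no request headers, so sites with basic anti-scraping rules may reject it. A minimal sketch of one way to add a browser-like User-Agent, assuming the same urllib.request/BeautifulSoup setup as the script above (the header string is only an illustrative placeholder):

import bs4
import urllib.request

# Sketch only: fetch a page while sending a browser-like User-Agent header.
def fetch_soup(url):
    req = urllib.request.Request(
        url,
        headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}  # placeholder value
    )
    html = urllib.request.urlopen(req).read().decode('utf8')
    return bs4.BeautifulSoup(html, 'html.parser')

Swapping the urllib.request.urlopen(url) call inside spiderURL for a Request object built this way would leave the rest of the script unchanged.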
Link to this article: https://addon.ciliseo.com/python-pi-liang-cai-ji-wordpress-wang-zhan-shu-ju-pa-chong-jiao-ben.html