webcrawler.py
import requests
from bs4 import BeautifulSoup


def trade_spider(max_pages):
    """Crawl listing pages up to max_pages, printing each item's title and link."""
    page_code = 0
    page = 1
    while page <= max_pages:
        # Placeholder URL: substitute the real listing URL (and its page
        # query parameter) for the site being crawled.
        url = 'https://websitelink.com' + str(page) + '&_skc=' + str(page_code) + '&rt=nc'
        source_code = requests.get(url)
        plain_text = source_code.text
        soup = BeautifulSoup(plain_text, "html.parser")
        # Each listing link on the page carries the 'vip' class.
        for link in soup.find_all('a', {'class': 'vip'}):
            href = link.get('href')
            title = link.string
            print(title)
            print(href)
            get_single_item_data(href)
        page += 1
        page_code += 50


def get_single_item_data(item_url):
    """Fetch a single item page and print its price."""
    source_code = requests.get(item_url)
    plain_text = source_code.text
    soup = BeautifulSoup(plain_text, "html.parser")
    # The item price is rendered in the span with id 'prcIsum'.
    for item_name in soup.find_all('span', {'id': 'prcIsum'}):
        print(item_name.string)


trade_spider(2)
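
# The crawler above assumes every requests.get() call succeeds. A minimal
# hardening sketch (an assumption, not part of the original script): wrap the
# fetch in a hypothetical fetch() helper that adds a timeout and error handling
# using the standard requests API.
#
# def fetch(url):
#     try:
#         response = requests.get(url, timeout=10)
#         response.raise_for_status()  # raise on 4xx/5xx status codes
#         return response.text
#     except requests.RequestException as exc:
#         print("Request failed for", url, ":", exc)
#         return ""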