-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathscrape_wiki.py
55 lines (43 loc) · 2.08 KB
/
scrape_wiki.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import requests
from bs4 import BeautifulSoup
import re

# URL of the Wikipedia article to scrape.
url = "https://en.wikipedia.org/wiki/Python_(programming_language)"

# Fetch the page. A timeout prevents the script from hanging forever
# (requests has no default timeout), and raise_for_status() fails loudly
# on HTTP 4xx/5xx instead of silently "cleaning" an error page.
response = requests.get(url, timeout=30)
response.raise_for_status()

# Parse the HTML and locate the article body, which Wikipedia renders
# inside <div id="mw-content-text">.
soup = BeautifulSoup(response.content, 'html.parser')
content_div = soup.find('div', {'id': 'mw-content-text'})
if content_div is None:
    # Fail with a clear message rather than an AttributeError on .get_text().
    raise ValueError(f"Could not find article content div at {url}")

# Remove boilerplate formatting from the scraped text, in order:
text = content_div.get_text()
text = re.sub(r'\[[0-9]+\]', '', text)   # remove citation numbers like [12]
text = re.sub(r'\s+', ' ', text)         # collapse excess whitespace
text = re.sub(r'\([^)]*\)', '', text)    # remove text in parentheses
text = re.sub(r'\{\{.*?\}\}', '', text)  # remove leftover wiki templates
text = re.sub(r'\[.*?\]', '', text)      # remove text in square brackets
text = re.sub(r'<.*?>', '', text)        # remove stray HTML tags
text = re.sub(r'\u200b', '', text)       # remove zero-width spaces

# print the cleaned-up text
print(text)
# Second scrape: the "Web scraping" article. Imports are not repeated —
# requests, BeautifulSoup and re are already in scope from the top of the file.

# URL of the Wikipedia article to scrape.
url = "https://en.wikipedia.org/wiki/Web_scraping"

# Fetch the page. A timeout prevents the script from hanging forever
# (requests has no default timeout), and raise_for_status() fails loudly
# on HTTP 4xx/5xx instead of silently "cleaning" an error page.
response = requests.get(url, timeout=30)
response.raise_for_status()

# Parse the HTML and locate the article body, which Wikipedia renders
# inside <div id="mw-content-text">.
soup = BeautifulSoup(response.content, 'html.parser')
content_div = soup.find('div', {'id': 'mw-content-text'})
if content_div is None:
    # Fail with a clear message rather than an AttributeError on .get_text().
    raise ValueError(f"Could not find article content div at {url}")

# Remove boilerplate formatting from the scraped text, in order:
text = content_div.get_text()
text = re.sub(r'\[[0-9]+\]', '', text)   # remove citation numbers like [12]
text = re.sub(r'\s+', ' ', text)         # collapse excess whitespace
text = re.sub(r'\([^)]*\)', '', text)    # remove text in parentheses
text = re.sub(r'\{\{.*?\}\}', '', text)  # remove leftover wiki templates
text = re.sub(r'\[.*?\]', '', text)      # remove text in square brackets
text = re.sub(r'<.*?>', '', text)        # remove stray HTML tags
text = re.sub(r'\u200b', '', text)       # remove zero-width spaces

# print the cleaned-up text
print(text)