Initial commit
TiloMichel committed Jul 2, 2022
0 parents commit 66ff720
Showing 80 changed files with 54,174 additions and 0 deletions.
135 changes: 135 additions & 0 deletions .gitignore
@@ -0,0 +1,135 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# Elasticsearch data
/0_webcrawler/data

# scrapy test output file
/0_webcrawler/test.json
143 changes: 143 additions & 0 deletions .vscode/launch.json
@@ -0,0 +1,143 @@
{
"version": "0.1.0",
"configurations": [
{
"name": "Scrapy to JSON",
"type": "python",
"request": "launch",
"module": "scrapy",
"args": [
"runspider",
"${file}",
"-a",
"start_url=https://kmi.fbi.h-da.de/aboutkmi.html",
"-o",
"test.json"
],
"console": "integratedTerminal",
"cwd": "${fileDirname}",
},
{
"name": "Scrapy to ElasticSearch",
"type": "python",
"request": "launch",
"module": "scrapy",
"args": [
"runspider",
"${file}",
"-a",
"start_url=https://kmi.fbi.h-da.de/aboutkmi.html",
"-s",
"ELASTICSEARCH_INDEX=test123",
],
"console": "integratedTerminal",
"cwd": "${fileDirname}",
},
{
"name": "Paraphrase file transformer",
"type": "python",
"request": "launch",
"program": "${file}",
"args": [
"--strategy",
"transformer_model",
"--file",
"training_examples.txt",
"--out",
"paraphrases1.xlsx"
],
"console": "integratedTerminal",
"cwd": "${fileDirname}",
},
{
"name": "Paraphrase file backtranslation",
"type": "python",
"request": "launch",
"program": "${file}",
"args": [
"--strategy",
"backtranslation",
"--file",
"training_examples.txt",
"--out",
"paraphrases1.xlsx"
],
"console": "integratedTerminal",
"cwd": "${fileDirname}",
},
{
"name": "Generate questions from website (answer-agnostic)",
"type": "python",
"request": "launch",
"program": "${file}",
"args": [
"--qg-strategy",
"answer_agnostic",
"--textblock-strategy",
"all",
"--index-name",
"kmi_crawler_2364dfd1-a060-44d9-85a8-eb7e5ecc3fe1",
],
"console": "integratedTerminal",
"cwd": "${fileDirname}",
},
{
"name": "Generate questions from website (answer-aware)",
"type": "python",
"request": "launch",
"program": "${file}",
"args": [
"--qg-strategy",
"answer_aware",
"--textblock-strategy",
"all",
"--index-name",
"kmi_crawler_2364dfd1-a060-44d9-85a8-eb7e5ecc3fe1",
"--ner-library",
"flair",
"--ner-highlighting-strategy",
"word"
],
"console": "integratedTerminal",
"cwd": "${fileDirname}",
},
{
"name": "Generate questions from website (answer-agnostic, translation pipeline)",
"type": "python",
"request": "launch",
"program": "${file}",
"args": [
"--qg-strategy",
"answer_agnostic",
"--textblock-strategy",
"all",
"--index-name",
"kmi_crawler_2364dfd1-a060-44d9-85a8-eb7e5ecc3fe1",
"--translation-pipeline",
"True"
],
"console": "integratedTerminal",
"cwd": "${fileDirname}",
},
{
"name": "Generate questions from website (answer-agnostic faq random samples)",
"type": "python",
"request": "launch",
"program": "${file}",
"args": [
"--qg-strategy",
"answer_agnostic",
"--textblock-strategy",
"faq",
"--index-name",
"rki_crawler_57b57991-2755-4b68-9788-b382bf3c8196",
"--random-samples",
"True",
"--random-samples-count",
"3"
],
"console": "integratedTerminal",
"cwd": "${fileDirname}",
},
]
}
81 changes: 81 additions & 0 deletions 0_webcrawler/README.md
@@ -0,0 +1,81 @@
# Webcrawler
The webcrawler extracts paragraphs from the target websites, which can later be used for question generation. Each distinct website is saved to an Elasticsearch index for the given URL. The index name and start URL are passed to the crawler as parameters (see [Start the crawler from CLI](#Start-the-crawler-from-CLI)).

<img src="../images/crawling-websites-for-text.png" width="600" height="350" />

## Webpage to text test

|Task|Notebook|
|---|---|
|Webpage to text test|[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/TiloMichel/textgen-for-chatbot-training-german/blob/main/0_webcrawler/webpage_to_text_test.ipynb) |

## Install
Install all with

`pip install -r requirements.txt`

Scrapy, a fast high-level web crawling & scraping framework for Python: https://github.com/scrapy/scrapy

`pip install scrapy==2.5.1`

scrapy-splash integrates Scrapy with Splash, a "lightweight, scriptable browser as a service with an HTTP API": https://github.com/scrapinghub/splash

`pip install scrapy_splash==0.8.0`

elasticsearch, the official Python client for Elasticsearch, the "free and open, distributed, RESTful search engine": https://github.com/elastic/elasticsearch

`pip install elasticsearch==7.17.0`

jusText, a heuristic-based boilerplate removal tool: https://github.com/miso-belica/jusText

`pip install justext==3.0.0`
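
As a rough illustration of what jusText contributes to the pipeline, the following sketch fetches one of the test URLs listed below and keeps only the paragraphs that jusText does not classify as boilerplate. It assumes `requests` is available in the environment; the spider's actual extraction logic may differ.

```python
import requests
import justext

# Fetch a page and keep only the paragraphs jusText considers main content.
response = requests.get("https://kmi.fbi.h-da.de/aboutkmi.html")
paragraphs = justext.justext(response.content, justext.get_stoplist("German"))
for paragraph in paragraphs:
    if not paragraph.is_boilerplate:
        print(paragraph.text)
```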

### Install & run Splash and Elasticsearch

`docker-compose up`

or

`docker-compose -p "webcrawler" up`

Elasticsearch server: http://localhost:9200/

Splash frontend: http://localhost:8050/

Recommended frontend for Elasticsearch (browser extension): https://elasticvue.com/
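
Once the crawler has written documents, the index can also be inspected from Python with the `elasticsearch` client installed above. This is only a minimal sketch; the index name `test` mirrors the CLI example below, and the document fields depend on the spider's item definition.

```python
from elasticsearch import Elasticsearch

# Minimal sketch: list a few documents from a crawled index.
es = Elasticsearch("http://localhost:9200")
result = es.search(index="test", body={"query": {"match_all": {}}, "size": 5})
for hit in result["hits"]["hits"]:
    print(hit["_id"], hit["_source"])
```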

## Start the crawler from CLI
The webcrawler can be started from the command line.

Options
* `-s`: Scrapy setting overrides (here the Elasticsearch index name)
* `-a`: spider arguments (here the start URL)
* `-o`: output file for the scraped items

`scrapy crawl intent-finder-bot -s ELASTICSEARCH_INDEX=test -a start_url='https://kmi.fbi.h-da.de/aboutkmi.html' -o test.json`
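
For orientation, the sketch below shows how these options typically reach a Scrapy spider: `-a start_url=...` arrives as a constructor keyword argument, while `-s ELASTICSEARCH_INDEX=...` is read from the crawler settings. This is an assumption about the general shape of such a spider, not the actual implementation of `intent-finder-bot`.

```python
import scrapy


class IntentFinderSpider(scrapy.Spider):
    name = "intent-finder-bot"

    def __init__(self, start_url=None, *args, **kwargs):
        # "-a start_url=..." is passed in here by Scrapy.
        super().__init__(*args, **kwargs)
        self.start_urls = [start_url] if start_url else []

    def parse(self, response):
        # "-s ELASTICSEARCH_INDEX=..." overrides a crawler setting.
        index_name = self.settings.get("ELASTICSEARCH_INDEX", "default")
        yield {"url": response.url, "index": index_name}
```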


## Test URLs

### No JavaScript content
https://kmi.fbi.h-da.de/aboutkmi.html

https://www.rki.de/DE/Home/homepage_node.html

https://www.dge.de/wissenschaft/faqs/

https://quotes.toscrape.com/

https://books.toscrape.com/

### JavaScript content
http://www.webscrapingfordatascience.com/simplejavascript/

https://quotes.toscrape.com/js/
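
Pages like these need a browser engine to execute their JavaScript before text can be extracted. The sketch below shows the general scrapy-splash pattern of routing a request through the local Splash service started via docker-compose; it is only a sketch, and the scrapy-splash middleware settings required in a real project are not shown here.

```python
import scrapy
from scrapy_splash import SplashRequest


class JsQuotesSpider(scrapy.Spider):
    name = "js-quotes-test"
    # SPLASH_URL points at the Splash container; the scrapy-splash
    # downloader/spider middlewares must also be enabled in the settings.
    custom_settings = {"SPLASH_URL": "http://localhost:8050"}

    def start_requests(self):
        # Render the page in Splash and wait briefly for JavaScript to run.
        yield SplashRequest(
            "https://quotes.toscrape.com/js/",
            callback=self.parse,
            args={"wait": 2},
        )

    def parse(self, response):
        for quote in response.css("span.text::text").getall():
            yield {"quote": quote}
```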

### Known problems
- Cookie banner content is scraped.
- If content is only rendered after a mandatory user interaction, it isn't scraped, e.g. when a cookie banner has to be accepted before the content renders.

## Further possible improvements
- Run the spiders on Scrapyd (the Scrapy daemon service) for better maintainability, monitoring and scalability: https://scrapyd.readthedocs.io/en/stable/
Empty file added 0_webcrawler/__init__.py
Empty file.
29 changes: 29 additions & 0 deletions 0_webcrawler/docker-compose.yml
@@ -0,0 +1,29 @@
version: '3'
services:
  # search engine
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.17.0
    container_name: elasticsearch
    environment:
      - discovery.type=single-node
      - http.port=9200
      - http.cors.enabled=true
      - http.cors.allow-origin=/.*/
      - http.cors.allow-headers=X-Requested-With,X-Auth-Token,Content-Type,Content-Length,Authorization
      - http.cors.allow-credentials=true
      - bootstrap.memory_lock=true
      - 'ES_JAVA_OPTS=-Xms512m -Xmx512m'
    ports:
      - '9200:9200'
      - '9300:9300'
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - ./data:/usr/share/elasticsearch/data
  splash:
    image: scrapinghub/splash
    container_name: splash
    ports:
      - 8050:8050