Skip to content

Commit

Permalink
Apply an lru_cache to avoid spamming requests
Browse files Browse the repository at this point in the history
  • Loading branch information
1over137 committed Mar 20, 2024
1 parent a55bb88 commit fa25512
Show file tree
Hide file tree
Showing 5 changed files with 35 additions and 15 deletions.
18 changes: 18 additions & 0 deletions vocabsieve/cached_get.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
from functools import lru_cache
import requests
from .constants import FORVO_HEADERS


@lru_cache(maxsize=5000)
def cached_get(url: str, forvo_headers: bool = False) -> requests.Response:
    """Cached wrapper around requests.get to avoid spamming remote servers.

    Results are memoized by (url, forvo_headers) in an LRU cache of up to
    5000 entries, so repeated lookups of the same URL return the previously
    fetched Response without hitting the network again.

    Args:
        url: The URL to fetch.
        forvo_headers: If True, send FORVO_HEADERS with the request
            (presumably browser-like headers Forvo requires — see
            constants.FORVO_HEADERS).

    Returns:
        The successful requests.Response.

    Raises:
        requests.HTTPError: for non-2xx responses (via raise_for_status).
        requests.RequestException: for network/timeout failures.
        Note: lru_cache does not cache exceptions, so a failed URL is
        retried on the next call rather than permanently poisoned.
    """
    if forvo_headers:
        res = requests.get(url, headers=FORVO_HEADERS, timeout=10)
    else:
        res = requests.get(url, timeout=10)
    # NOTE(review): removed leftover debug `print(res.text)` — it dumped
    # every fetched response body (often entire HTML pages) to stdout.
    res.raise_for_status()
    return res
5 changes: 2 additions & 3 deletions vocabsieve/sources/forvo_audio_source.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
# mypy: ignore-errors
from ..cached_get import cached_get
from ..models import AudioSource, LemmaPolicy, AudioLookupResult

from bs4 import BeautifulSoup
Expand All @@ -11,8 +12,6 @@
from urllib.parse import quote, unquote
from dataclasses import dataclass
from ..global_names import settings
from ..constants import FORVO_HEADERS

from loguru import logger


Expand Down Expand Up @@ -44,7 +43,7 @@ def __init__(self, word, lang, accent=""):
self.accent = accent

def get_pronunciations(self):
res = requests.get(self.url, headers=FORVO_HEADERS, timeout=5)
res = cached_get(self.url, forvo_headers=True)
if res.status_code == 200:
page = res.text
else:
Expand Down
13 changes: 6 additions & 7 deletions vocabsieve/sources/google_translate_source.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from ..models import DictionarySource, LookupResult, SourceOptions
import requests
from urllib.parse import quote
from ..cached_get import cached_get


class GoogleTranslateSource(DictionarySource):
Expand All @@ -14,9 +14,8 @@ def __init__(self, langcode: str, options: SourceOptions, gtrans_api: str, gtran

def _lookup(self, word: str) -> LookupResult:
    """Look up *word* through the configured gtrans API instance.

    Builds the API URL from the instance's endpoint and language codes,
    fetches it through the shared LRU-cached getter, and wraps the
    outcome in a LookupResult.

    Returns:
        LookupResult with the translation on success, or one carrying
        repr(e) as the error on any failure (network error, timeout, or
        non-2xx status raised by cached_get).
    """
    url = f"{self.gtrans_api}/api/v1/{self.langcode}/{self.to_langcode}/{quote(word)}"
    try:
        res = cached_get(url)
    except Exception as e:
        # Best-effort: surface the failure to the UI as an error result
        # rather than crashing the lookup pipeline.
        return LookupResult(error=repr(e))
    return LookupResult(definition=res.json()['translation'])
8 changes: 4 additions & 4 deletions vocabsieve/sources/wiktionary_source.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
from bs4 import BeautifulSoup
from ..models import DictionarySource, SourceOptions, LookupResult
from loguru import logger
from ..cached_get import cached_get


def fmt_result(definitions):
Expand All @@ -26,13 +27,12 @@ def __init__(self, langcode: str, options: SourceOptions) -> None:
def _lookup(self, word: str) -> LookupResult:
logger.info(f"Looking up {word} in Wiktionary")
try:
res = requests.get(
res = cached_get(
'https://en.wiktionary.org/api/rest_v1/page/definition/' +
word,
timeout=4)
word)
except Exception as e:
logger.error(f"Failed to get data from Wiktionary: {repr(e)}")
return LookupResult(error=str(e))

if res.status_code != 200:
return LookupResult(error=str(res.text))
definitions = []
Expand Down
6 changes: 5 additions & 1 deletion vocabsieve/tools.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,21 @@
from functools import lru_cache
import json
import urllib.request
import os
import re
import unicodedata
from itertools import zip_longest, islice
import time

from .constants import FORVO_HEADERS
from .vsnt import FIELDS, CARDS, CSS
from bs4 import BeautifulSoup
from typing import List
from typing import List, Optional
from .local_dictionary import LocalDictionary
from json.decoder import JSONDecodeError
import mobi
from datetime import datetime
import requests
from lxml import etree
from charset_normalizer import from_bytes, from_path
from ebooklib import epub, ITEM_DOCUMENT
Expand Down

0 comments on commit fa25512

Please sign in to comment.