Skip to content

Commit

Permalink
Add Shamrock
Browse files — browse the repository at this point in the history
  • Loading branch information
sindrig committed Jul 4, 2024
1 parent 64fa753 commit 657d335
Show file tree
Hide file tree
Showing 4 changed files with 53 additions and 34 deletions.
1 change: 1 addition & 0 deletions clock/src/club-ids.js
Original file line number Diff line number Diff line change
Expand Up @@ -177,4 +177,5 @@ export default {
Ægir: "822",
Æskan: "139060",
Örninn: "145472",
"Shamrock Rovers": "142447",
};
Binary file added clock/src/images/club-logos/Shamrock Rovers.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
2 changes: 2 additions & 0 deletions clock/src/images/clubLogos.js
Original file line number Diff line number Diff line change
Expand Up @@ -387,6 +387,7 @@ import { default as fhl } from "./club-logos/FHL.png";
import { default as valur } from "./club-logos/Valur.png";
import { default as valurreyðarf } from "./club-logos/Valur Reyðarf.png";
import { default as þorlákur } from "./club-logos/Þorlákur.png";
import {default as shamrockrovers} from "./club-logos/Shamrock Rovers.png";

export default {
"Afganistan": afganistan,
Expand Down Expand Up @@ -778,4 +779,5 @@ export default {
"Þór Th": þórth,
"Þór-KA": þórka,
"Þýskaland": þýskaland,
"Shamrock Rovers": shamrockrovers,
};
84 changes: 50 additions & 34 deletions utils/add_club.py
Original file line number Diff line number Diff line change
@@ -1,104 +1,120 @@
#!/usr/bin/env python
import argparse
import os
import unicodedata

import bs4
import requests

# KSÍ (Icelandic FA) member-clubs index page.
PAGE = "https://www.ksi.is/mot/felog/adildarfelog/"
# <repo-root>/clock — the front-end app whose club-ids.js / clubLogos.js
# this script updates.
BASE = os.path.join(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "clock"
)


def strip_accents(s):
    """Return *s* with combining accent marks removed.

    Decomposes the string (NFD) so accented characters split into a base
    character plus combining marks, then drops every mark (category "Mn").
    """
    decomposed = unicodedata.normalize("NFD", s)
    return "".join(ch for ch in decomposed if unicodedata.category(ch) != "Mn")


def get_absolute_url(absolute_path):
    """Prefix *absolute_path* (e.g. "/mot/felag/?lid=1") with the ksi.is host."""
    # The pasted diff left the pre-commit single-quoted return duplicated
    # above this one; only the post-commit line is kept.
    return "https://www.ksi.is%s" % (absolute_path,)


def main(club_id, out_folder, club_name=None):
    """Fetch a club's page from ksi.is, download its logo, and register it
    in clock/src/club-ids.js and clock/src/images/clubLogos.js.

    club_id -- KSÍ numeric club id (used in the /mot/felag/?lid= URL)
    out_folder -- directory the logo image is saved into
    club_name -- optional override; scraped from the page's <h1> when None

    Raises RuntimeError when the page lacks an <h1>, a club name, or the
    expected logo <img>; requests.HTTPError on bad HTTP status.
    """
    r = requests.get(get_absolute_url(f"/mot/felag/?lid={club_id}"))
    r.raise_for_status()
    soup = bs4.BeautifulSoup(r.text, "html.parser")
    h1 = soup.find("h1")
    if not h1:
        raise RuntimeError("No h1 found")
    if not club_name:
        # Page title is of the form "<prefix> - <club name>".
        club_name = h1.text.split("-")[1].strip()
    if not club_name:
        raise RuntimeError("Club name not found")
    # JS identifier for the import binding, e.g. "Shamrock Rovers" -> "shamrockrovers".
    club_name_var_name = club_name.replace(" ", "").lower()
    for img_tag in soup.findAll("img"):
        # The club logo <img> carries this (unrendered) template alt text.
        if img_tag.get("alt", "") == "Model.BasicInfo.ShortName":
            img_url = img_tag["src"]
            break
    else:
        raise RuntimeError("Did not find img!")

    # Reuse an already-downloaded logo (original extension or .svg);
    # otherwise download it with the original extension.
    exts = [os.path.splitext(str(img_url))[1], ".svg"]
    for ext in exts:
        path = os.path.join(out_folder, "%s%s" % (club_name, ext))
        if os.path.isfile(path):
            print("%s exists" % (path,))
            break
    else:
        path = os.path.join(out_folder, "%s%s" % (club_name, exts[0]))
        r2 = requests.get(get_absolute_url(img_url))
        r2.raise_for_status()
        with open(path, "wb") as f:
            for chunk in r2.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
        print(
            "Saved %s for %s"
            % (
                path,
                club_name,
            )
        )

    # Insert the name->id mapping just before the closing "};" of club-ids.js
    # (skipped when an identical line is already present).
    club_ids = os.path.join(BASE, "src", "club-ids.js")
    line = ' "%s": "%s",\n' % (club_name, club_id)
    with open(club_ids, "r") as f:
        lines = f.readlines()
    if line not in lines:
        lines.insert(-1, line)
    with open(club_ids, "w") as f:
        for line in lines:
            f.write(line)

    # Rewrite clubLogos.js: add an import for the logo at the first blank
    # line and a name->binding entry just before the closing "};".
    # NOTE(review): unlike club-ids.js there is no "already present" guard
    # here — re-running for the same club duplicates its entries; confirm
    # whether that is acceptable for this one-shot tool.
    club_logos = os.path.join(BASE, "src", "images", "clubLogos.js")
    with open(club_logos, "r") as f:
        lines = f.readlines()
    added_import = False
    with open(club_logos, "w") as f:
        for line in lines:
            if not added_import and not line.strip():
                f.write(
                    f'import {{default as {club_name_var_name}}} from "./{os.path.relpath(path, os.path.dirname(club_logos))}";\n'
                )
                # BUGFIX: the flag was never set, so the import line was
                # emitted at every blank line of the file, not just once.
                added_import = True
            if line.strip() == "};":
                f.write(f'\t"{club_name}": {club_name_var_name},\n')
            f.write(line)


def get_club_id(club_name):
    """Resolve a club name to its numeric KSÍ id via the site search.

    Searches ksi.is (category "Félög"), finds the <h2> whose text exactly
    matches *club_name*, and parses the id out of its /mot/lid/?lid= link.
    Returns None when no heading matches; raises RuntimeError when a
    matching heading has no link.
    """
    # The pasted diff interleaved pre-commit single-quoted lines with the
    # post-commit double-quoted ones; only the post-commit side is kept.
    r = requests.get(
        get_absolute_url(
            f"/leit/?searchstring={club_name}&contentcategories=F%c3%a9l%c3%b6g"
        )
    )
    r.raise_for_status()
    soup = bs4.BeautifulSoup(r.text, "html.parser")
    all_h2 = soup.findAll("h2")
    for h2 in all_h2:
        if h2.text == club_name:
            a = h2.find("a")
            if not a:
                raise RuntimeError("No link found in search result")
            href = a["href"]
            return int(href.replace("/mot/lid/?lid=", ""))


if __name__ == '__main__':
if __name__ == "__main__":
parser = argparse.ArgumentParser()
folder = os.path.join(BASE, 'src', 'images', 'club-logos')
parser.add_argument('club_id', type=str)
folder = os.path.join(BASE, "src", "images", "club-logos")
parser.add_argument("club_id", type=str)
args = parser.parse_args()
club_name = None
if args.club_id.isdigit():
Expand Down

0 comments on commit 657d335

Please sign in to comment.