Update #118

Merged: 5 commits, Apr 1, 2024
File renamed without changes.
28 changes: 25 additions & 3 deletions ncc_core/nc_import/bots/get_langs.py
@@ -4,6 +4,7 @@

"""
import re
import wikitextparser as wtp
from newapi.ncc_page import MainPage as ncc_MainPage
from newapi import printe

@@ -26,9 +27,30 @@ def get_langs_codes():
    """
    text = get_text()
    langs = []
    fi = re.findall(r"\* (.*)\n", text)
    for i in fi:
        langs.append(i.strip())
    # * {{User:Mr. Ibrahem/import bot/line|ar}}
    # ---
    tmp = "User:Mr. Ibrahem/import bot/line"
    # ---
    prased = wtp.parse(text)
    temps = prased.templates
    for temp in temps:
        # ---
        name = str(temp.normal_name()).strip().lower().replace("_", " ")
        # ---
        printe.output(f"{temp.name=}, {name=}")
        # ---
        if name == tmp.lower():
            # ---
            # get first argument
            # ---
            va = temp.get_arg("1")
            if va and va.value:
                langs.append(va.value.strip())
    # ---
    printe.output(f"langs: {langs}")
    # ---
    return langs

if __name__ == "__main__":
    # python3 core8/pwb.py nc_import/bots/get_langs
    get_langs_codes()
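To make the new parsing approach easy to try in isolation, here is a minimal, self-contained sketch (not part of the PR; the sample wikitext and the `extract_codes` name are made up for illustration). It mirrors the logic above: parse the page with wikitextparser, match the `User:Mr. Ibrahem/import bot/line` template by its normalized name, and collect the first positional argument.

```python
# Illustrative sketch only; mirrors the template handling added above.
import wikitextparser as wtp

SAMPLE = """
* {{User:Mr. Ibrahem/import bot/line|ar}}
* {{User:Mr. Ibrahem/import bot/line|fr}}
"""

TARGET = "User:Mr. Ibrahem/import bot/line".lower()

def extract_codes(text):
    codes = []
    for temp in wtp.parse(text).templates:
        name = str(temp.normal_name()).strip().lower().replace("_", " ")
        if name == TARGET:
            arg = temp.get_arg("1")  # first positional argument, e.g. "ar"
            if arg and arg.value:
                codes.append(arg.value.strip())
    return codes

print(extract_codes(SAMPLE))  # expected output: ['ar', 'fr']
```

Compared with the old `re.findall(r"\* (.*)\n", text)` approach, this only picks up the template argument itself and ignores bullet lines that do not contain the template.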
13 changes: 13 additions & 0 deletions ncc_core/nc_import/bots/import_files.py
@@ -3,6 +3,7 @@
bot for importing files from nccommons to wikipedia

"""
import re
import sys
from newapi.ncc_page import MainPage as ncc_MainPage, NEW_API as ncc_NEW_API
from newapi import printe
@@ -22,6 +23,16 @@ def get_file_text(title):

    return text

def categories_work(text):
    """
    remove all categories from the text
    """
    # ---
    text = re.sub(r"\[\[Category:(.*?)\]\]", "", text, flags=re.DOTALL)
    # ---
    text += "\n[[Category:Files imported from NC Commons]]"
    # ---
    return text

def import_file(title, code):
    """
@@ -31,6 +42,8 @@ def import_file(title, code):
    # ---
    file_text = get_file_text(title)
    # ---
    file_text = categories_work(file_text)
    # ---
    api_new = ncc_NEW_API("www", family="nccommons")
    # api_new.Login_to_wiki()
    img_url = api_new.Get_image_url(title)
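For reference, a small usage example of the new `categories_work()` helper (the function body is copied from the diff above; the sample wikitext is invented): existing `[[Category:...]]` links are stripped from the imported description and a single tracking category is appended.

```python
# Illustrative usage of categories_work(); the regex and the appended
# category mirror the diff above, the sample text is made up.
import re

def categories_work(text):
    text = re.sub(r"\[\[Category:(.*?)\]\]", "", text, flags=re.DOTALL)
    text += "\n[[Category:Files imported from NC Commons]]"
    return text

sample = "A chest X-ray.\n[[Category:X-rays]]\n[[Category:Radiology]]"
result = categories_work(sample)
assert "[[Category:X-rays]]" not in result
assert result.endswith("[[Category:Files imported from NC Commons]]")
```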
15 changes: 13 additions & 2 deletions ncc_core/nc_import/bots/wrk_pages.py
@@ -28,6 +28,12 @@ def start(self):
        # ---
        self.get_temps()
        self.work_on_temps()
        # ---
        if self.new_text == self.text:
            printe.output("no changes")
            return
        # ---
        self.add_category()
        self.save()

    def get_temps(self):
@@ -79,9 +85,14 @@ def work_on_temps(self):
            if temp_new_text != string:
                self.new_text = self.new_text.replace(string, temp_new_text)

    def add_category(self):
        cat = "Category:Contains images from NC Commons"
        # ---
        if self.new_text.find(cat) == -1:
            self.new_text += "\n[[Category:Contains images from NC Commons]]"

    def save(self):
        if self.new_text != self.text:
            self.page.save(newtext=self.new_text, summary="bot: fix NC")
        self.page.save(newtext=self.new_text, summary="bot: fix NC")


def work_on_pages(code, pages):
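The behavioural change in this file is small but spread over several hunks: `start()` now bails out before saving when the template rewriting produced no change, and the tracking category is only appended when it is not already on the page. A condensed sketch of that flow (class and method bodies trimmed; only names visible in the diff are used):

```python
# Condensed sketch of the new start() flow; not the full class from wrk_pages.py.
class Sketch:
    def __init__(self, page, text):
        self.page = page
        self.text = text          # original wikitext
        self.new_text = text      # working copy mutated by work_on_temps()

    def work_on_temps(self):
        """Rewrite NC Commons templates in self.new_text (unchanged by this PR)."""

    def add_category(self):
        cat = "Category:Contains images from NC Commons"
        if self.new_text.find(cat) == -1:  # append only once
            self.new_text += f"\n[[{cat}]]"

    def start(self):
        self.work_on_temps()
        if self.new_text == self.text:
            return                          # nothing changed, skip the edit
        self.add_category()
        self.page.save(newtext=self.new_text, summary="bot: fix NC")
```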
52 changes: 52 additions & 0 deletions ncc_core/nc_import/views.py
@@ -0,0 +1,52 @@
"""

python3 core8/pwb.py nc_import/views
fetch("https://pageviews.wmcloud.org/pageviews/api.php?pages=Kat%7CHond&project=af.wikipedia.org&start=2015-07-01&end=2024-03-27&totals=true", {
"headers": {
"accept": "*/*",
"accept-language": "ar-EG,ar;q=0.9,en-US;q=0.8,en;q=0.7,sk;q=0.6,zh-CN;q=0.5,zh-TW;q=0.4,zh;q=0.3",
"sec-ch-ua": "\"Google Chrome\";v=\"123\", \"Not:A-Brand\";v=\"8\", \"Chromium\";v=\"123\"",
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": "\"Windows\"",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"x-requested-with": "XMLHttpRequest"
},
"referrer": "https://pageviews.wmcloud.org/pageviews/?project=af.wikipedia.org&platform=all-access&agent=user&redirects=0&range=all-time&pages=Kat|Hond",
"referrerPolicy": "strict-origin-when-cross-origin",
"body": null,
"method": "GET",
"mode": "cors",
"credentials": "omit"
});
"""
from nc_import.bots.get_langs import get_langs_codes

def get_views(code):
endpoint = "https://pageviews.wmcloud.org/massviews/api.php"
category = "Category:Files_imported_from_NC_Commons"
# ---
# https://pageviews.wmcloud.org/massviews/api.php?project=af.wikipedia.org&category=Files_imported_from_NC_Commons&limit=20000
# ---
params = {
"project": f"{code}.wikipedia.org",
"category": category
}
# ---
# result example: [{"title":"Chondrosarcoma_of_the_nasal_septum_(Radiopaedia_165701-135935_Sagittal_2).jpeg","ns":6}]
# ---


def start():
"""
A function that starts the process by iterating over languages, getting pages for each language, and then working on those pages.
"""
langs = get_langs_codes()
# ---
for code in langs:
pages = get_views(code)


if __name__ == "__main__":
start()
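As committed, `get_views()` only builds the request parameters; it never issues the request or returns anything, so `pages` in `start()` ends up as `None`. Below is a hedged sketch of how the massviews call might be completed, assuming the `requests` library and the response shape shown in the comment above (a JSON list of `{"title": ..., "ns": ...}` objects); none of this is in the PR.

```python
# Sketch only: one possible way to finish get_views(). Assumes `requests`
# is installed and that massviews returns a JSON list like
# [{"title": "Some_file.jpeg", "ns": 6}, ...] as in the comment above.
import requests

def get_views(code):
    endpoint = "https://pageviews.wmcloud.org/massviews/api.php"
    params = {
        "project": f"{code}.wikipedia.org",
        "category": "Category:Files_imported_from_NC_Commons",
        "limit": 20000,  # matches the example URL in the comment above
    }
    try:
        resp = requests.get(endpoint, params=params, timeout=30)
        resp.raise_for_status()
        data = resp.json()
    except (requests.RequestException, ValueError) as exc:
        print(f"massviews request failed for {code}: {exc}")
        return []
    # keep File: namespace entries (ns == 6) and return their titles
    return [row["title"] for row in data if row.get("ns") == 6]
```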