From f0b0364f747a55e4a5f755e616dcff53c2375c63 Mon Sep 17 00:00:00 2001
From: Menghuan1918
Date: Sun, 27 Oct 2024 23:17:03 +0800
Subject: [PATCH] Fix and improve the build-with-latex Docker build (#2020)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Improve the build files

* Fix issues

* Update the docker comments and test the pull size
---
 .github/workflows/build-with-latex-arm.yml |  2 +-
 docker-compose.yml                         |  1 +
 docs/GithubAction+NoLocal+Latex            | 41 +++++++++++-----------
 docs/GithubAction+NoLocal+Latex+Arm        | 25 -------------
 4 files changed, 22 insertions(+), 47 deletions(-)
 delete mode 100644 docs/GithubAction+NoLocal+Latex+Arm

diff --git a/.github/workflows/build-with-latex-arm.yml b/.github/workflows/build-with-latex-arm.yml
index ac20afad16..2f00457a72 100644
--- a/.github/workflows/build-with-latex-arm.yml
+++ b/.github/workflows/build-with-latex-arm.yml
@@ -46,6 +46,6 @@ jobs:
           context: .
           push: true
           platforms: linux/arm64
-          file: docs/GithubAction+NoLocal+Latex+Arm
+          file: docs/GithubAction+NoLocal+Latex
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
\ No newline at end of file
diff --git a/docker-compose.yml b/docker-compose.yml
index 06a35600da..cd72e3afed 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -180,6 +180,7 @@ version: '3'
 services:
   gpt_academic_with_latex:
     image: ghcr.io/binary-husky/gpt_academic_with_latex:master # (Auto Built by Dockerfile: docs/GithubAction+NoLocal+Latex)
+    # For ARM64 devices, replace the image name above with ghcr.io/binary-husky/gpt_academic_with_latex_arm:master
     environment:
       # Check `config.py` for all available configuration options
       API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx '
diff --git a/docs/GithubAction+NoLocal+Latex b/docs/GithubAction+NoLocal+Latex
index 00a3b6ab46..71d51796a8 100644
--- a/docs/GithubAction+NoLocal+Latex
+++ b/docs/GithubAction+NoLocal+Latex
@@ -1,35 +1,34 @@
-# This Dockerfile builds a “no local model” environment; if you need local models such as chatglm, see docs/Dockerfile+ChatGLM
+# This Dockerfile builds a "no local model" environment; if you need local models such as chatglm, see docs/Dockerfile+ChatGLM
 # - 1 Modify `config.py`
 # - 2 Build: docker build -t gpt-academic-nolocal-latex -f docs/GithubAction+NoLocal+Latex .
 # - 3 Run: docker run -v /home/fuqingxu/arxiv_cache:/root/arxiv_cache --rm -it --net=host gpt-academic-nolocal-latex
 
-FROM fuqingxu/python311_texlive_ctex:latest
-ENV PATH "$PATH:/usr/local/texlive/2022/bin/x86_64-linux"
-ENV PATH "$PATH:/usr/local/texlive/2023/bin/x86_64-linux"
-ENV PATH "$PATH:/usr/local/texlive/2024/bin/x86_64-linux"
-ENV PATH "$PATH:/usr/local/texlive/2025/bin/x86_64-linux"
-ENV PATH "$PATH:/usr/local/texlive/2026/bin/x86_64-linux"
-
-# Set the working directory
+FROM menghuan1918/ubuntu_uv_ctex:latest
+ENV DEBIAN_FRONTEND=noninteractive
+SHELL ["/bin/bash", "-c"]
 WORKDIR /gpt
 
-RUN pip3 install openai numpy arxiv rich
-RUN pip3 install colorama Markdown pygments pymupdf
-RUN pip3 install python-docx pdfminer
-RUN pip3 install nougat-ocr
-
-# Copy the project files
-COPY . .
-
+# Copy the dependency list first
+COPY requirements.txt .
 # Install dependencies
-RUN pip3 install -r requirements.txt
+RUN pip install --break-system-packages openai numpy arxiv rich colorama Markdown pygments pymupdf python-docx pdfminer \
+    && pip install --break-system-packages -r requirements.txt \
+    && if [ "$(uname -m)" = "x86_64" ]; then \
+    pip install --break-system-packages nougat-ocr; \
+    fi \
+    && pip cache purge \
+    && rm -rf /root/.cache/pip/*
+
+# Create a non-root user
+RUN useradd -m gptuser && chown -R gptuser /gpt
+USER gptuser
 
-# Dependency required by edge-tts
-RUN apt update && apt install ffmpeg -y
+# Copy the code files last, so that a code update only rebuilds the last few layers, which greatly reduces the size of a docker pull
+COPY --chown=gptuser:gptuser . .
 
 # Optional step, used to warm up the modules
-RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
+RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
 
 # Launch
 CMD ["python3", "-u", "main.py"]
diff --git a/docs/GithubAction+NoLocal+Latex+Arm b/docs/GithubAction+NoLocal+Latex+Arm
deleted file mode 100644
index 94ad894163..0000000000
--- a/docs/GithubAction+NoLocal+Latex+Arm
+++ /dev/null
@@ -1,25 +0,0 @@
-# This Dockerfile builds a “no local model” environment; if you need local models such as chatglm, see docs/Dockerfile+ChatGLM
-# - 1 Modify `config.py`
-# - 2 Build: docker build -t gpt-academic-nolocal-latex -f docs/GithubAction+NoLocal+Latex .
-# - 3 Run: docker run -v /home/fuqingxu/arxiv_cache:/root/arxiv_cache --rm -it --net=host gpt-academic-nolocal-latex
-
-FROM menghuan1918/ubuntu_uv_ctex:latest
-ENV DEBIAN_FRONTEND=noninteractive
-SHELL ["/bin/bash", "-c"]
-WORKDIR /gpt
-COPY . .
-RUN /root/.cargo/bin/uv venv --seed \
-    && source .venv/bin/activate \
-    && /root/.cargo/bin/uv pip install openai numpy arxiv rich colorama Markdown pygments pymupdf python-docx pdfminer \
-    && /root/.cargo/bin/uv pip install -r requirements.txt \
-    && /root/.cargo/bin/uv clean
-
-# Point python3 at the virtual environment
-RUN rm -f /usr/bin/python3 && ln -s /gpt/.venv/bin/python /usr/bin/python3
-RUN rm -f /usr/bin/python && ln -s /gpt/.venv/bin/python /usr/bin/python
-
-# Optional step, used to warm up the modules
-RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
-
-# Launch
-CMD ["python3", "-u", "main.py"]
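
Note: the workflow change above makes CI cross-build the unified docs/GithubAction+NoLocal+Latex Dockerfile for linux/arm64. A rough local equivalent is sketched below; it assumes Docker with buildx and QEMU emulation is available, and the tag gpt-academic-nolocal-latex-arm is an arbitrary example name, not the tag CI publishes.

    # Cross-build the unified Dockerfile for ARM64 and load it into the local image store
    docker buildx build --platform linux/arm64 \
        -f docs/GithubAction+NoLocal+Latex \
        -t gpt-academic-nolocal-latex-arm \
        --load .

    # Run it the same way as the x86_64 image (see the comments at the top of the Dockerfile)
    docker run -v /home/fuqingxu/arxiv_cache:/root/arxiv_cache --rm -it --net=host gpt-academic-nolocal-latex-arm

Alternatively, ARM64 users can pull the prebuilt ghcr.io/binary-husky/gpt_academic_with_latex_arm:master image referenced in the docker-compose.yml comment above.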