Put downloaded models into:
<project_dir>/models
Cache should be located in:
<project_dir>/cache
<project_dir>/cache/hub     # Hugging Face hub cache
<project_dir>/cache/lora    # LoRA training output
<project_dir>/cache/models  # model merge results
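Hugging Face will not use <project_dir>/cache/hub on its own; a minimal sketch for pointing it there, assuming the standard huggingface_hub environment variables and that <project_dir> is replaced with the real path:

export HF_HOME=<project_dir>/cache            # root directory for Hugging Face data
export HF_HUB_CACHE=<project_dir>/cache/hub   # downloaded hub snapshots land here

The lora and models subdirectories are presumably written by the project's own training and merge scripts, so no environment variable is needed for them.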
LDFLAGS="-static-libstdc++" CC=/home/neum_al/env/bin/gcc pip install ctransformers --no-binary ctransformers --no-cache-dir