Skip to content

Commit

Permalink
Update dependencies
Browse files Browse the repository at this point in the history
  • Loading branch information
chiragjn committed Dec 2, 2024
1 parent 715de22 commit d8efed6
Show file tree
Hide file tree
Showing 4 changed files with 6 additions and 6 deletions.
2 changes: 1 addition & 1 deletion Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ RUN mkdir -p /packages && \
cd /packages && \
git clone https://github.com/truefoundry/axolotl && \
cd axolotl/ && \
git checkout 1442cf55cd7b1c257a525450996355473fdcbb35 && \
git checkout b8db5db0fea9a1dad15338c1daf73a04f647caf4 && \
cd /packages/axolotl/ && \
MAX_JOBS=1 NVCC_APPEND_FLAGS="--threads 1" pip install -U --use-pep517 --no-build-isolation --no-cache-dir -e .[flash-attn,mamba-ssm,optimizers,lion-pytorch,galore] && \
rm -rf /root/.cache/pip
Expand Down
2 changes: 1 addition & 1 deletion Dockerfile-notebook
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ USER jovyan
RUN cd /packages && \
git clone https://github.com/truefoundry/axolotl && \
cd axolotl/ && \
git checkout 1442cf55cd7b1c257a525450996355473fdcbb35 && \
git checkout b8db5db0fea9a1dad15338c1daf73a04f647caf4 && \
cd /packages/axolotl/ && \
MAX_JOBS=1 NVCC_APPEND_FLAGS="--threads 1" pip install -U --use-pep517 --no-build-isolation --no-cache-dir -e .[flash-attn,mamba-ssm,optimizers,lion-pytorch,galore]

Expand Down
4 changes: 2 additions & 2 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -6,5 +6,5 @@ rich>=13.0.0,<14
snowflake-connector-python[pandas]==3.12.3
torch==2.3.1+cu121
torchao==0.6.1+cu121
truefoundry>=0.4.8,<0.5.0
unsloth @ git+https://github.com/unslothai/unsloth@c2b185e7dbe04cdf2b95c681f42416dbe19d5f97
truefoundry>=0.4.10,<0.5.0
unsloth[cu121-ampere-torch230]==2024.11.9
4 changes: 2 additions & 2 deletions sample_run.sh
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ export PYTORCH_CUDA_ALLOC_CONF="expandable_segments:True,roundup_power2_division
export CUDA_VISIBLE_DEVICES=0
## This controls how much memory to use per GPU
export TORCH_PER_PROCESS_MEMORY_LIMIT=0.98

export HF_HUB_ENABLE_HF_TRANSFER=1
## Add your token for private/gated models
export HF_TOKEN=

Expand All @@ -31,7 +31,7 @@ accelerate launch \
train.py \
config-base.yaml \
--deepspeed ./deepspeed_configs/3_ds_z2_config.json \
--base_model Qwen/Qwen2.5-0.5B-Instruct \
--base_model unsloth/Llama-3.2-1B-Instruct \
--dataset_type chat \
--train_data_uri ./sample_data/chatalpaca-openai-1k.jsonl \
--val_data_uri None \
Expand Down

0 comments on commit d8efed6

Please sign in to comment.