22-hours / cabrita
Finetuning InstructLLaMA with Portuguese data
License: Apache License 2.0
What is the total amount of memory necessary to load and use the model?
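(Not an official answer, but a rough weight-only estimate is easy to work out; the ~6.7B parameter count for LLaMA-7B below is an assumption on my part:)

# Back-of-the-envelope memory estimate for the LLaMA-7B base weights.
params = 6.7e9
print(f"fp16: ~{params * 2 / 2**30:.1f} GiB")  # 2 bytes per param -> ~12.5 GiB
print(f"int8: ~{params * 1 / 2**30:.1f} GiB")  # load_in_8bit -> ~6.2 GiB
# Add a few GiB for activations, CUDA overhead, and (when training) optimizer
# state; the LoRA adapter itself is tiny by comparison.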
Hello. I'm having an issue with this part of your code [train_lora]:
trainer = transformers.Trainer(
model=model,
train_dataset=data["train"],
args=transformers.TrainingArguments(
per_device_train_batch_size=MICRO_BATCH_SIZE,
gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,
warmup_steps=100,
num_train_epochs=EPOCHS,
learning_rate=LEARNING_RATE,
fp16=True,
logging_steps=20,
output_dir="lora-alpaca",
save_total_limit=3,
),
data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
model.config.use_cache = False
trainer.train(resume_from_checkpoint=False)
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ in <module>:1 │
│ │
│ ❱ 1 trainer = transformers.Trainer( │
│ 2 │ model=model, │
│ 3 │ train_dataset=data["train"], │
│ 4 │ args=transformers.TrainingArguments( │
│ │
│ /home/user/anaconda3/lib/python3.9/site-packages/transformers/trainer.py:476 in __init__ │
│ │
│ 473 │ │ self.tokenizer = tokenizer │
│ 474 │ │ │
│ 475 │ │ if self.place_model_on_device and not getattr(model, "is_loaded_in_8bit", False) │
│ ❱ 476 │ │ │ self._move_model_to_device(model, args.device) │
│ 477 │ │ │
│ 478 │ │ # Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs │
│ 479 │ │ if self.is_model_parallel: │
│ │
│ /home/user/anaconda3/lib/python3.9/site-packages/transformers/trainer.py:715 in │
│ _move_model_to_device │
│ │
│ 712 │ │ self.callback_handler.remove_callback(callback) │
│ 713 │ │
│ 714 │ def _move_model_to_device(self, model, device): │
│ ❱ 715 │ │ model = model.to(device) │
│ 716 │ │ # Moving a model to an XLA device disconnects the tied weights, so we have to re │
│ 717 │ │ if self.args.parallel_mode == ParallelMode.TPU and hasattr(model, "tie_weights") │
│ 718 │ │ │ model.tie_weights() │
│ │
│ /home/user/anaconda3/lib/python3.9/site-packages/transformers/modeling_utils.py:1811 in to │
│ │
│ 1808 │ │ │ │ " model has already been set to the correct devices and casted to the co │
│ 1809 │ │ │ ) │
│ 1810 │ │ else: │
│ ❱ 1811 │ │ │ return super().to(*args, **kwargs) │
│ 1812 │ │
│ 1813 │ def half(self, *args): │
│ 1814 │ │ # Checks if the model has been loaded in 8-bit │
│ │
│ /home/user/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:1145 in to │
│ │
│ 1142 │ │ │ │ │ │ │ non_blocking, memory_format=convert_to_format) │
│ 1143 │ │ │ return t.to(device, dtype if t.is_floating_point() or t.is_complex() else No │
│ 1144 │ │ │
│ ❱ 1145 │ │ return self._apply(convert) │
│ 1146 │ │
│ 1147 │ def register_full_backward_pre_hook( │
│ 1148 │ │ self, │
│ │
│ /home/user/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:797 in _apply │
│ │
│ 794 │ │
│ 795 │ def _apply(self, fn): │
│ 796 │ │ for module in self.children(): │
│ ❱ 797 │ │ │ module._apply(fn) │
│ 798 │ │ │
│ 799 │ │ def compute_should_use_set_data(tensor, tensor_applied): │
│ 800 │ │ │ if torch._has_compatible_shallow_copy_type(tensor, tensor_applied): │
│ │
│ /home/user/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:797 in _apply │
│ │
│ 794 │ │
│ 795 │ def _apply(self, fn): │
│ 796 │ │ for module in self.children(): │
│ ❱ 797 │ │ │ module._apply(fn) │
│ 798 │ │ │
│ 799 │ │ def compute_should_use_set_data(tensor, tensor_applied): │
│ 800 │ │ │ if torch._has_compatible_shallow_copy_type(tensor, tensor_applied): │
│ │
│ /home/user/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:797 in _apply │
│ │
│ 794 │ │
│ 795 │ def _apply(self, fn): │
│ 796 │ │ for module in self.children(): │
│ ❱ 797 │ │ │ module._apply(fn) │
│ 798 │ │ │
│ 799 │ │ def compute_should_use_set_data(tensor, tensor_applied): │
│ 800 │ │ │ if torch._has_compatible_shallow_copy_type(tensor, tensor_applied): │
│ │
│ /home/user/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:797 in _apply │
│ │
│ 794 │ │
│ 795 │ def _apply(self, fn): │
│ 796 │ │ for module in self.children(): │
│ ❱ 797 │ │ │ module._apply(fn) │
│ 798 │ │ │
│ 799 │ │ def compute_should_use_set_data(tensor, tensor_applied): │
│ 800 │ │ │ if torch._has_compatible_shallow_copy_type(tensor, tensor_applied): │
│ │
│ /home/user/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:797 in _apply │
│ │
│ 794 │ │
│ 795 │ def _apply(self, fn): │
│ 796 │ │ for module in self.children(): │
│ ❱ 797 │ │ │ module._apply(fn) │
│ 798 │ │ │
│ 799 │ │ def compute_should_use_set_data(tensor, tensor_applied): │
│ 800 │ │ │ if torch._has_compatible_shallow_copy_type(tensor, tensor_applied): │
│ │
│ /home/user/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:820 in _apply │
│ │
│ 817 │ │ │ # track autograd history of param_applied, so we have to use │
│ 818 │ │ │ # with torch.no_grad(): │
│ 819 │ │ │ with torch.no_grad(): │
│ ❱ 820 │ │ │ │ param_applied = fn(param) │
│ 821 │ │ │ should_use_set_data = compute_should_use_set_data(param, param_applied) │
│ 822 │ │ │ if should_use_set_data: │
│ 823 │ │ │ │ param.data = param_applied │
│ │
│ /home/user/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:1143 in │
│ convert │
│ │
│ 1140 │ │ │ if convert_to_format is not None and t.dim() in (4, 5): │
│ 1141 │ │ │ │ return t.to(device, dtype if t.is_floating_point() or t.is_complex() els │
│ 1142 │ │ │ │ │ │ │ non_blocking, memory_format=convert_to_format) │
│ ❱ 1143 │ │ │ return t.to(device, dtype if t.is_floating_point() or t.is_complex() else No │
│ 1144 │ │ │
│ 1145 │ │ return self._apply(convert) │
│ 1146 │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
NotImplementedError: Cannot copy out of meta tensor; no data!
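Not the maintainer, but for context: this error usually means some weights ended up on the meta device (i.e. accelerate offloaded them to CPU/disk) and Trainer then called model.to(device). A minimal sketch of one common workaround, pinning the whole model to a single GPU at load time (the checkpoint name is an assumption, not taken from this issue):

from transformers import LlamaForCausalLM

model = LlamaForCausalLM.from_pretrained(
    "decapoda-research/llama-7b-hf",  # assumed base checkpoint
    load_in_8bit=True,                # requires bitsandbytes
    device_map={"": 0},               # put every module on GPU 0 so nothing stays on "meta"
)

With is_loaded_in_8bit set, Trainer also skips _move_model_to_device entirely, as the check at trainer.py:475 in the traceback above shows.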
When we tried to run the line:
tokenizer = AutoTokenizer.from_pretrained("nlpcloud/instruct-gpt-j-fp16")
We got the error:
TypeError: unhashable type: 'dict'
Could it be a library bug? How can we fix this problem?
I was wondering if you could upload something to test with ggerganov/llama.cpp and oobabooga/text-generation-webui.
Any q4_0 or q8_0 GGML .bin file will do.
Thanks for your interesting work!
I followed your guide to finetune the model and try it, but the saved model consisted only of these files (image below):
So when I loaded the PEFT model for use, it could not find the model's config.json. How can I get around this?
Note that this is the checkpoint saved by the Hugging Face Trainer during training. I canceled the training job, so the .save_pretrained() call at the end might not have executed.
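If the adapter weights themselves made it into the checkpoint, a minimal recovery sketch looks like this (the checkpoint path is illustrative, and model is assumed to still be the PEFT-wrapped model):

import torch
from peft import set_peft_model_state_dict

state = torch.load("lora-alpaca/checkpoint-600/pytorch_model.bin", map_location="cpu")
set_peft_model_state_dict(model, state)  # restore the LoRA weights into the wrapped model
model.save_pretrained("lora-alpaca")     # writes adapter_config.json and adapter_model.bin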
I receive this warning when installing
git+https://github.com/zphang/transformers@c3dc391
Did not find branch or tag 'c3dc391', assuming revision or ref.
I guess it is related to a specific commit. Can I use one of those branches?
llama
llama_push
llama_export
Hello,
Really cool idea! Sadly I do not speak Portuguese, so I cannot evaluate the quality. Do you feel that it is more consistent than the pure LLaMA model?
May I ask which translation service you used? DeepL or something else?
ImportError: cannot import name 'LLaMAForCausalLM' from 'transformers' (/usr/local/lib/python3.9/dist-packages/transformers/__init__.py)
"We translated the alpaca_data.json to portuguese using ChatGPT. ....We paid around US$ 8.00 to translate the full dataset to portuguese."
The initial size of the data is approximately 20 million tokens, and the cost of processing it with ChatGPT Turbo is $0.002 per 1,000 tokens. I am curious as to why the total cost is not close to $40.
By the way, I appreciate you sharing the excellent suggestion for fine-tuning.
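For reference, the arithmetic behind that $40 figure, assuming the 20 million measures tokens:

tokens = 20_000_000
price_per_1k = 0.002                  # USD per 1k tokens (gpt-3.5-turbo pricing at the time)
print(tokens / 1_000 * price_per_1k)  # -> 40.0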
I think the code in the provided notebook is using the old class names:
from transformers import LLaMATokenizer, LLaMAForCausalLM
The latest update changed the "LLaMA" casing to lowercase, so it becomes:
from transformers import AutoTokenizer, AutoConfig, LlamaForCausalLM, LlamaTokenizer
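A minimal sketch with the renamed classes (the checkpoint name is an assumption, not from this comment):

from transformers import LlamaForCausalLM, LlamaTokenizer

tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
model = LlamaForCausalLM.from_pretrained("decapoda-research/llama-7b-hf")

This also explains the ImportError quoted a few comments above: recent transformers releases only ship the lowercase Llama* names.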
Hello, I would like to know: if there were a research grant for the translation to be done by Brazilians, how much would it cost? I'm thinking this could be used in many national models.
I'm quite new to the LLM world. I'm Brazilian and decided to start with Cabrita. I managed to deal with several errors, but I am stuck with this:
"Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you have set a value for max_memory
you should increase that. To have
an idea of the modules that are set on the CPU or RAM you can print model.hf_device_map."
I tried several solutions but none worked. Is it a memory problem?
I have a 2018 MacBook Pro (i7, 16 GB RAM).
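Not an official answer, but the message itself names the first diagnostic step: print where accelerate placed each module (this assumes the model was loaded with device_map="auto"):

print(model.hf_device_map)
# e.g. {"model.embed_tokens": 0, "model.layers.0": "cpu", ...}
# Anything mapped to "cpu" or "disk" did not fit in GPU memory.

Also note that a 2018 MacBook Pro has no CUDA GPU, so the bitsandbytes 8-bit path the notebook relies on will not run there at all.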
Greetings, I have used the identical configuration from your Google Colab notebook to perform fine-tuning on the alpaca dataset. However, I have observed a notable disparity in the training duration, which is considerably longer than your findings: the training process took approximately 1 hour to complete 70 steps, and the estimated time for 3 epochs is around 23 hours. I am eager to hear your insights on my situation. Thank you.
MICRO_BATCH_SIZE = 4 # this could actually be 5 but i like powers of 2
BATCH_SIZE = 128
GRADIENT_ACCUMULATION_STEPS = BATCH_SIZE // MICRO_BATCH_SIZE
EPOCHS = 3 # we don't need 3 tbh
LEARNING_RATE = 3e-4 # the Karpathy constant
CUTOFF_LEN = 256 # 256 accounts for about 96% of the data
LORA_R = 8
LORA_ALPHA = 16
LORA_DROPOUT = 0.05
trainer = transformers.Trainer(
model=model,
train_dataset=data["train"],
args=transformers.TrainingArguments(
per_device_train_batch_size=MICRO_BATCH_SIZE,
gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,
warmup_steps=100,
num_train_epochs=EPOCHS,
learning_rate=LEARNING_RATE,
fp16=True,
logging_steps=20,
output_dir="lora-alpaca",
save_total_limit=3,
),
data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
model.config.use_cache = False
trainer.train(resume_from_checkpoint=False)
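A quick step-count sanity check under these settings (a sketch; the ~52k-example dataset size is the standard Alpaca figure and an assumption here):

effective_batch = 4 * 32                      # MICRO_BATCH_SIZE * GRADIENT_ACCUMULATION_STEPS
steps_per_epoch = 52_000 // effective_batch   # ~406
total_steps = steps_per_epoch * 3             # ~1218 for EPOCHS = 3
# At the observed ~70 steps/hour that is roughly 17-23 hours, so the step count
# itself is as expected; the disparity with the notebook most likely comes down
# to the GPU tier (e.g. a free-tier T4 vs. a faster card), not the configuration.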