已按网络上的建议尝试修改 transformers 的版本号，同时将模型 config 中的 transformers_version 改为对应版本号，问题仍未解决。
{
"architectures": [
"TinyllmForCausalLM"
],
"attention_dropout": 0.0,
"hidden_act": "silu",
"hidden_size": 512,
"initializer_range": 0.02,
"intermediate_size": 1408,
"max_position_embeddings": 1024,
"model_type": "tinyllm",
"num_attention_heads": 8,
"num_hidden_layers": 8,
"num_key_value_heads": 8,
"rms_norm_eps": 1e-06,
"rope_theta": 10000.0,
"tie_word_embeddings": false,
"torch_dtype": "float16",
"transformers_version": "4.42.3",
"use_cache": true,
"vocab_size": 64798
}
[rank0]: Traceback (most recent call last):
[rank0]: File "sft_train.py", line 193, in
[rank0]: main()
[rank0]: File "sft_train.py", line 149, in main
[rank0]: config = transformers.AutoConfig.from_pretrained(
[rank0]: File "/home/daichenrui2404/miniconda3/envs/TINY_LLM_ZH/lib/python3.8/site-packages/transformers/models/auto/configuration_auto.py", line 984, in from_pretrained
[rank0]: raise ValueError(
[rank0]: ValueError: The checkpoint you are trying to load has model type tinyllm but Transformers does not recognize this architecture. This could be because of an issue with the checkpoint, or because your version of Transformers is out of date.
E0708 15:05:53.145956 139943936660672 torch/distributed/elastic/multiprocessing/api.py:826] failed (exitcode: 1) local_rank: 0 (pid: 1131943) of binary: /home/daichenrui2404/miniconda3/envs/TINY_LLM_ZH/bin/python
Traceback (most recent call last):
File "/home/daichenrui2404/miniconda3/envs/TINY_LLM_ZH/bin/torchrun", line 8, in
sys.exit(main())
File "/home/daichenrui2404/miniconda3/envs/TINY_LLM_ZH/lib/python3.8/site-packages/torch/distributed/elastic/multiprocessing/errors/init.py", line 347, in wrapper
return f(*args, **kwargs)
File "/home/daichenrui2404/miniconda3/envs/TINY_LLM_ZH/lib/python3.8/site-packages/torch/distributed/run.py", line 879, in main
run(args)
File "/home/daichenrui2404/miniconda3/envs/TINY_LLM_ZH/lib/python3.8/site-packages/torch/distributed/run.py", line 870, in run
elastic_launch(
File "/home/daichenrui2404/miniconda3/envs/TINY_LLM_ZH/lib/python3.8/site-packages/torch/distributed/launcher/api.py", line 132, in call
return launch_agent(self._config, self._entrypoint, list(args))
File "/home/daichenrui2404/miniconda3/envs/TINY_LLM_ZH/lib/python3.8/site-packages/torch/distributed/launcher/api.py", line 263, in launch_agent
raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
sft_train.py FAILED
已按网络上的建议尝试修改 transformers 的版本号，同时将模型 config 中的 transformers_version 改为对应版本号，问题仍未解决。
{
"architectures": [
"TinyllmForCausalLM"
],
"attention_dropout": 0.0,
"hidden_act": "silu",
"hidden_size": 512,
"initializer_range": 0.02,
"intermediate_size": 1408,
"max_position_embeddings": 1024,
"model_type": "tinyllm",
"num_attention_heads": 8,
"num_hidden_layers": 8,
"num_key_value_heads": 8,
"rms_norm_eps": 1e-06,
"rope_theta": 10000.0,
"tie_word_embeddings": false,
"torch_dtype": "float16",
"transformers_version": "4.42.3",
"use_cache": true,
"vocab_size": 64798
}
[rank0]: Traceback (most recent call last):
[rank0]: File "sft_train.py", line 193, in
[rank0]: main()
[rank0]: File "sft_train.py", line 149, in main
[rank0]: config = transformers.AutoConfig.from_pretrained(
[rank0]: File "/home/daichenrui2404/miniconda3/envs/TINY_LLM_ZH/lib/python3.8/site-packages/transformers/models/auto/configuration_auto.py", line 984, in from_pretrained
[rank0]: raise ValueError(
[rank0]: ValueError: The checkpoint you are trying to load has model type `tinyllm` but Transformers does not recognize this architecture. This could be because of an issue with the checkpoint, or because your version of Transformers is out of date.
E0708 15:05:53.145956 139943936660672 torch/distributed/elastic/multiprocessing/api.py:826] failed (exitcode: 1) local_rank: 0 (pid: 1131943) of binary: /home/daichenrui2404/miniconda3/envs/TINY_LLM_ZH/bin/python
Traceback (most recent call last):
File "/home/daichenrui2404/miniconda3/envs/TINY_LLM_ZH/bin/torchrun", line 8, in
sys.exit(main())
File "/home/daichenrui2404/miniconda3/envs/TINY_LLM_ZH/lib/python3.8/site-packages/torch/distributed/elastic/multiprocessing/errors/init.py", line 347, in wrapper
return f(*args, **kwargs)
File "/home/daichenrui2404/miniconda3/envs/TINY_LLM_ZH/lib/python3.8/site-packages/torch/distributed/run.py", line 879, in main
run(args)
File "/home/daichenrui2404/miniconda3/envs/TINY_LLM_ZH/lib/python3.8/site-packages/torch/distributed/run.py", line 870, in run
elastic_launch(
File "/home/daichenrui2404/miniconda3/envs/TINY_LLM_ZH/lib/python3.8/site-packages/torch/distributed/launcher/api.py", line 132, in call
return launch_agent(self._config, self._entrypoint, list(args))
File "/home/daichenrui2404/miniconda3/envs/TINY_LLM_ZH/lib/python3.8/site-packages/torch/distributed/launcher/api.py", line 263, in launch_agent
raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
sft_train.py FAILED