"""
Run with: torchrun --nproc_per_node=2 train.py
"""

import os

import torch
import torch.distributed as dist
from transformers import LlamaConfig, LlamaForCausalLM

from src.fsdp import fully_shard


def main():
    # Read distributed settings from the environment (set by torchrun).
    local_rank = int(os.environ["LOCAL_RANK"])
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    # Bind this process to its GPU before initializing the NCCL process group.
    backend = "nccl"
    torch.cuda.set_device(local_rank)
    device = torch.device(f"cuda:{local_rank}")
    dist.init_process_group(backend, rank=rank, world_size=world_size, device_id=device)
    print(f"[Rank {rank}] Local rank: {local_rank}, World size: {world_size}")
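
    # Seed every rank identically so model initialization matches across ranks.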
    torch.manual_seed(42)
    config = LlamaConfig()
    model = LlamaForCausalLM(config)
    base_model = model.model
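
    # Shard each transformer block individually, then the remaining top-level modules.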
    for layer in base_model.layers:
        fully_shard(layer)
    fully_shard(base_model.embed_tokens)
    fully_shard(base_model.norm)
    fully_shard(base_model.rotary_emb)
    fully_shard(model.lm_head)
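
    # Move the sharded model to this rank's GPU and create the optimizer.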
    model.to(device=device)
    optim = torch.optim.Adam(model.parameters(), lr=1e-3)

    print("✓ Training started")
    for step in range(5):
        x = torch.randint(0, config.vocab_size, (1, 512), device=device)
        optim.zero_grad()
        output = model(x).logits
        loss = output.sum()
        loss.backward()
        optim.step()

    print(f"✓ Training completed, final output shape: {output.shape}")
    dist.barrier()
    dist.destroy_process_group()


if __name__ == "__main__":
    main()