Kernels
wyldecat committed
Commit 151bb5a · 1 Parent(s): 7e4334d

feat: support sequence parallel with fused_add_rms_norm

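In short, this commit lets `activation.layers.FusedAddRMSNorm` run under DTensor-based sequence parallelism: the new `ResidualSequenceParallel` style shards both the input and the residual along the sequence dimension, and the new sharding strategies for `fused_add_rms_norm` and its backward keep the outputs sharded the same way. A condensed usage sketch, distilled from the new test below (the `Block` name and the shapes are illustrative; it assumes torch >= 2.8 and an already-initialized NCCL process group, e.g. under `torchrun`):

```python
import torch
import torch.distributed as dist
from torch.distributed._tensor import DTensor
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.placement_types import Shard
from torch.distributed.tensor.parallel import parallelize_module

import activation
from activation.parallel_style import ResidualSequenceParallel

# Assumes dist.init_process_group("nccl") has already run on every rank.
mesh = init_device_mesh("cuda", (dist.get_world_size(),), mesh_dim_names=("shard",))


class Block(torch.nn.Module):  # illustrative wrapper, mirrors the test's Model
    def __init__(self, d: int) -> None:
        super().__init__()
        self.fused_add_rms_norm = activation.layers.FusedAddRMSNorm(d)

    def forward(self, x, residual):
        return self.fused_add_rms_norm(x, residual=residual)


model = Block(16).cuda()
# Attach the new parallel style: inputs are treated as sharded on the sequence dim.
parallelize_module(model, mesh,
                   {"fused_add_rms_norm": ResidualSequenceParallel(sequence_dim=0)})

# Each rank wraps its local [T / world_size, D] shard as a DTensor sharded on dim 0.
x_local = torch.randn(512 // dist.get_world_size(), 16, device="cuda")
residual_local = torch.randn_like(x_local)
x = DTensor.from_local(x_local, device_mesh=mesh, placements=(Shard(0),))
residual = DTensor.from_local(residual_local, device_mesh=mesh, placements=(Shard(0),))

y, add_output = model(x, residual)  # both outputs keep the Shard(0) placement
```

Because the registered strategy emits `output_specs=[input_tgt, input_tgt]`, both the normalized output and the residual-add output stay sequence-sharded, so no gather is needed between layers.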
tests/test_fused_add_rms_norm_sequence_parallel.py ADDED
@@ -0,0 +1,176 @@
+ import random
+ import sys
+ from collections.abc import Sequence
+
+ import pytest
+ import torch
+ import torch.distributed as dist
+ from packaging import version
+ from torch.distributed.tensor.placement_types import (Partial, Placement,
+                                                        Replicate, Shard)
+
+ import activation
+
+ from .utils import assert_close, opcheck
+
+ DTYPES = [torch.float32]
+ NUM_TOKENS = [512]  # Arbitrary values for testing
+ SEQUENCE_DIMS = [0, 1]  # 0 is for [T, D] (packed), 1 is for [B, S, D]
+ D = [16]  # Arbitrary values for testing
+ SEEDS = [0]
+
+ from activation.parallel_style import ResidualSequenceParallel
+ from torch.distributed._tensor import DTensor
+ from torch.distributed.device_mesh import DeviceMesh, init_device_mesh
+ from torch.distributed.tensor.parallel import parallelize_module
+
+
+ @pytest.fixture(scope="session", autouse=True)
+ def init_dist(request):
+     if version.parse(torch.__version__) < version.parse("2.8"):
+         pytest.skip("torch>=2.8.0 is required for sequence parallel")
+         return
+
+     try:
+         dist.init_process_group(backend="nccl")
+         torch.cuda.set_device(dist.get_rank() % torch.cuda.device_count())
+     except Exception as e:
+         print(f"Failed to initialize torch.distributed: {e}")
+         pytest.skip("Failed to initialize torch.distributed")
+
+     if dist.get_world_size() < 2:
+         pytest.skip("Need at least 2 processes in dist group. "
+                     "You can run with `torchrun --nproc-per-node=2 "
+                     "--local-ranks-filter 0 -m pytest "
+ "test_rms_norm_sequence_parallel.py`")
+
+     yield
+     dist.destroy_process_group()
+
+
+ class Model(torch.nn.Module):
+
+     def __init__(self, num_tokens, d) -> None:
+         super().__init__()
+         self.fused_add_rms_norm = activation.layers.FusedAddRMSNorm(d)
+
+     def forward(self, x: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
+         return self.fused_add_rms_norm(x, residual=residual)
+
+
+ @pytest.mark.parametrize("num_tokens", NUM_TOKENS)
+ @pytest.mark.parametrize("d", D)
+ @pytest.mark.parametrize("dtype", DTYPES)
+ @pytest.mark.parametrize("seed", SEEDS)
+ @pytest.mark.parametrize("sequence_dim", SEQUENCE_DIMS)
+ @pytest.mark.parametrize("x_requires_grad", [True, False])
+ @pytest.mark.parametrize("residual_requires_grad", [True, False])
+ def test_fused_add_rms_norm_sequence_parallel(
+     num_tokens: int,
+     d: int,
+     dtype: torch.dtype,
+     seed: int,
+     sequence_dim: int,
+     x_requires_grad: bool,
+     residual_requires_grad: bool,
+ ) -> None:
+     if num_tokens % dist.get_world_size() != 0:
+         # It hangs at `y.full_tensor()` if not divisible
+         pytest.skip("num_tokens must be divisible by world_size for sharding")
+
+     if not x_requires_grad and not residual_requires_grad:
+         pytest.skip("For now, at least one of x or residual must require grad")
+
+     random.seed(seed)
+     torch.manual_seed(seed)
+
+     num_ranks = dist.get_world_size()
+     rank = dist.get_rank()
+     mesh = init_device_mesh("cuda", (num_ranks, ), mesh_dim_names=("shard", ))
+
+     match sequence_dim:
+         case 0:
+             x_shape = (num_tokens, d)
+         case 1:
+             BATCH_SIZE = 2
+             x_shape = (BATCH_SIZE, num_tokens, d)
+         case _:
+             raise ValueError(f"Invalid sequence_dim: {sequence_dim}")
+
+     x = torch.randn(x_shape, dtype=dtype, requires_grad=x_requires_grad).cuda()
+     residual = torch.randn(x_shape,
+                            dtype=dtype,
+                            requires_grad=residual_requires_grad).cuda()
+     weight = torch.ones(d, dtype=dtype, requires_grad=True).cuda()
+     eps = 1e-05
+
+     if x_requires_grad:
+         x.retain_grad()
+     if residual_requires_grad:
+         residual.retain_grad()
+     weight.retain_grad()
+
+     # Copy x, weight for reference
+     x_ref = x.detach().clone().requires_grad_(True)
+     residual_ref = residual.detach().clone().requires_grad_(True)
+     weight_ref = weight.detach().clone().requires_grad_(True)
+
+     model_sharded = Model(num_tokens, d).to(dtype=dtype).cuda()
+     model_sharded.fused_add_rms_norm.weight = torch.nn.Parameter(weight)
+     parallelize_module(model_sharded, mesh, {
+         "fused_add_rms_norm":
+         ResidualSequenceParallel(sequence_dim=sequence_dim)
+     })
+
+     x_sharded = DTensor.from_local(
+         x.chunk(num_ranks, dim=sequence_dim)[rank].contiguous(),
+         placements=(Shard(sequence_dim), ),
+         device_mesh=mesh,
+     )
+     residual_sharded = DTensor.from_local(
+         residual.chunk(num_ranks, dim=sequence_dim)[rank].contiguous(),
+         placements=(Shard(sequence_dim), ),
+         device_mesh=mesh,
+     )
+
+     y, add_output = model_sharded(x_sharded, residual_sharded)
+
+     y_from_sharded = y.full_tensor()
+     add_output_from_sharded = add_output.full_tensor()
+
+     model_unsharded = Model(num_tokens, d).to(dtype=dtype).cuda()
+     model_unsharded.fused_add_rms_norm.weight = torch.nn.Parameter(weight_ref)
+
+     y_from_unsharded, add_output_from_unsharded = model_unsharded(
+         x_ref, residual_ref)
+
+     assert_close(y_from_sharded, y_from_unsharded)
+     assert_close(add_output_from_sharded, add_output_from_unsharded)
+
+     # Backward
+     y_grad = torch.randn_like(y_from_unsharded)
+     add_output_grad = torch.randn_like(add_output_from_unsharded)
+
+     (y_grad * y_from_sharded +
+      add_output_grad * add_output_from_sharded).sum().backward()
+     (y_grad * y_from_unsharded +
+      add_output_grad * add_output_from_unsharded).sum().backward()
+
+     weight_grad_from_sharded = model_sharded.fused_add_rms_norm.weight.grad._local_tensor
+     weight_grad_from_unsharded = model_unsharded.fused_add_rms_norm.weight.grad
+
+     assert (x.grad is None) ^ x_requires_grad
+     assert (residual.grad is None) ^ residual_requires_grad
+
+     torch.distributed.all_reduce(weight_grad_from_sharded,
+                                  op=torch.distributed.ReduceOp.SUM)
+
+     if x.grad is not None:
+         torch.distributed.all_reduce(x.grad, op=torch.distributed.ReduceOp.SUM)
+         assert_close(x.grad, x_ref.grad)
+     if residual.grad is not None:
+         torch.distributed.all_reduce(residual.grad,
+                                      op=torch.distributed.ReduceOp.SUM)
+         assert_close(residual.grad, residual_ref.grad)
+
+     assert_close(weight_grad_from_sharded, weight_grad_from_unsharded)
tests/test_rms_norm_sequence_parallel.py CHANGED
@@ -63,7 +63,7 @@ class Model(torch.nn.Module):
  @pytest.mark.parametrize("dtype", DTYPES)
  @pytest.mark.parametrize("seed", SEEDS)
  @pytest.mark.parametrize("sequence_dim", SEQUENCE_DIMS)
- def test_rms_norm(
+ def test_rms_norm_sequence_parallel(
      num_tokens: int,
      d: int,
      dtype: torch.dtype,
torch-ext/activation/__init__.py CHANGED
@@ -1,6 +1,6 @@
  import torch
  
- from . import layers
+ from . import layers, parallel_style
  from ._ops import ops
  from .poly_norm import FusedMulPolyNormFunction, PolyNormFunction
  from .rms_norm import FusedAddRMSNormFunction, RMSNormFunction
@@ -48,5 +48,6 @@ __all__ = [
      "rms_norm",
      "fused_add_rms_norm",
      "layers",
+     "parallel_style",
      "ops",
  ]
torch-ext/activation/fused_add_rms_norm_meta.py ADDED
@@ -0,0 +1,199 @@
+ from collections.abc import Sequence
+
+ import torch
+ from torch.distributed.tensor._dtensor_spec import DTensorSpec
+ from torch.distributed.tensor._op_schema import (OpSchema, OpSpec, OpStrategy,
+                                                  RuntimeSchemaInfo)
+ from torch.distributed.tensor._ops.utils import (generate_redistribute_costs,
+                                                  register_op_strategy)
+ from torch.distributed.tensor.placement_types import (Placement, Replicate,
+                                                        Shard)
+
+ from ._ops import ops
+
+
+ def register_fused_add_rms_norm_meta():
+     """Dummy function to register the meta functions.
+     Registration happens at import time by the decorators below.
+     """
+     pass
+
+
+ def _replicate_dims_start_at(placements: Sequence[Placement],
+                              start_dim: int = 0) -> tuple[Placement, ...]:
+     new_placements: list[Placement] = []
+     for p in placements:
+         if p.is_partial() or (isinstance(p, Shard) and p.dim >= start_dim):
+             new_placements.append(Replicate())  # make it replicate
+         else:
+             new_placements.append(p)  # keep the placement
+     return tuple(new_placements)
+
+
+ @register_op_strategy(ops.fused_add_rms_norm.default,
+                       schema_info=RuntimeSchemaInfo(1))
+ def fused_add_rms_norm_strategy(op_schema: OpSchema) -> OpStrategy:
+     mesh = op_schema.get_mesh_from_args()
+
+     assert len(op_schema.args_schema) == 4
+     (
+         input_strategy,
+         residual_strategy,
+         weight_strategy,
+         _,  # eps
+     ) = op_schema.args_schema
+
+     assert isinstance(input_strategy, OpStrategy)
+     assert isinstance(residual_strategy, OpStrategy)
+     assert isinstance(weight_strategy, OpStrategy)
+
+     lengths = {
+         "input": len(input_strategy.strategies),
+         "residual": len(residual_strategy.strategies),
+         "weight": len(weight_strategy.strategies),
+     }
+     assert len(set(
+         lengths.values())) == 1, f"Strategy length mismatch: {lengths}"
+
+     last_dim = input_strategy.ndim - 1
+     strategy = OpStrategy([])
+     for input, residual, weight in zip(input_strategy.strategies,
+                                        residual_strategy.strategies,
+                                        weight_strategy.strategies):
+
+         input_src = input.output_spec
+         residual_src = residual.output_spec
+         weight_src = weight.output_spec
+
+         assert isinstance(input_src, DTensorSpec)
+         assert isinstance(residual_src, DTensorSpec)
+         assert isinstance(weight_src, DTensorSpec)
+
+         redistribute_costs = []
+
+         # Input can be sharded in any dim except the last dim.
+         input_tgt = DTensorSpec(
+             mesh=mesh,
+             placements=_replicate_dims_start_at(input_src.placements,
+                                                 last_dim),
+             tensor_meta=input_src.tensor_meta,
+         )
+         redistribute_costs.append(
+             generate_redistribute_costs(input_strategy, input_tgt))
+
+         # Residual add must have the same sharding as input.
+         residual_tgt = input_tgt
+         redistribute_costs.append(
+             generate_redistribute_costs(residual_strategy, residual_tgt))
+
+         # Weight cannot be sharded, so always replicate it.
+         weight_tgt = DTensorSpec(
+             mesh=mesh,
+             placements=(Replicate(), ),
+             tensor_meta=weight_src.tensor_meta,
+         )
+         redistribute_costs.append(
+             generate_redistribute_costs(weight_strategy, weight_tgt))
+
+         strategy.strategies.append(
+             OpSpec(
+                 output_specs=[input_tgt, input_tgt],
+                 input_specs=[input_tgt, residual_tgt, weight_tgt],
+                 redistribute_cost=redistribute_costs,
+             ))
+     return strategy
+
+
+ @register_op_strategy(ops.fused_add_rms_norm_backward.default,
+                       schema_info=RuntimeSchemaInfo(2))
+ def fused_add_rms_norm_backward_strategy(op_schema: OpSchema) -> OpStrategy:
+     mesh = op_schema.get_mesh_from_args()
+
+     assert len(op_schema.args_schema) == 6
+     (
+         output_grad_strategy,
+         add_output_grad_strategy,
+         add_output_strategy,
+         weight_strategy,
+         _,  # eps
+         need_input_grad,  # need_input_grad
+     ) = op_schema.args_schema
+
+     assert isinstance(output_grad_strategy, OpStrategy)
+     assert isinstance(add_output_grad_strategy, OpStrategy)
+     assert isinstance(add_output_strategy, OpStrategy)
+     assert isinstance(weight_strategy, OpStrategy)
+
+     lengths = {
+         "output_grad": len(output_grad_strategy.strategies),
+         "add_output_grad": len(add_output_grad_strategy.strategies),
+         "add_output": len(add_output_strategy.strategies),
+         "weight": len(weight_strategy.strategies),
+     }
+     assert len(set(
+         lengths.values())) == 1, f"Strategy length mismatch: {lengths}"
+
+     zipped = zip(
+         output_grad_strategy.strategies,
+         add_output_grad_strategy.strategies,
+         add_output_strategy.strategies,
+         weight_strategy.strategies,
+     )
+
+     last_dim = output_grad_strategy.ndim - 1
+     strategy = OpStrategy([])
+     for output_grad, add_output_grad, add_output, weight in zipped:
+         output_grad_src = output_grad.output_spec
+         add_output_grad_src = add_output_grad.output_spec
+         add_output_src = add_output.output_spec
+         weight_src = weight.output_spec
+
+         assert isinstance(output_grad_src, DTensorSpec)
+         assert isinstance(add_output_grad_src, DTensorSpec)
+         assert isinstance(add_output_src, DTensorSpec)
+         assert isinstance(weight_src, DTensorSpec)
+
+         redistribute_costs = []
+
+         # output grad can be sharded in any dim except the last dim.
+         output_grad_tgt = DTensorSpec(
+             mesh=mesh,
+             placements=_replicate_dims_start_at(output_grad_src.placements,
+                                                 last_dim),
+             tensor_meta=output_grad_src.tensor_meta,
+         )
+         redistribute_costs.append(
+             generate_redistribute_costs(output_grad_strategy, output_grad_tgt))
+
+         # add_output_grad must have the same sharding as output_grad.
+         add_output_grad_tgt = output_grad_tgt
+         redistribute_costs.append(
+             generate_redistribute_costs(add_output_grad_strategy,
+                                         add_output_grad_tgt))
+
+         # add_output must have the same sharding as output_grad.
+         add_output_tgt = output_grad_tgt
+         redistribute_costs.append(
+             generate_redistribute_costs(add_output_strategy, add_output_tgt))
+
+         # Weight cannot be sharded, so always replicate it.
+         weight_tgt = DTensorSpec(
+             mesh=mesh,
+             placements=(Replicate(), ),
+             tensor_meta=weight_src.tensor_meta,
+         )
+         redistribute_costs.append(
+             generate_redistribute_costs(weight_strategy, weight_tgt))
+
+         strategy.strategies.append(
+             OpSpec(
+                 output_specs=[
+                     output_grad_tgt if need_input_grad else None, weight_tgt
+                 ],
+                 input_specs=[
+                     output_grad_tgt, add_output_grad_tgt, add_output_tgt,
+                     weight_tgt
+                 ],
+                 redistribute_cost=redistribute_costs,
+             ))
+     return strategy
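The placement rule above is easiest to see in isolation. Below is a minimal, self-contained illustration of what `_replicate_dims_start_at` does (a restated copy for demonstration, not an import from the package): since RMSNorm reduces over the last dimension, a last-dim shard or a pending partial reduction is forced back to `Replicate` before the op runs, while sequence- and batch-dim shards pass through untouched.

```python
from torch.distributed.tensor.placement_types import Partial, Replicate, Shard


def replicate_dims_start_at(placements, start_dim=0):
    # Same rule as _replicate_dims_start_at above: keep shards on dims < start_dim,
    # turn Partial placements and shards on dims >= start_dim into Replicate.
    return tuple(
        Replicate()
        if p.is_partial() or (isinstance(p, Shard) and p.dim >= start_dim)
        else p
        for p in placements)


# For a [T, D] input, last_dim == 1: a sequence-dim shard survives ...
assert replicate_dims_start_at((Shard(0),), start_dim=1) == (Shard(0),)
# ... while a hidden-dim shard or a pending reduction is replicated first.
assert replicate_dims_start_at((Shard(1),), start_dim=1) == (Replicate(),)
assert replicate_dims_start_at((Partial(),), start_dim=1) == (Replicate(),)
```

The same helper already appears in `rms_norm_meta.py`; the weight is always pushed to `Replicate` because every rank needs the full scale vector.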
torch-ext/activation/parallel_style.py ADDED
@@ -0,0 +1,50 @@
+ from abc import ABC, abstractmethod
+ from functools import partial
+ from typing import Any, Optional, Union
+
+ import torch
+ import torch.nn as nn
+ from torch.distributed.tensor import (DeviceMesh, DTensor, Replicate, Shard,
+                                       distribute_module, distribute_tensor)
+ from torch.distributed.tensor.parallel import SequenceParallel
+ from torch.distributed.tensor.placement_types import Placement
+
+
+ class ResidualSequenceParallel(SequenceParallel):
+     """ Consider the case where we have a residual connection across a sequence parallel layer."""
+
+     @staticmethod
+     def _prepare_input_fn(sequence_sharding, mod, inputs, device_mesh):
+         input_tensor = inputs[0]
+         residual_tensor = inputs[1]
+
+         assert isinstance(input_tensor,
+                           DTensor) == isinstance(residual_tensor, DTensor)
+         assert isinstance(input_tensor,
+                           torch.Tensor) == isinstance(residual_tensor,
+                                                       torch.Tensor)
+
+         if isinstance(input_tensor, DTensor):
+             # if the passed in input DTensor is not sharded on the sequence dim, we need to redistribute it
+             if input_tensor.placements != sequence_sharding:
+                 input_tensor = input_tensor.redistribute(
+                     placements=sequence_sharding, async_op=True)
+             if residual_tensor.placements != sequence_sharding:
+                 residual_tensor = residual_tensor.redistribute(
+                     placements=sequence_sharding, async_op=True)
+             return input_tensor, residual_tensor
+
+         elif isinstance(input_tensor, torch.Tensor):
+             # assume the input passed in already sharded on the sequence dim and create the DTensor
+             return DTensor.from_local(input_tensor,
+                                       device_mesh,
+                                       sequence_sharding,
+                                       run_check=False), DTensor.from_local(
+                                           residual_tensor,
+                                           device_mesh,
+                                           sequence_sharding,
+                                           run_check=False)
+         else:
+             raise ValueError(
+                 f"expecting input of {mod} to be a torch.Tensor or DTensor, but got {input_tensor}"
+             )
torch-ext/activation/rms_norm.py CHANGED
@@ -91,10 +91,12 @@ def fused_add_rms_norm_abstract(x, residual, weight, eps):
  def fused_add_rms_norm_backward_abstract(output_grad, add_output_grad,
                                           add_output, weight, eps,
                                           need_input_grad: bool):
-     return torch.empty_like(x) if need_input_grad else None, torch.empty_like(
-         weight)
+     return torch.empty_like(
+         output_grad) if need_input_grad else None, torch.empty_like(weight)
  
  
  if version.parse(torch.__version__) >= version.parse("2.8"):
+     from .fused_add_rms_norm_meta import register_fused_add_rms_norm_meta
      from .rms_norm_meta import register_rms_norm_meta
+     register_fused_add_rms_norm_meta()
      register_rms_norm_meta()
torch-ext/activation/rms_norm_meta.py CHANGED
@@ -48,9 +48,10 @@ def rms_norm_strategy(op_schema: OpSchema) -> OpStrategy:
  
      last_dim = input_strategy.ndim - 1
      strategy = OpStrategy([])
-     for idx in range(len(input_strategy.strategies)):
-         input_src = input_strategy.strategies[idx].output_spec
-         weight_src = weight_strategy.strategies[idx].output_spec
+     for input, weight in zip(input_strategy.strategies,
+                              weight_strategy.strategies):
+         input_src = input.output_spec
+         weight_src = weight.output_spec
  
          assert isinstance(input_src, DTensorSpec)
          assert isinstance(weight_src, DTensorSpec)
@@ -102,16 +103,27 @@ def rms_norm_backward_strategy(op_schema: OpSchema) -> OpStrategy:
      assert isinstance(input_strategy, OpStrategy)
      assert isinstance(weight_strategy, OpStrategy)
  
-     assert len(input_strategy.strategies) == len(weight_strategy.strategies)
-     assert len(input_strategy.strategies) == len(
-         output_grad_strategy.strategies)
+     lengths = {
+         "output_grad": len(output_grad_strategy.strategies),
+         "input": len(input_strategy.strategies),
+         "weight": len(weight_strategy.strategies),
+     }
+
+     assert len(set(
+         lengths.values())) == 1, f"Strategies length mismatch {lengths}"
+
+     zipped = zip(
+         output_grad_strategy.strategies,
+         input_strategy.strategies,
+         weight_strategy.strategies,
+     )
  
      last_dim = input_strategy.ndim - 1
      strategy = OpStrategy([])
-     for idx in range(len(input_strategy.strategies)):
-         output_grad_src = output_grad_strategy.strategies[idx].output_spec
-         input_src = input_strategy.strategies[idx].output_spec
-         weight_src = weight_strategy.strategies[idx].output_spec
+     for output_grad, input, weight in zipped:
+         output_grad_src = output_grad.output_spec
+         input_src = input.output_spec
+         weight_src = weight.output_spec
  
          assert isinstance(output_grad_src, DTensorSpec)
          assert isinstance(input_src, DTensorSpec)
@@ -119,7 +131,7 @@ def rms_norm_backward_strategy(op_schema: OpSchema) -> OpStrategy:
  
          redistribute_costs = []
  
-         # Output grad and input can be sharded in any dim except the last dim.
+         # Output grad can be sharded in any dim except the last dim.
          output_grad_tgt = DTensorSpec(
              mesh=mesh,
              placements=_replicate_dims_start_at(output_grad_src.placements,
@@ -128,12 +140,9 @@ def rms_norm_backward_strategy(op_schema: OpSchema) -> OpStrategy:
          )
          redistribute_costs.append(
              generate_redistribute_costs(output_grad_strategy, output_grad_tgt))
-         input_tgt = DTensorSpec(
-             mesh=mesh,
-             placements=_replicate_dims_start_at(input_src.placements,
-                                                 last_dim),
-             tensor_meta=input_src.tensor_meta,
-         )
+
+         # Input must have the same sharding as output grad.
+         input_tgt = output_grad_tgt
          redistribute_costs.append(
              generate_redistribute_costs(input_strategy, input_tgt))
  