diff --git a/megatron/core/tensor_parallel/layers.py b/megatron/core/tensor_parallel/layers.py
index 2245113c9c..d6920865e5 100644
--- a/megatron/core/tensor_parallel/layers.py
+++ b/megatron/core/tensor_parallel/layers.py
@@ -285,6 +285,7 @@ def forward(ctx, input, weight, bias, gradient_accumulation_fusion,
     @staticmethod
     @custom_bwd
     def backward(ctx, grad_output):
+        args = get_args()
         input, weight = ctx.saved_tensors
         use_bias = ctx.use_bias
 
@@ -368,7 +369,12 @@ def backward(ctx, grad_output):
         # grad_weight = grad_output.t().matmul(total_input)
         from megatron.core.tensor_parallel.weight_grad_store import WeightGradStore
         WeightGradStore.put(total_input, grad_output, weight, gradientUpdateFunction)
-        grad_weight = None
+
+        if args.enable_zbh1_pipeline:
+            grad_weight = None
+        else:
+            grad_weight = weight.grad
+
         grad_bias = grad_output.sum(dim=0) if use_bias else None
 
         if ctx.sequence_parallel:
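
For context, below is a minimal sketch of the WeightGradStore pattern this patch relies on. It is an assumed illustration, not the actual megatron/core/tensor_parallel/weight_grad_store.py implementation: backward() enqueues the weight-gradient computation via put(), and a zero-bubble (ZB-H1) pipeline schedule later drains the queue to compute dW during otherwise-idle pipeline bubbles. The gradient_update function here is a hypothetical stand-in for the gradientUpdateFunction passed in the patched backward().

# Minimal sketch (assumed, illustrative only) of the deferred
# weight-gradient store used by zero-bubble (ZB-H1) pipeline schedules.
import queue

import torch


class WeightGradStore:
    """Queues deferred weight-gradient computations for later execution."""

    cache = queue.Queue()

    @classmethod
    def put(cls, total_input, grad_output, weight, grad_fn):
        # Defer dW = grad_output^T @ total_input instead of computing it
        # inline in backward(); the schedule decides when to run it.
        cls.cache.put((total_input, grad_output, weight, grad_fn))

    @classmethod
    def pop(cls):
        # Drain the queue, computing and accumulating each weight gradient.
        while not cls.cache.empty():
            total_input, grad_output, weight, grad_fn = cls.cache.get()
            grad_fn(total_input, grad_output, weight)


def gradient_update(total_input, grad_output, weight):
    # Hypothetical stand-in for gradientUpdateFunction in the diff:
    # accumulate the weight gradient in place on the parameter.
    grad = grad_output.t().matmul(total_input)
    if weight.grad is None:
        weight.grad = grad
    else:
        weight.grad += grad

Under this reading, the patched else branch (grad_weight = weight.grad) returns the already-accumulated gradient when the ZB-H1 schedule is disabled, while the ZB-H1 path returns None and leaves the computation queued for WeightGradStore.pop().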