
Commit

[BugFix] Fix silu kernel (#661)
Co-authored-by: sufubao <[email protected]>
sufubao authored Dec 11, 2024
1 parent 15237c6 commit a44b698
Showing 1 changed file with 6 additions and 6 deletions.
lightllm/models/llama/triton_kernel/silu_and_mul.py (6 additions, 6 deletions)
@@ -17,8 +17,8 @@ def _silu_and_mul_kernel(
     BLOCK_M: tl.constexpr,
     BLOCK_N: tl.constexpr,
 ):
-    stride_input_m = stride_input_m.to(tl.int64)
-    stride_output_m = stride_output_m.to(tl.int64)
+    stride_input_m = tl.cast(stride_input_m, dtype=tl.int64)
+    stride_output_m = tl.cast(stride_output_m, dtype=tl.int64)
 
     tid = tl.program_id(0)
     input_m_offsets = tid * BLOCK_M + tl.arange(0, BLOCK_M)
@@ -53,7 +53,7 @@ def _silu_and_mul_kernel(
     )
 
 
-def silu_and_mul_fwd(input, output):
+def silu_and_mul_fwd(input: torch.Tensor, output):
     stride_input_m = input.stride(0)
     stride_input_n = input.stride(1)
     stride_output_m = output.stride(0)
@@ -88,13 +88,13 @@ def torch_silu_and_mul(input: torch.Tensor):
 def test_silu_and_mul(M, N, dtype, device="cuda"):
     # create data
     X = torch.randn((M, N), dtype=dtype, device=device)
-
+    y_tri = torch.empty((M, N // 2), dtype=dtype, device=device)
     # run
-    y_tri = silu_and_mul_fwd(X)
+    silu_and_mul_fwd(X, y_tri)
     y_ref = torch_silu_and_mul(X)
 
     # compare
     print("type:", y_tri.dtype, y_ref.dtype)
     print("max delta:", torch.max(torch.abs(y_tri - y_ref)))
-    assert torch.allclose(y_tri, y_ref, atol=1e-6, rtol=0)
+    assert torch.allclose(y_tri, y_ref, atol=1e-5, rtol=0)
     return
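
For context, a minimal usage sketch of the fixed interface, mirroring the updated test: silu_and_mul_fwd fills a caller-allocated output tensor of shape (M, N // 2) rather than allocating one itself. The import path, sizes, and dtype below are assumptions for illustration, not part of the commit.

import torch

# Assumed import path, based on the changed file's location in the repository.
from lightllm.models.llama.triton_kernel.silu_and_mul import silu_and_mul_fwd, torch_silu_and_mul

M, N = 128, 11008  # illustrative sizes; N must be even since the kernel halves the last dim
X = torch.randn((M, N), dtype=torch.float16, device="cuda")

# Caller allocates the output buffer; the Triton kernel writes into it.
y_tri = torch.empty((M, N // 2), dtype=torch.float16, device="cuda")
silu_and_mul_fwd(X, y_tri)

# Pure-PyTorch reference from the same module, used only to check the kernel output.
y_ref = torch_silu_and_mul(X)
assert torch.allclose(y_tri, y_ref, atol=1e-5, rtol=0)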
