forked from pytorch/FBGEMM
Commit
Summary:
Pull Request resolved: pytorch#3138
X-link: facebookresearch/FBGEMM#232

This diff adds a few more tuned shapes to our CK FP8 kernel dispatch for better EMU1.6 performance. The before/after performance can be seen [here](https://docs.google.com/spreadsheets/d/1SAymyghA8V0ZXD1G7ButMy7GzohYogPISlQ6rHVtVdE/edit?usp=sharing). The quick summary is that we see small to medium improvements across all EMU shapes. E2E performance should improve a bit, but not massively, as the existing heuristics already did acceptably in this case.

Reviewed By: mxz297

Differential Revision: D62761000

fbshipit-source-id: 378fabb7cb2e37ffc415b7e464044a266a24ec9c
1 parent 49fa9a5, commit 9634774
Showing 4 changed files with 179 additions and 3 deletions.
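For context, the "tuned shapes" live in a dispatcher that maps the observed (M, N, K) of a problem to whichever kernel instance benchmarked fastest for that shape. The sketch below illustrates the pattern only; the function name dispatch_fp8_rowwise and the shape cutoffs are assumptions for illustration, not the actual heuristic touched by this diff.

// Hypothetical sketch of shape-keyed dispatch. The cutoffs below are
// illustrative; real entries are chosen by benchmarking each shape.
at::Tensor dispatch_fp8_rowwise(
    at::Tensor XQ,
    at::Tensor WQ,
    at::Tensor x_scale,
    at::Tensor w_scale,
    at::Tensor Y) {
  int M = size_to_dim_(XQ.dim() - 1, XQ.sizes());
  int N = WQ.size(0);
  if (M <= 128 && N <= 4096) {
    // One shape bucket routes to a skinny-N tile.
    return fp8_rowwise_128x128x32x128_32x32_2x1_8x16x1_8x16x1_1x16x1x8_4x4x1_1x1_intrawave_v2(
        XQ, WQ, x_scale, w_scale, Y);
  }
  // A large compute-bound tile serves as the general fallback.
  return fp8_rowwise_256x256x256x64_16x16_8x8_4x64x1_4x64x1_1x32x1x8_8x8x1_1x2_intrawave_v3(
      XQ, WQ, x_scale, w_scale, Y);
}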
...ls/fp8_rowwise_128x128x32x128_32x32_2x1_8x16x1_8x16x1_1x16x1x8_4x4x1_1x1_intrawave_v2.hip (69 additions, 0 deletions)
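The long file name encodes the DeviceGemmHelper template arguments one-for-one. The semantic labels below are our reading of the CK tile-parameter convention, not stated in the diff:

128x128x32x128 - block size, then the M/N/K tile per block
32x32 - wave (MFMA) tile
2x1 - waves per block along M and N
8x16x1, 8x16x1 - A and B block-transfer thread clusters
1x16x1x8 - C-shuffle block-transfer cluster
4x4x1 - C-shuffle scalars per vector
1x1 - C-shuffle M/N repeats per shuffle
intrawave_v2 - pipeline scheduler and pipeline version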
@@ -0,0 +1,69 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "fp8_rowwise_common.h"

at::Tensor
fp8_rowwise_128x128x32x128_32x32_2x1_8x16x1_8x16x1_1x16x1x8_4x4x1_1x1_intrawave_v2(
    at::Tensor XQ,
    at::Tensor WQ,
    at::Tensor x_scale,
    at::Tensor w_scale,
    at::Tensor Y) {
  // Check if this input needs to be padded.
  int M = size_to_dim_(XQ.dim() - 1, XQ.sizes());
  int N = WQ.size(0);
  int K = WQ.size(1);
  bool pad = (M % 128 != 0) || (N % 32 != 0) || (K % 128 != 0);

  // This kernel seems optimal for the most purely compute-bound tasks.
  if (pad) {
    // Shape is not tile-aligned; rely on the helper's default (padded)
    // GemmSpecialization.
    using DeviceGemmInstance = DeviceGemmHelper<
        128, // block size
        128, // M tile per block
        32, // N tile per block
        128, // K tile per block
        32, // wave tile M
        32, // wave tile N
        2, // waves per block along M
        1, // waves per block along N
        S<8, 16, 1>, // A block-transfer cluster
        S<8, 16, 1>, // B block-transfer cluster
        S<1, 16, 1, 8>, // C-shuffle block-transfer cluster
        S<4, 4, 1>, // C-shuffle scalars per vector
        1, // C-shuffle M repeats
        1, // C-shuffle N repeats
        ck::BlockGemmPipelineScheduler::Intrawave,
        ck::BlockGemmPipelineVersion::v2>;
    // Run kernel instance.
    return f8f8bf16_rowwise_impl<DeviceGemmInstance>(
        XQ, WQ, x_scale, w_scale, Y);
  } else {
    // Tile-aligned shape; GemmSpecialization::Default skips padding logic.
    using DeviceGemmInstance = DeviceGemmHelper<
        128,
        128,
        32,
        128,
        32,
        32,
        2,
        1,
        S<8, 16, 1>,
        S<8, 16, 1>,
        S<1, 16, 1, 8>,
        S<4, 4, 1>,
        1,
        1,
        ck::BlockGemmPipelineScheduler::Intrawave,
        ck::BlockGemmPipelineVersion::v2,
        ck::tensor_operation::device::GemmSpecialization::Default>;
    // Run kernel instance.
    return f8f8bf16_rowwise_impl<DeviceGemmInstance>(
        XQ, WQ, x_scale, w_scale, Y);
  }
}
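To make the alignment check concrete, here is a standalone sketch. The shape is an assumed example, not taken from the diff or the spreadsheet: any dimension that does not divide evenly by its tile extent forces the padded instance.

// Standalone illustration of the alignment check above.
#include <cstdio>

int main() {
  int M = 1000; // not a multiple of 128 -> padding required
  int N = 1280; // multiple of 32
  int K = 8192; // multiple of 128
  bool pad = (M % 128 != 0) || (N % 32 != 0) || (K % 128 != 0);
  std::printf("pad = %s\n", pad ? "true" : "false"); // prints "pad = true"
  return 0;
}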
...ls/fp8_rowwise_256x256x256x64_16x16_8x8_4x64x1_4x64x1_1x32x1x8_8x8x1_1x2_intrawave_v3.hip (69 additions, 0 deletions)
@@ -0,0 +1,69 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "fp8_rowwise_common.h"

at::Tensor
fp8_rowwise_256x256x256x64_16x16_8x8_4x64x1_4x64x1_1x32x1x8_8x8x1_1x2_intrawave_v3(
    at::Tensor XQ,
    at::Tensor WQ,
    at::Tensor x_scale,
    at::Tensor w_scale,
    at::Tensor Y) {
  // Check if this input needs to be padded.
  int M = size_to_dim_(XQ.dim() - 1, XQ.sizes());
  int N = WQ.size(0);
  int K = WQ.size(1);
  bool pad = (M % 256 != 0) || (N % 256 != 0) || (K % 64 != 0);

  // This kernel seems optimal for the most purely compute-bound tasks.
  if (pad) {
    // Shape is not tile-aligned; rely on the helper's default (padded)
    // GemmSpecialization. Template parameters follow the same layout as in
    // the 128x128x32x128 instance above.
    using DeviceGemmInstance = DeviceGemmHelper<
        256,
        256,
        256,
        64,
        16,
        16,
        8,
        8,
        S<4, 64, 1>,
        S<4, 64, 1>,
        S<1, 32, 1, 8>,
        S<8, 8, 1>,
        1,
        2,
        ck::BlockGemmPipelineScheduler::Intrawave,
        ck::BlockGemmPipelineVersion::v3>;
    // Run kernel instance.
    return f8f8bf16_rowwise_impl<DeviceGemmInstance>(
        XQ, WQ, x_scale, w_scale, Y);
  } else {
    // Tile-aligned shape; GemmSpecialization::Default skips padding logic.
    using DeviceGemmInstance = DeviceGemmHelper<
        256,
        256,
        256,
        64,
        16,
        16,
        8,
        8,
        S<4, 64, 1>,
        S<4, 64, 1>,
        S<1, 32, 1, 8>,
        S<8, 8, 1>,
        1,
        2,
        ck::BlockGemmPipelineScheduler::Intrawave,
        ck::BlockGemmPipelineVersion::v3,
        ck::tensor_operation::device::GemmSpecialization::Default>;
    // Run kernel instance.
    return f8f8bf16_rowwise_impl<DeviceGemmInstance>(
        XQ, WQ, x_scale, w_scale, Y);
  }
}
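As a rough sanity check on why this 256x256 tile suits compute-bound work: each workgroup produces a 256x256 output tile and steps through K in chunks of 64, so a large aligned GEMM launches (M/256) * (N/256) workgroups. A standalone sketch with an assumed example shape:

// Workgroup-count arithmetic for the 256x256x64 tile.
#include <cstdio>

int main() {
  int M = 4096, N = 4096, K = 8192; // assumed example, not from the diff
  int tiles_m = (M + 255) / 256; // output tiles along M
  int tiles_n = (N + 255) / 256; // output tiles along N
  int k_steps = (K + 63) / 64;   // mainloop iterations per tile
  std::printf("%d x %d workgroups, %d K-steps each\n", tiles_m, tiles_n, k_steps);
  // Prints: 16 x 16 workgroups, 128 K-steps each
  return 0;
}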