cann: add quantize_fp16_q4_0
wangshuai09 committed Aug 2, 2024
1 parent 5af1609 commit f5aeac5
Showing 6 changed files with 271 additions and 12 deletions.
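The new kernel implements ggml's Q4_0 format on the CANN backend: each group of 32 fp16 values is reduced to one fp16 scale plus 32 signed 4-bit quants, and in the device layout all packed quants come first, followed by the per-group scales. Here is a minimal scalar sketch of the per-group math, assuming fp32 input for readability; the helper name is illustrative and not part of this diff:

#include <math.h>
#include <stdint.h>

// Scalar sketch of Q4_0: one group of 32 values -> 32 int4 quants + one scale.
static float quantize_group_q4_0_ref(const float *x, int8_t *q) {
    // take the signed value with the largest magnitude, as the kernel does
    // with ReduceMax/ReduceMin plus a sign test
    float d = x[0];
    for (int i = 1; i < 32; i++) {
        if (fabsf(x[i]) > fabsf(d)) {
            d = x[i];
        }
    }
    d /= -8.0f;  // the extreme value quantizes to -8
    const float id = d != 0.0f ? 1.0f / d : 0.0f;
    for (int i = 0; i < 32; i++) {
        int v = (int) roundf(x[i] * id);                  // round to nearest
        q[i] = (int8_t) (v < -8 ? -8 : (v > 7 ? 7 : v));  // clamp to the int4 range
    }
    return d;  // stored once per group, after the packed nibbles
}

The ggml_backend_cann_transform_q4_0 / ggml_backend_cann_transform_back_q4_0 pair in the first hunk below converts between ggml's interleaved block_q4_0 host layout and this flat quants-then-scales device layout.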
3 changes: 1 addition & 2 deletions ggml/src/ggml-cann.cpp
@@ -627,7 +627,6 @@ GGML_CALL static void* ggml_backend_cann_buffer_get_base(
GGML_CALL static void ggml_backend_cann_transform_q4_0(ggml_tensor* tensor,
const void* src,
void* dst) {
GGML_ASSERT(tensor->op == GGML_OP_NONE);

int64_t n_elems = ggml_nelements(tensor);
int64_t groups = n_elems / QK4_0;
@@ -679,7 +678,6 @@ GGML_CALL static void ggml_backend_cann_transform_q4_0(ggml_tensor* tensor,
*/
GGML_CALL static void ggml_backend_cann_transform_back_q4_0(
const ggml_tensor* tensor, void* src, void* dst) {
GGML_ASSERT(tensor->op == GGML_OP_NONE);

int64_t n_elems = ggml_nelements(tensor);
int64_t groups = n_elems / QK4_0;
@@ -1694,6 +1692,7 @@ GGML_CALL static bool ggml_backend_cann_supports_op(ggml_backend_t backend,
case GGML_TYPE_F32:
case GGML_TYPE_F16:
case GGML_TYPE_Q8_0:
case GGML_TYPE_Q4_0:
return true;
default:
return false;
7 changes: 7 additions & 0 deletions ggml/src/ggml-cann/aclnn_ops.cpp
@@ -910,6 +910,13 @@ void ggml_cann_dup(ggml_backend_cann_context& ctx, ggml_tensor* dst) {
((ggml_tensor*)dst->extra)->ne);
return;
}
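// quantize on device: F16 source -> Q4_0 destination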
if (dst->type == GGML_TYPE_Q4_0) {
aclrtlaunch_ascendc_quantize_f16_q4_0(
2, ctx.stream(), src->data, dst->data,
((ggml_tensor*)src->extra)->ne, ((ggml_tensor*)src->extra)->nb,
((ggml_tensor*)dst->extra)->ne);
return;
}
if (dst->type == GGML_TYPE_F16) {
if (ggml_are_same_shape(src, dst)) {
cann_copy(ctx, acl_src, acl_dst);
3 changes: 2 additions & 1 deletion ggml/src/ggml-cann/kernels/CMakeLists.txt
@@ -9,6 +9,7 @@ file(GLOB SRC_FILES
get_row_q8_0.cpp
quantize_f32_q8_0.cpp
quantize_f16_q8_0.cpp
quantize_f16_q4_0.cpp
dup.cpp
)

@@ -29,4 +30,4 @@ ascendc_library(ascendc_kernels STATIC
${SRC_FILES}
)

#ascendc_compile_definitions(ascendc_kernels PRIVATE -DASCENDC_DUMP)
# ascendc_compile_definitions(ascendc_kernels PRIVATE -DASCENDC_DUMP)
1 change: 1 addition & 0 deletions ggml/src/ggml-cann/kernels/ascendc_kernels.h
@@ -8,6 +8,7 @@

#include "aclrtlaunch_ascendc_quantize_f32_q8_0.h"
#include "aclrtlaunch_ascendc_quantize_f16_q8_0.h"
#include "aclrtlaunch_ascendc_quantize_f16_q4_0.h"

#include "aclrtlaunch_ascendc_dup_by_rows_fp16.h"
#include "aclrtlaunch_ascendc_dup_by_rows_fp32.h"
231 changes: 231 additions & 0 deletions ggml/src/ggml-cann/kernels/quantize_f16_q4_0.cpp
@@ -0,0 +1,231 @@
#include "kernel_operator.h"

using namespace AscendC;

#define BUFFER_NUM 2
#define Group_Size 32
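// one fp16 scale is stored per group of 32 quantized values (QK4_0 on the host side)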

class QUANTIZE_F16_Q4_0 {
public:
__aicore__ inline QUANTIZE_F16_Q4_0() {}
__aicore__ inline void init(GM_ADDR input, GM_ADDR output,
int64_t *input_ne_ub, size_t *input_nb_ub,
int64_t *output_ne_ub) {
int64_t op_block_num = GetBlockNum();
int64_t op_block_idx = GetBlockIdx();

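// copy shape (ne) and convert the byte strides (nb) to element strides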
for (int i = 0; i < 4; i++) {
input_ne[i] = input_ne_ub[i];
input_stride[i] = input_nb_ub[i] / input_nb_ub[0];
output_ne[i] = output_ne_ub[i];
}

output_stride[0] = 1;
for (int i = 1; i < 4; i++) {
output_stride[i] = output_stride[i - 1] * output_ne[i - 1];
}

// scales are stored one after another: [group1_scale, group2_scale, ...]
scale_ne = input_ne;
scale_stride[0] = 1;
scale_stride[1] = input_ne[0] / Group_Size;
for (int i = 2; i < 4; i++) {
scale_stride[i] = scale_stride[i - 1] * scale_ne[i - 1];
}

// split input tensor by rows.
uint64_t nr = input_ne[1] * input_ne[2] * input_ne[3];
dr = nr / op_block_num;

uint64_t tails = nr % op_block_num;
if (op_block_idx < tails) {
dr += 1;
ir = dr * op_block_idx;
} else {
ir = dr * op_block_idx + tails;
}

group_size_in_row = scale_stride[1];
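// packed 4-bit quants occupy n_elems / 2 bytes; the per-group scales follow them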
int64_t scale_offset = output_ne[0] * output_ne[1] * output_ne[2] *
output_ne[3] * sizeof(uint8_t) / 2;

input_gm.SetGlobalBuffer((__gm__ half *)input);
output_gm.SetGlobalBuffer((__gm__ int4b_t *)output);
scale_gm.SetGlobalBuffer((__gm__ half *)(output + scale_offset + ir *
group_size_in_row *
sizeof(half)));

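// double-buffered queues stage one 32-element group; the scalar queues hold one 32-byte block each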
pipe.InitBuffer(input_queue, BUFFER_NUM, Group_Size * sizeof(half));
pipe.InitBuffer(output_queue, BUFFER_NUM, Group_Size * sizeof(int4b_t));
pipe.InitBuffer(work_queue, 1, 32);
pipe.InitBuffer(max_queue, 1, 32);
pipe.InitBuffer(min_queue, 1, 32);
pipe.InitBuffer(scale_queue, 1, 32);
pipe.InitBuffer(int8_queue, 1, 32);
pipe.InitBuffer(cast_queue, 1, Group_Size * sizeof(float));
}

__aicore__ inline void copy_in(uint32_t offset) {
LocalTensor<half> input_local = input_queue.AllocTensor<half>();
DataCopy(input_local, input_gm[offset], Group_Size);
input_queue.EnQue(input_local);
}

__aicore__ inline void copy_out(uint32_t offset) {
LocalTensor<int4b_t> output_local = output_queue.DeQue<int4b_t>();
DataCopy(output_gm[offset], output_local, Group_Size);
output_queue.FreeTensor(output_local);
}

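// quantize one 32-element group of a row and return its fp16 scale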
__aicore__ inline half calculate_group(int64_t row, int64_t group) {
const int64_t i3 = row / (input_ne[1] * input_ne[2]);
const int64_t i2 = (row - i3 * input_ne[1] * input_ne[2]) / input_ne[1];
const int64_t i1 =
row - i3 * input_ne[1] * input_ne[2] - i2 * input_ne[1];

const int64_t input_offset = i1 * input_stride[1] +
i2 * input_stride[2] +
i3 * input_stride[3] + Group_Size * group;

const int64_t output_offset = i1 * output_stride[1] +
i2 * output_stride[2] +
i3 * output_stride[3] + Group_Size * group;

PRINTF("output offset %d \n", output_offset);
PRINTF("group %d \n", group);
PRINTF("i1 %d, i2 %d, i3 %d, output_stride1 %d\n", i1, i2, i3, output_stride[1]);

copy_in(input_offset);
LocalTensor<half> input_local = input_queue.DeQue<half>();
LocalTensor<int4b_t> output_local = output_queue.AllocTensor<int4b_t>();
LocalTensor<float> work_local = work_queue.AllocTensor<float>();
LocalTensor<float> max_local = max_queue.AllocTensor<float>();
LocalTensor<float> min_local = min_queue.AllocTensor<float>();
LocalTensor<float> cast_local = cast_queue.AllocTensor<float>();
LocalTensor<int8_t> int8_local = int8_queue.AllocTensor<int8_t>();

// TODO: OPTIMIZE
Cast(cast_local, input_local, RoundMode::CAST_NONE, Group_Size);
ReduceMax(max_local, cast_local, work_local, Group_Size);
ReduceMin(min_local, cast_local, work_local, Group_Size);
const float max_value = max_local.GetValue(0);
const float min_value = min_local.GetValue(0);
// take the signed value with the largest magnitude as the reference
float d = max_value;
if (min_value < 0 && -min_value > max_value) {
d = min_value;
}
pipe_barrier(PIPE_ALL);
d = d / (-8);  // the extreme value quantizes to -8
if (d != 0) {
Muls(cast_local, cast_local, 1.0f / d, Group_Size);
}

// round to nearest and narrow: fp32 -> fp16 -> int4
Cast(input_local, cast_local, RoundMode::CAST_ROUND, Group_Size);
Cast(output_local, input_local, RoundMode::CAST_ROUND, Group_Size);

output_queue.EnQue(output_local);

copy_out(output_offset);

input_queue.FreeTensor(input_local);
work_queue.FreeTensor(work_local);
max_queue.FreeTensor(max_local);
min_queue.FreeTensor(min_local);
int8_queue.FreeTensor(int8_local);
cast_queue.FreeTensor(cast_local);
return (half)d;
}

__aicore__ inline void calculate() {
LocalTensor<half> scale_local = scale_queue.AllocTensor<half>();
uint32_t scale_local_offset = 0;
uint32_t scale_global_offset = 0;
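// scales are staged locally and flushed 16 at a time (16 halves = one 32-byte block)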
for (int64_t i = ir; i < ir + dr; i++) {
for (int64_t j = 0; j < group_size_in_row; j++) {
half scale = calculate_group(i, j);
scale_local.SetValue(scale_local_offset++, scale);
if (scale_local_offset == 16) {
scale_local_offset = 0;
// TODO: OPTIMIZE ME
pipe_barrier(PIPE_ALL);
DataCopy(scale_gm[scale_global_offset], scale_local, 16);
pipe_barrier(PIPE_ALL);
scale_global_offset += 16;
}
}
}

if (scale_local_offset != 0) {
pipe_barrier(PIPE_ALL);
DataCopyExtParams dataCopyParams;
dataCopyParams.blockCount = 1;
dataCopyParams.blockLen = scale_local_offset * sizeof(half);
DataCopyPad(scale_gm[scale_global_offset], scale_local,
dataCopyParams);
pipe_barrier(PIPE_ALL);
}
}

private:
int64_t input_ne[4];
size_t input_stride[4];

int64_t *scale_ne;
size_t scale_stride[4];

int64_t output_ne[4];
size_t output_stride[4];

int64_t group_size_in_row;

int64_t ir;
int64_t dr;

TPipe pipe;
GlobalTensor<half> input_gm;
GlobalTensor<half> scale_gm;
GlobalTensor<int4b_t> output_gm;
TQue<QuePosition::VECIN, BUFFER_NUM> input_queue;
TQue<QuePosition::VECOUT, BUFFER_NUM> output_queue;
TQue<QuePosition::VECIN, 1> work_queue;
TQue<QuePosition::VECOUT, 1> max_queue;
TQue<QuePosition::VECOUT, 1> min_queue;
TQue<QuePosition::VECOUT, 1> scale_queue;
TQue<QuePosition::VECOUT, 1> cast_queue;
TQue<QuePosition::VECOUT, 1> int8_queue;
TQue<QuePosition::VECOUT, 1> const_15_queue;

};

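// byte-wise copy of launch metadata (ne/nb arrays) from global memory to the stack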
template <typename T>
__aicore__ inline void copy_to_ub(GM_ADDR gm, T *ub, size_t size) {
auto gm_ptr = (__gm__ uint8_t *)gm;
auto ub_ptr = (uint8_t *)(ub);
for (size_t i = 0; i < size; ++i, ++ub_ptr, ++gm_ptr) {
*ub_ptr = *gm_ptr;
}
}

extern "C" __global__ __aicore__ void ascendc_quantize_f16_q4_0(
GM_ADDR input_gm, GM_ADDR output_gm, GM_ADDR input_ne_gm,
GM_ADDR input_nb_gm, GM_ADDR output_ne_gm) {
int64_t input_ne_ub[4];
size_t input_nb_ub[4];
int64_t output_ne_ub[4];

copy_to_ub(input_ne_gm, input_ne_ub, 32);
copy_to_ub(input_nb_gm, input_nb_ub, 32);
copy_to_ub(output_ne_gm, output_ne_ub, 32);

QUANTIZE_F16_Q4_0 op;
op.init(input_gm, output_gm, input_ne_ub, input_nb_ub, output_ne_ub);
op.calculate();
}
38 changes: 29 additions & 9 deletions tests/test-backend-ops.cpp
@@ -482,6 +482,24 @@ struct test_case {
std::vector<float> f1 = tensor_to_float(t1);
std::vector<float> f2 = tensor_to_float(t2);

printf("f1:\n ");
for (int i=0; i<128; i++) {
printf("%f,", f1[i]);
if ((i+1)%32==0) {
printf("\n");
}
}
printf("\n");
printf("f2: \n");
for (int i=0; i<128; i++) {
printf("%f,", f2[i]);
if ((i+1)%32==0) {
printf("\n");
}
}
printf("\n");
// printf("%f, %f \n", f1[0], f2[0]);

for (size_t i = 0; i < f1.size(); i++) {
// check for nans
if (std::isnan(f1[i]) || std::isnan(f2[i])) {
@@ -2170,17 +2188,19 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
test_cases.emplace_back(new test_dup(GGML_TYPE_I16, {10, 8, 3, 1}, {0, 2, 1, 3}));
test_cases.emplace_back(new test_dup(GGML_TYPE_I16, {10, 8, 3, 1}, {1, 2, 0, 3}));

for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_F32}) {
for (ggml_type type_dst : all_types) {
test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 4, 4, 4}));
test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {0, 2, 1, 3})); // cpy by rows
}
}
for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_F32}) {
for (ggml_type type_dst : {GGML_TYPE_F16, GGML_TYPE_F32}) {
test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {1, 0, 2, 3})); // cpy not-contiguous
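// exercise the new F16 -> Q4_0 copy path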
for (ggml_type type_src : {GGML_TYPE_F16}) {
for (ggml_type type_dst : {GGML_TYPE_Q4_0}) {
// test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 4, 4, 4}));
// test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 1, 1, 1}));
test_cases.emplace_back(new test_cpy(type_src, type_dst, {32, 4, 1, 1}));
// test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {0, 2, 1, 3})); // cpy by rows
}
}
// for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_F32}) {
// for (ggml_type type_dst : {GGML_TYPE_F16, GGML_TYPE_F32}) {
// test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {1, 0, 2, 3})); // cpy not-contiguous
// }
// }

test_cases.emplace_back(new test_cont());

