mirror of
https://github.com/RYDE-WORK/llama.cpp.git
synced 2026-01-28 19:13:15 +08:00
* rwkv6: rename to wkv6 * rwkv6: support avx2 avx512 armv8 armv9 * rwkv6: update cuda file name * rwkv6: rename params * wkv on sycl * sycl: add some ops * sycl: Enhance OP support judgment * wkv6: drop armv9 and transfer to GGML style ggml-ci * sync : ggml * update the function to use appropriate types * fix define error * Update ggml/src/ggml-cpu.c * add appropriate asserts * move element-wise functions outside * put the declaration outside the loop * rewrite to be more inline with the common pattern for distributing threads * use recommended way GGML_TENSOR_LOCALS --------- Co-authored-by: Georgi Gerganov <ggerganov@gmail.com> Co-authored-by: Diego Devesa <slarengh@gmail.com> Co-authored-by: Plamen Minev <pacominev@gmail.com> Co-authored-by: Yuri Khrustalev <ykhrustalev@users.noreply.github.com> Co-authored-by: Meng, Hengyu <airdldl@163.com>
56 lines
1.6 KiB
C++
56 lines
1.6 KiB
C++
#include <sycl/sycl.hpp>

#include <iostream>

#include "outprod.hpp"
|
|
|
|
|
|
// Outer product for the SYCL backend: dst = src0 * src1^T, computed as a
// single F32 GEMM through oneMKL on the queue owned by ctx.
//
// Parameters:
//   ctx  - backend context providing the SYCL queue.
//   src0 - left operand, contiguous F32, shape (ne00, ne01).
//   src1 - right operand, F32; may be a transposed view (see below).
//   dst  - output, contiguous F32, shape (ne00, ne10).
//
// Aborts via GGML_ASSERT on type/shape violations or on a SYCL exception.
void ggml_sycl_op_out_prod(ggml_backend_sycl_context& ctx, const ggml_tensor* src0,
    const ggml_tensor* src1, ggml_tensor* dst) {

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type == GGML_TYPE_F32);
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(dst));

    GGML_TENSOR_BINARY_OP_LOCALS

    // Batched (3D/4D) tensors are not supported here: the single GEMM below
    // would fill only the first matrix and silently leave the rest untouched,
    // so reject them loudly instead of producing wrong results.
    GGML_ASSERT(ne02 == 1 && ne03 == 1);
    GGML_ASSERT(ne12 == 1 && ne13 == 1);

    // Get SYCL queue
    dpct::queue_ptr stream = ctx.stream();

    // Dimension checks
    GGML_ASSERT(ne01 == ne11); // Inner dimensions must match
    GGML_ASSERT(ne0 == ne00);  // Output rows match src0 rows
    GGML_ASSERT(ne1 == ne10);  // Output cols match src1 cols

    // Get data pointers
    const float* src0_d = (const float*)src0->data;
    const float* src1_d = (const float*)src1->data;
    float* dst_d = (float*)dst->data;

    // GEMM parameters: plain product, no accumulation into dst.
    const float alpha = 1.0f;
    const float beta = 0.0f;

    // Handle transposition of src1. out_prod needs src1^T: if src1 is already
    // a transposed view in memory, GEMM can consume it directly (nontrans);
    // otherwise ask GEMM to transpose it on the fly.
    const bool src1_T = ggml_is_transposed(src1);
    const oneapi::mkl::transpose src1_op =
        src1_T ? oneapi::mkl::transpose::nontrans : oneapi::mkl::transpose::trans;
    // Leading dimension in elements; the nb1x strides are in bytes.
    const int64_t ldb = (src1_T ? nb10 : nb11) / sizeof(float);

    try {
        // Perform matrix multiplication using oneMKL GEMM
        oneapi::mkl::blas::gemm(*stream,
            oneapi::mkl::transpose::nontrans, src1_op,
            ne0, ne1, ne01,
            alpha,
            src0_d, ne00,
            src1_d, ldb,
            beta,
            dst_d, ne0);
    }
    catch (sycl::exception const& exc) {
        // Surface the failure and abort: the backend has no recovery path
        // for a failed kernel submission.
        std::cerr << exc.what() << std::endl;
        GGML_ASSERT(false);
    }
}
|