@@ -398,6 +398,7 @@ struct ggml_backend_opencl_context {
     cl_kernel kernel_scale;
     cl_kernel kernel_silu, kernel_silu_4;
     cl_kernel kernel_gelu, kernel_gelu_4;
+    cl_kernel kernel_gelu_erf, kernel_gelu_erf_4;
     cl_kernel kernel_gelu_quick, kernel_gelu_quick_4;
     cl_kernel kernel_relu;
     cl_kernel kernel_sigmoid_f32, kernel_sigmoid_f16;
@@ -736,6 +737,8 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve
 
         CL_CHECK((backend_ctx->kernel_gelu           = clCreateKernel(backend_ctx->program_gelu, "kernel_gelu", &err), err));
         CL_CHECK((backend_ctx->kernel_gelu_4         = clCreateKernel(backend_ctx->program_gelu, "kernel_gelu_4", &err), err));
+        CL_CHECK((backend_ctx->kernel_gelu_erf       = clCreateKernel(backend_ctx->program_gelu, "kernel_gelu_erf", &err), err));
+        CL_CHECK((backend_ctx->kernel_gelu_erf_4     = clCreateKernel(backend_ctx->program_gelu, "kernel_gelu_erf_4", &err), err));
         CL_CHECK((backend_ctx->kernel_gelu_quick     = clCreateKernel(backend_ctx->program_gelu, "kernel_gelu_quick", &err), err));
         CL_CHECK((backend_ctx->kernel_gelu_quick_4   = clCreateKernel(backend_ctx->program_gelu, "kernel_gelu_quick_4", &err), err));
         GGML_LOG_CONT(".");
@@ -2266,6 +2269,7 @@ static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_te
                 case GGML_UNARY_OP_GELU:
                 case GGML_UNARY_OP_SILU:
                 case GGML_UNARY_OP_RELU:
+                case GGML_UNARY_OP_GELU_ERF:
                 case GGML_UNARY_OP_GELU_QUICK:
                     return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32;
                 case GGML_UNARY_OP_SIGMOID:
@@ -3870,6 +3874,44 @@ static void ggml_cl_gelu(ggml_backend_t backend, const ggml_tensor * src0, const
     backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
 }
 
+static void ggml_cl_gelu_erf(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+    GGML_ASSERT(src0);
+    GGML_ASSERT(src0->extra);
+    GGML_ASSERT(dst);
+    GGML_ASSERT(dst->extra);
+
+    UNUSED(src1);
+
+    ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context;
+
+    ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra;
+    ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra;
+
+    cl_ulong offset0 = extra0->offset + src0->view_offs;
+    cl_ulong offsetd = extrad->offset + dst->view_offs;
+
+    cl_kernel kernel;
+
+    int n = ggml_nelements(dst);
+
+    if (n % 4 == 0) {
+        kernel = backend_ctx->kernel_gelu_erf_4;
+        n /= 4;
+    } else {
+        kernel = backend_ctx->kernel_gelu_erf;
+    }
+
+    CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem),   &extra0->data_device));
+    CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0));
+    CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem),   &extrad->data_device));
+    CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd));
+
+    size_t global_work_size[] = {(size_t)n, 1, 1};
+    size_t local_work_size[] = {64, 1, 1};
+
+    backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
+}
+
 static void ggml_cl_gelu_quick(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
     GGML_ASSERT(src0);
     GGML_ASSERT(src0->extra);
@@ -6388,6 +6430,12 @@ bool ggml_cl_compute_forward(ggml_backend_t backend, struct ggml_tensor * tensor
                     }
                     func = ggml_cl_gelu;
                     break;
+                case GGML_UNARY_OP_GELU_ERF:
+                    if (!any_on_device) {
+                        return false;
+                    }
+                    func = ggml_cl_gelu_erf;
+                    break;
                 case GGML_UNARY_OP_GELU_QUICK:
                     if (!any_on_device) {
                         return false;
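
The host-side wiring above expects two new device kernels, kernel_gelu_erf and kernel_gelu_erf_4, to be built from program_gelu; the .cl source that defines them is not part of this excerpt. Below is a minimal sketch of what they might look like, assuming the same argument layout the host code passes (buffer, byte offset, buffer, byte offset) and the exact erf-based GELU, 0.5*x*(1 + erf(x/sqrt(2))). It is an illustration, not the PR's actual kernel source.

#define GELU_ERF_COEF 0.70710678118654752440f  // 1/sqrt(2)

// Scalar variant: one work-item per element.
kernel void kernel_gelu_erf(
        global float * src0,
        ulong          offset0,
        global float * dst,
        ulong          offsetd
) {
    src0 = (global float*)((global char*)src0 + offset0);
    dst  = (global float*)((global char*)dst  + offsetd);

    float x = src0[get_global_id(0)];
    dst[get_global_id(0)] = 0.5f*x*(1.0f + erf(x*GELU_ERF_COEF));
}

// Vectorized variant: one work-item per float4; the host picks this
// kernel and divides n by 4 when the element count is a multiple of 4.
kernel void kernel_gelu_erf_4(
        global float4 * src0,
        ulong           offset0,
        global float4 * dst,
        ulong           offsetd
) {
    src0 = (global float4*)((global char*)src0 + offset0);
    dst  = (global float4*)((global char*)dst  + offsetd);

    float4 x = src0[get_global_id(0)];
    dst[get_global_id(0)] = 0.5f*x*(1.0f + erf(x*GELU_ERF_COEF));
}

Since ggml_cl_gelu_erf dispatches the _4 variant with n/4 work-items whenever n is divisible by 4, each kernel can assume exactly one value (float or float4) per work-item, mirroring how the existing gelu kernels are driven.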