path: root/ggml-metal.m
author    Georgi Gerganov <ggerganov@gmail.com>    2024-05-11 16:57:53 +0300
committer Georgi Gerganov <ggerganov@gmail.com>    2024-05-11 21:34:21 +0300
commit    6aeff24f8b91e145e92d17ec7ce3adc4ef60b8e9 (patch)
tree      f629c2b776d15da789525b4752afb4b44fd6a441 /ggml-metal.m
parent    325756d28df7d018a7bac424e1b3bc8acb4ecf07 (diff)
metal : fix indent (ggml/0)
Diffstat (limited to 'ggml-metal.m')
-rw-r--r--    ggml-metal.m    28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/ggml-metal.m b/ggml-metal.m
index 66c398d5..28dec762 100644
--- a/ggml-metal.m
+++ b/ggml-metal.m
@@ -1195,24 +1195,24 @@ static enum ggml_status ggml_metal_graph_compute(
                     [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                 } break;
             case GGML_OP_CLAMP:
-                    {
-                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CLAMP].pipeline;
+                {
+                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CLAMP].pipeline;

-                        float min;
-                        float max;
-                        memcpy(&min, ((int32_t *) dst->op_params) + 0, sizeof(float));
-                        memcpy(&max, ((int32_t *) dst->op_params) + 1, sizeof(float));
+                    float min;
+                    float max;
+                    memcpy(&min, ((int32_t *) dst->op_params) + 0, sizeof(float));
+                    memcpy(&max, ((int32_t *) dst->op_params) + 1, sizeof(float));

-                        [encoder setComputePipelineState:pipeline];
-                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                        [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
-                        [encoder setBytes:&min length:sizeof(min) atIndex:2];
-                        [encoder setBytes:&max length:sizeof(max) atIndex:3];
+                    [encoder setComputePipelineState:pipeline];
+                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                    [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+                    [encoder setBytes:&min length:sizeof(min) atIndex:2];
+                    [encoder setBytes:&max length:sizeof(max) atIndex:3];

-                        const int64_t n = ggml_nelements(dst);
+                    const int64_t n = ggml_nelements(dst);

-                        [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                    } break;
+                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+                } break;
             case GGML_OP_UNARY:
                 switch (ggml_get_unary_op(gf->nodes[i])) {
                     // we are not taking into account the strides, so for now require contiguous tensors