@@ -406,7 +406,8 @@ enum shader_reduction_mode {
     SHADER_REDUCTION_MODE_COUNT,
 };

-static constexpr uint32_t num_argsort_pipelines = 11;
+// Arbitrary limit for argsort size (about a million columns).
+static constexpr uint32_t num_argsort_pipelines = 21;
 static constexpr uint32_t max_argsort_cols = 1 << (num_argsort_pipelines-1);
 static constexpr uint32_t num_topk_moe_pipelines = 10;

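For reference: with num_argsort_pipelines raised to 21, max_argsort_cols becomes 1 << 20 = 1,048,576, which is the "about a million columns" the new comment refers to (previously the limit was 1 << 10 = 1024).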
@@ -526,6 +527,7 @@ struct vk_device_struct {
     bool multi_add;
     bool shader_int64;
     bool buffer_device_address;
+    bool vulkan_memory_model;

     bool add_rms_fusion;
     uint32_t partials_binding_alignment;
@@ -539,6 +541,9 @@ struct vk_device_struct {
     uint32_t subgroup_max_size;
     bool subgroup_require_full_support;

+    // floor(log2(maxComputeWorkGroupInvocations))
+    uint32_t max_workgroup_size_log2 {};
+
     bool coopmat_support;
     bool coopmat_acc_f32_support {};
     bool coopmat_acc_f16_support {};
@@ -683,6 +688,7 @@ struct vk_device_struct {
     vk_pipeline pipeline_rope_multi_f32, pipeline_rope_multi_f16;
     vk_pipeline pipeline_rope_vision_f32, pipeline_rope_vision_f16;
     vk_pipeline pipeline_argsort_f32[num_argsort_pipelines];
+    vk_pipeline pipeline_argsort_large_f32[num_argsort_pipelines];
     vk_pipeline pipeline_sum_rows_f32;
     vk_pipeline pipeline_argmax_f32;
     vk_pipeline pipeline_count_equal_i32;
@@ -1174,7 +1180,11 @@ struct vk_op_soft_max_push_constants {
 struct vk_op_argsort_push_constants {
     uint32_t ncols;
     uint32_t nrows;
-    int32_t order;
+    uint32_t order;
+    uint32_t outer_start;
+    uint32_t outer_end;
+    uint32_t inner_start;
+    uint32_t inner_end;
 };

 struct vk_op_im2col_push_constants {
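The four new outer_start/outer_end/inner_start/inner_end fields let a single dispatch execute only a slice of the sort's passes; the ggml_vk_argsort hunk further down fills them in, and a reference sketch of the pass structure they index follows that hunk.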
@@ -3891,7 +3901,14 @@ static void ggml_vk_load_shaders(vk_device& device) {
     }

     for (uint32_t i = 0; i < num_argsort_pipelines; ++i) {
-        ggml_vk_create_pipeline2(device, device->pipeline_argsort_f32[i], "argsort_f32_"+std::to_string(i), argsort_f32_len, argsort_f32_data, "main", 2, sizeof(vk_op_argsort_push_constants), {1u<<i, 1, 1}, {1u<<i, i}, 1, true);
+        const uint32_t BLOCK_SIZE = 1u << std::min(i, device->max_workgroup_size_log2);
+        const uint32_t NCOLS_PADDED = 1u << i;
+        const uint32_t NCOLS_PADDED_LOG2 = i;
+        if (i <= device->max_workgroup_size_log2 &&
+            2 * sizeof(int) * BLOCK_SIZE <= device->properties.limits.maxComputeSharedMemorySize) {
+            ggml_vk_create_pipeline2(device, device->pipeline_argsort_f32[i], "argsort_f32_"+std::to_string(i), argsort_f32_len, argsort_f32_data, "main", 3, sizeof(vk_op_argsort_push_constants), {BLOCK_SIZE, 1, 1}, {BLOCK_SIZE, NCOLS_PADDED, NCOLS_PADDED_LOG2}, 1, true);
+        }
+        ggml_vk_create_pipeline2(device, device->pipeline_argsort_large_f32[i], "argsort_large_f32_"+std::to_string(i), argsort_large_f32_len, argsort_large_f32_data, "main", 3, sizeof(vk_op_argsort_push_constants), {BLOCK_SIZE, 1, 1}, {BLOCK_SIZE, NCOLS_PADDED, NCOLS_PADDED_LOG2}, 1, true);
     }

     ggml_vk_create_pipeline(device, device->pipeline_argmax_f32, "argmax_f32", argmax_f32_len, argmax_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
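As a concrete example (device limits assumed for illustration only): with maxComputeWorkGroupInvocations = 1024 and maxComputeSharedMemorySize = 48 KiB, max_workgroup_size_log2 is 10, so for i <= 10 both variants are created (at i = 10 the single-workgroup pipeline needs 2 * sizeof(int) * BLOCK_SIZE = 8 KiB of shared memory), while for i = 11..20 only pipeline_argsort_large_f32[i] exists and BLOCK_SIZE stays clamped at 1024.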
@@ -4292,6 +4309,8 @@ static vk_device ggml_vk_get_device(size_t idx) {

     device->integer_dot_product = device->integer_dot_product && shader_integer_dot_product_props.integerDotProduct4x8BitPackedSignedAccelerated;

+    device->max_workgroup_size_log2 = uint32_t(log2f(float(device->properties.limits.maxComputeWorkGroupInvocations)));
+
     std::vector<vk::QueueFamilyProperties> queue_family_props = device->physical_device.getQueueFamilyProperties();

     // Try to find a non-graphics compute queue and transfer-focused queues
@@ -4431,6 +4450,7 @@ static vk_device ggml_vk_get_device(size_t idx) {

     device->shader_int64 = device_features2.features.shaderInt64;
     device->buffer_device_address = vk12_features.bufferDeviceAddress;
+    device->vulkan_memory_model = vk12_features.vulkanMemoryModel;

     if (device->subgroup_size_control) {
         device->subgroup_min_size = subgroup_size_control_props.minSubgroupSize;
@@ -8344,19 +8364,6 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
             }
             return nullptr;
         }
-    case GGML_OP_ARGSORT:
-        if (ctx->num_additional_fused_ops) {
-            uint32_t idx = (uint32_t)ceilf(log2f(float(dst->ne[0])));
-            GGML_ASSERT(idx < num_topk_moe_pipelines);
-            topk_moe_mode mode = ggml_vk_num_additional_ops_to_topk_moe_mode(ctx->num_additional_fused_ops);
-            return ctx->device->pipeline_topk_moe[idx][mode];
-        }
-
-        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_I32) {
-            uint32_t idx = (uint32_t)ceilf(log2f(float(dst->ne[0])));
-            return ctx->device->pipeline_argsort_f32[idx];
-        }
-        return nullptr;
     case GGML_OP_SUM:
     case GGML_OP_SUM_ROWS:
     case GGML_OP_MEAN:
@@ -8748,8 +8755,7 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co
         elements[2] = std::min(elements[2], ctx->device->properties.limits.maxComputeWorkGroupCount[2]);
         break;
     case GGML_OP_ARGSORT:
-        elements = { (uint32_t)ne00, (uint32_t)ggml_nrows(src0), 1 };
-        elements[1] = std::min(elements[1], ctx->device->properties.limits.maxComputeWorkGroupCount[1]);
+        GGML_ASSERT(0);
         break;
     case GGML_OP_IM2COL:
         {
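Note that GGML_OP_ARGSORT now hits GGML_ASSERT(0) in the generic element-count switch, and its case was removed from ggml_vk_op_get_pipeline above: argsort no longer goes through the generic ggml_vk_op_f32 path, since ggml_vk_argsort (next hunk) selects the pipeline and records its own dispatches.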
@@ -9865,16 +9871,81 @@ static void ggml_vk_rope(ggml_backend_vk_context * ctx, vk_context& subctx, cons
 }

 static void ggml_vk_argsort(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
-    int32_t * op_params = (int32_t *)dst->op_params;
+    const uint32_t * op_params = (const uint32_t *)dst->op_params;

     uint32_t ncols = src0->ne[0];
     uint32_t nrows = ggml_nrows(src0);

-    ggml_vk_op_f32<vk_op_argsort_push_constants>(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_ARGSORT, {
-        ncols,
-        nrows,
-        op_params[0],
-    });
+    uint32_t ncols_pad_log2 = (uint32_t)ceilf(log2f(float(ncols)));
+    uint32_t ncolsp2 = 1 << ncols_pad_log2;
+
+    vk_op_argsort_push_constants pc { ncols, nrows, op_params[0], 0, 0, 0, 0, };
+
+    // Use the "small" argsort shader if the whole sort can be done by a single workgroup.
+    bool use_small = ctx->device->pipeline_argsort_f32[ncols_pad_log2] != nullptr;
+
+    vk_pipeline pipeline = use_small ? ctx->device->pipeline_argsort_f32[ncols_pad_log2]
+                                     : ctx->device->pipeline_argsort_large_f32[ncols_pad_log2];
+
+    vk_subbuffer src0_buf = ggml_vk_tensor_subbuffer(ctx, src0);
+    vk_subbuffer dst_buf = ggml_vk_tensor_subbuffer(ctx, dst);
+    vk_subbuffer subbuf1 = dst_buf;
+
+    // Reserve space for ivec2 per element, with rows padded to a power of two
+    if (!use_small) {
+        const size_t x_sz = size_t{ncolsp2} * nrows * 2 * sizeof(int);
+
+        if (ctx->prealloc_size_x < x_sz) {
+            ctx->prealloc_size_x = x_sz;
+            ggml_vk_preallocate_buffers(ctx, subctx);
+        }
+        if (ctx->prealloc_x_need_sync) {
+            ggml_vk_sync_buffers(ctx, subctx);
+        }
+        subbuf1 = { ctx->prealloc_x, 0, ctx->prealloc_x->size };
+    }
+
+    std::array<uint32_t, 3> elements;
+
+    elements[0] = ncolsp2;
+    elements[1] = std::min((uint32_t)ggml_nrows(src0), ctx->device->properties.limits.maxComputeWorkGroupCount[1]);
+    elements[2] = 1;
+
+    // First dispatch initializes tmp_idx and does the first N passes where
+    // there is only communication between threads in the same workgroup.
+    {
+        vk_op_argsort_push_constants pc2 = pc;
+        pc2.outer_start = 0;
+        pc2.outer_end = std::min(ncols_pad_log2, ctx->device->max_workgroup_size_log2);
+        pc2.inner_start = 0;
+        pc2.inner_end = 100;
+        ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
+        ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { src0_buf, subbuf1, dst_buf }, pc2, elements);
+    }
+    if (!use_small) {
+        ggml_vk_sync_buffers(ctx, subctx);
+        // Loop over outer/inner passes, synchronizing between each pass.
+        for (uint32_t outer = ctx->device->max_workgroup_size_log2; outer < ncols_pad_log2; ++outer) {
+            for (uint32_t inner = 0; inner < outer + 1; ++inner) {
+                vk_op_argsort_push_constants pc2 = pc;
+                pc2.outer_start = outer;
+                pc2.outer_end = outer + 1;
+                pc2.inner_start = inner;
+                pc2.inner_end = inner + 1;
+                // When the inner idx is large enough, there's only communication
+                // within a workgroup. So the remaining inner iterations can all
+                // run in the same dispatch.
+                if (outer - inner < ctx->device->max_workgroup_size_log2) {
+                    pc2.inner_end = 100;
+                    inner = outer;
+                }
+                ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1);
+                ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { src0_buf, subbuf1, dst_buf }, pc2, elements);
+                ggml_vk_sync_buffers(ctx, subctx);
+            }
+        }
+        ctx->prealloc_x_need_sync = true;
+    }
 }

 static void ggml_vk_sum(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) {
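The outer/inner push constants walk the stages of a bitonic sorting network over the power-of-two padded row. The CPU sketch below is not part of the patch; it is a minimal reference, assuming the argsort shaders implement the standard bitonic compare-exchange pattern, and it shows why the dispatch loop can batch passes: the compare distance is 2^(outer - inner), so once outer - inner < max_workgroup_size_log2 every remaining inner pass only exchanges elements within one workgroup, and the loop collapses them into a single dispatch (inner_end = 100 simply means "run to the last inner pass").

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Reference only: bitonic argsort of one row, ascending, with keys.size()
    // already a power of two (the shaders additionally handle padding and the
    // requested sort order from op_params[0]).
    static void bitonic_argsort_ref(const std::vector<float> & keys, std::vector<uint32_t> & idx) {
        const uint32_t n = (uint32_t)keys.size();
        idx.resize(n);
        for (uint32_t i = 0; i < n; ++i) idx[i] = i;
        for (uint32_t outer = 0; (1u << outer) < n; ++outer) {       // pc.outer_start .. pc.outer_end
            for (uint32_t inner = 0; inner <= outer; ++inner) {      // pc.inner_start .. pc.inner_end
                const uint32_t stride = 1u << (outer - inner);       // compare-exchange distance
                for (uint32_t i = 0; i < n; ++i) {                   // one shader invocation per i
                    const uint32_t j = i ^ stride;                   // partner element
                    if (j > i) {
                        const bool asc = (i & (2u << outer)) == 0;   // merge direction for this block
                        if ((keys[idx[i]] > keys[idx[j]]) == asc) {
                            std::swap(idx[i], idx[j]);
                        }
                    }
                }
            }
        }
    }

Between dispatches the large path keeps its intermediate state (an ivec2 per element, per the comment in the hunk above) in the preallocated prealloc_x buffer, which is why ggml_vk_sync_buffers separates passes whose compare distance crosses workgroup boundaries.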
@@ -13695,7 +13766,18 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
     case GGML_OP_LOG:
         return op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16;
     case GGML_OP_ARGSORT:
-        return op->ne[0] <= max_argsort_cols;
+        {
+            if (!ggml_is_contiguous(op) || !ggml_is_contiguous(op->src[0])) {
+                return false;
+            }
+            ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
+            auto device = ggml_vk_get_device(ctx->device);
+            if (device->vulkan_memory_model) {
+                return op->ne[0] <= max_argsort_cols;
+            } else {
+                return op->ne[0] <= (1 << device->max_workgroup_size_log2);
+            }
+        }
     case GGML_OP_UPSCALE:
     case GGML_OP_ACC:
     case GGML_OP_CONCAT:
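One consequence of the new supports_op check: devices without the Vulkan 1.2 vulkanMemoryModel feature (cached above as device->vulkan_memory_model) are limited to rows a single workgroup can sort, at most 1 << max_workgroup_size_log2 columns, while devices with it accept up to max_argsort_cols; presumably the multi-dispatch argsort_large shader depends on the Vulkan memory model for coherent buffer accesses, though the shader itself is outside this diff.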