Commit a3d6741

Use llama-graph.h vertical alignment
1 parent: dd9d318


src/models/models.h

Lines changed: 42 additions & 36 deletions
@@ -424,42 +424,48 @@ struct llm_build_qwen3vlmoe : public llm_graph_context {
 struct llm_build_qwen3next : public llm_graph_context_mamba {
     llm_build_qwen3next(const llama_model & model, const llm_graph_params & params);
 private:
-    ggml_tensor * build_qwen3next_attention_layer( ggml_tensor * cur,
-                                                   ggml_tensor * inp_pos,
-                                                   llm_graph_input_attn_kv * inp_attn,
-                                                   const llama_model & model,
-                                                   int64_t n_embd_head,
-                                                   int il);
-    ggml_tensor * build_qwen3next_linear_attn_layer(llm_graph_input_rs * inp,
-                                                    ggml_tensor * cur,
-                                                    const llama_model & model,
-                                                    const llama_ubatch & ubatch,
-                                                    ggml_tensor * causal_mask,
-                                                    ggml_tensor * identity,
-                                                    int il);
-    ggml_tensor * build_layer_ffn( ggml_tensor * cur,
-                                   const llama_model & model,
-                                   int il);
-    ggml_tensor * delta_net_unified( ggml_context * ctx,
-                                     ggml_tensor * q,
-                                     ggml_tensor * k,
-                                     ggml_tensor * v,
-                                     ggml_tensor * g,
-                                     ggml_tensor * beta,
-                                     ggml_tensor * state,
-                                     ggml_tensor * causal_mask,
-                                     ggml_tensor * identity,
-                                     bool use_qk_l2norm,
-                                     float eps_norm,
-                                     int il);
-    ggml_tensor * build_q3n_norm( ggml_tensor * input,
-                                  ggml_tensor * weights,
-                                  int layer);
-
-    ggml_tensor * build_q3n_gated_norm( ggml_tensor * input,
-                                        ggml_tensor * weights,
-                                        ggml_tensor * gate,
-                                        int layer);
+    ggml_tensor * build_qwen3next_attention_layer(
+            ggml_tensor * cur,
+            ggml_tensor * inp_pos,
+            llm_graph_input_attn_kv * inp_attn,
+            const llama_model & model,
+            int64_t n_embd_head,
+            int il);
+    ggml_tensor * build_qwen3next_linear_attn_layer(
+            llm_graph_input_rs * inp,
+            ggml_tensor * cur,
+            const llama_model & model,
+            const llama_ubatch & ubatch,
+            ggml_tensor * causal_mask,
+            ggml_tensor * identity,
+            int il);
+    ggml_tensor * build_layer_ffn(
+            ggml_tensor * cur,
+            const llama_model & model,
+            int il);
+    ggml_tensor * delta_net_unified(
+            ggml_context * ctx,
+            ggml_tensor * q,
+            ggml_tensor * k,
+            ggml_tensor * v,
+            ggml_tensor * g,
+            ggml_tensor * beta,
+            ggml_tensor * state,
+            ggml_tensor * causal_mask,
+            ggml_tensor * identity,
+            bool use_qk_l2norm,
+            float eps_norm,
+            int il);
+    ggml_tensor * build_q3n_norm(
+            ggml_tensor * input,
+            ggml_tensor * weights,
+            int layer);
+
+    ggml_tensor * build_q3n_gated_norm(
+            ggml_tensor * input,
+            ggml_tensor * weights,
+            ggml_tensor * gate,
+            int layer);
 };
 
 struct llm_build_qwen : public llm_graph_context {
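The change is purely stylistic: each multi-line member declaration now ends its first line at the opening parenthesis and lists one parameter per line, matching the declaration style used in llama-graph.h. A minimal sketch of the before/after convention, using a hypothetical function name for illustration only:

    // Before: parameters aligned after the opening parenthesis
    ggml_tensor * build_example_layer( ggml_tensor * cur,
                                       const llama_model & model,
                                       int il);

    // After (llama-graph.h style): opening parenthesis ends the first line,
    // one parameter per line
    ggml_tensor * build_example_layer(
            ggml_tensor * cur,
            const llama_model & model,
            int il);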
