From d44157864cf69dab2e7165f61558365efe53f2eb Mon Sep 17 00:00:00 2001 From: ruiqi Date: Fri, 21 Nov 2025 17:44:06 +0000 Subject: [PATCH 01/14] change default dumping dir for profiling to prevent profile hanging after layer 0 --- llama-tornado | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llama-tornado b/llama-tornado index b59473f2..074e86bc 100755 --- a/llama-tornado +++ b/llama-tornado @@ -422,7 +422,7 @@ def create_parser() -> argparse.ArgumentParser: ) debug_group.add_argument( "--profiler-dump-dir", - default="/home/mikepapadim/repos/gpu-llama3.java/prof.json", + default="/home/ruiqi/GPULlama3.java/prof.json", help="Directory for profiler output", ) From c4a1f742c52d78f1a020d7ae7cd53642f1ace716 Mon Sep 17 00:00:00 2001 From: ruiqi Date: Fri, 21 Nov 2025 17:44:28 +0000 Subject: [PATCH 02/14] modify path to point to a local up to date tornado --- set_paths | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/set_paths b/set_paths index fd807c5e..c61d735f 100644 --- a/set_paths +++ b/set_paths @@ -6,10 +6,10 @@ # Resolve root of this project (LLaMA3) and TornadoVM export LLAMA_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -export TORNADO_ROOT="${LLAMA_ROOT}/external/tornadovm" +export TORNADO_ROOT="/home/ruiqi/TornadoVM_OCL/TornadoVM" # Set the path to TornadoVM SDK binaries -export TORNADO_SDK="${TORNADO_ROOT}/bin/sdk" +export TORNADO_SDK="/home/ruiqi/TornadoVM_OCL/TornadoVM/bin/sdk" # Add TornadoVM and LLaMA bin directories to PATH export PATH="${PATH}:${TORNADO_SDK}:${LLAMA_ROOT}" From 3a2b55ca0bcb2a65ad44f611f7cc42df3162102e Mon Sep 17 00:00:00 2001 From: ruiqi Date: Fri, 21 Nov 2025 17:44:45 +0000 Subject: [PATCH 03/14] python venv --- external/tornadovm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/external/tornadovm b/external/tornadovm index e1d2d12e..6e29a5be 160000 --- a/external/tornadovm +++ b/external/tornadovm @@ -1 +1 @@ -Subproject commit e1d2d12e19f50a8e1d42f15aa0ab3c718bbed2c8 +Subproject commit 6e29a5be7d5e8a70dc780ad9ec5b140a0a09c9c6 From 316726022897ebd2d919767504b9c78321e902b5 Mon Sep 17 00:00:00 2001 From: ruiqi Date: Thu, 27 Nov 2025 10:43:28 +0000 Subject: [PATCH 04/14] experimental changes trying to fuse reduction and map context in FFN layers --- .../TransformerComputeKernelsLayered.java | 35 ++++++++++-- .../layers/type/fp16/LlamaFP16FFNLayers.java | 57 +++++++++++++++++-- .../layers/type/fp16/Phi3FP16FFNLayers.java | 28 +++++++-- .../layers/type/fp16/Qwen2FP16FFNLayers.java | 25 +++++++- .../layers/type/fp16/Qwen3FP16FFNLayers.java | 13 +++-- .../layers/type/q8_0/LlamaQ8_0FFNLayers.java | 26 +++++++-- .../layers/type/q8_0/Phi3Q8_0FFNLayers.java | 28 +++++++-- .../layers/type/q8_0/Qwen2Q8_0FFNLayers.java | 32 ++++++++++- .../layers/type/q8_0/Qwen3Q8_0FFNLayers.java | 14 +++-- 9 files changed, 225 insertions(+), 33 deletions(-) diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java b/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java index dfe4ef27..f8808a86 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java @@ -35,7 +35,7 @@ public TransformerComputeKernelsLayered() { * @param localMemSize * Size of local memory allocation (must match work group size) */ - public static void reductionOneBlockWithLayer(KernelContext context, FloatArray output, FloatArray x, 
int size, float ermsNorm, int localMemSize) { + public static void reductionOneBlockWithLayer(KernelContext context, FloatArray output, FloatArray x, FloatArray weights, FloatArray temp, int size, float ermsNorm, int localMemSize) { int gid = context.globalIdx; int lid = context.localIdx; int groupId = context.groupIdx; @@ -43,6 +43,7 @@ public static void reductionOneBlockWithLayer(KernelContext context, FloatArray // Allocate local memory with the provided size float[] localX = context.allocateFloatLocalArray(localMemSize); + //float[] localPartSum = context.allocateFloatLocalArray(size / localMemSize); // Load input value and compute square if (gid < size) { @@ -63,22 +64,46 @@ public static void reductionOneBlockWithLayer(KernelContext context, FloatArray // Each workgroup stores its partial sum in a different location if (lid == 0) { // Store the partial sum from each workgroup - output.set(groupId + 1, localX[0]); + //output.set(groupId, localX[0]); + temp.set(groupId, localX[0]); + //localPartSum[groupId] = localX[0]; + /*for (int i = 0;i < (size / localMemSize); i++){ + localPartSum[i] = localX[0]; + }*/ } // Only the first thread in the first workgroup computes the final normalization factor - if (gid == 0) { + /*if (gid == 0) { // Combine partial sums from all workgroups float ss = 0.0f; - for (int i = 1; i <= (size / localMemSize); i++) { // Assuming 8 workgroups - ss += output.get(i); + for (int i = 0; i < (size / localMemSize); i++) { // Assuming 8 workgroups + //ss += localPartSum[i]; + //ss += output.get(i); + ss += temp.get(i); } ss /= size; ss += ermsNorm; ss = 1.0f / TornadoMath.sqrt(ss); output.set(0, ss); // Store the final scale factor + }*/ + //output.set(gid, 0.0f); + //System.out.println(output.get(0)); + //float[] ss = context.allocateFloatLocalArray(size); + //ss[gid] = 0.0f; + for (int i = 0; i < (size / localMemSize); i++) { // Assuming 8 workgroups + //ss[gid] += temp.get(i); + output.set(gid, output.get(gid) + temp.get(i)); } + /*ss[gid] /= size; + ss[gid] += ermsNorm; + ss[gid] = 1.0f / TornadoMath.sqrt(ss[gid]); + output.set(gid, weights.get(gid) * (ss[gid] * x.get(gid)));*/ + output.set(gid, output.get(gid) / size); + output.set(gid, output.get(gid) + ermsNorm); + output.set(gid, 1.0f / TornadoMath.sqrt(output.get(gid))); + output.set(gid, weights.get(gid) * (output.get(gid) * x.get(gid))); + //output.set(gid, ss[gid]); // Store the final scale factor } /** diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java index 96acd650..b731f2a8 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java @@ -111,14 +111,39 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, weights.w2Layered[layerIndex].asHalfFloatArray(), weights.w3Layered[layerIndex].asHalfFloatArray()); unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); + /*unifiedLayer + .task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.temp, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), config.dim(), config.rmsNormEps(), state.localSize); + System.out.println("temp 0 value: " + state.temp.get(0)); + System.out.println("temp size: " + state.temp.getSize());*/ + //System.out.printf("%.2f", state.temp.get(0)); unifiedLayer - 
.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.temp, state.wrapX, config.dim(), config.rmsNormEps(), state.localSize); + .task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize); + for (int i = 0; i < state.wrapXb.getSize();i++){ + System.out.println("wrapXb i value: " + state.wrapXb.get(i)); + } + //System.out.println(state.wrapXb); + System.out.println("wrapXb size: " + state.wrapXb.getSize()); if (shouldUseFinalNormalization()) { + System.out.println("reductionFinalNormalization run!"); unifiedLayer.task("reductionFinalNormalization", TransformerComputeKernelsLayered::reductionFinalNormalization, context, state.temp, config.dim(), config.rmsNormEps()); } - unifiedLayer.task("mapContext", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp) - .task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapQ, weights.wqLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), + //unifiedLayer.task("mapContext", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp); + /*for (int i = 0; i < state.wrapXb.getSize();i++){ + System.out.println("wrapXb i value: " + state.wrapXb.get(i)); + }*/ + //System.out.println(state.wrapXb); + /*System.out.println("wrapXb size: " + state.wrapXb.getSize()); + unifiedLayer.task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapQ, weights.wqLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), + LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("kmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapK, weights.wkLayered[layerIndex].asHalfFloatArray(), config.dim(), config.kvDim(), + LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("vmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapV, weights.wvLayered[layerIndex].asHalfFloatArray(), config.dim(), config.kvDim(), + LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("rope", TransformerComputeKernelsLayered::ropeRotation, context, state.positionHolder, state.wrapQ, state.wrapK, config.kvDim(), config.headSize()) + .task("copyToCaches", TransformerComputeKernelsLayered::copyToCache, state.wrapKeyCache, state.wrapK, state.wrapValueCache, state.wrapV, state.positionHolder, config.kvDim(), + layerIndex, config.contextLength());*/ + unifiedLayer.task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapQ, weights.wqLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .task("kmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapK, weights.wkLayered[layerIndex].asHalfFloatArray(), config.dim(), config.kvDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) @@ -128,14 +153,34 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, .task("copyToCaches", TransformerComputeKernelsLayered::copyToCache, state.wrapKeyCache, state.wrapK, state.wrapValueCache, state.wrapV, state.positionHolder, config.kvDim(), layerIndex, config.contextLength()); 
configureAttention(unifiedLayer, layerIndex); + /*unifiedLayer.task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapXb, state.wrapX, weights.woLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), + LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.tempFFN, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), config.dim(), config.rmsNormEps(), state.localSize); + System.out.println("tempFFN 0 size: " + state.tempFFN.get(0)); + System.out.println("tempFFN size: " + state.tempFFN.getSize());*/ unifiedLayer.task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapXb, state.wrapX, weights.woLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.tempFFN, state.wrapX, config.dim(), config.rmsNormEps(), state.localSize); + .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN, config.dim(), config.rmsNormEps(), state.localSize); + for (int i = 0; i < state.wrapXb.getSize();i++){ + System.out.println("wrapXb i value: " + state.wrapXb.get(i)); + } + //System.out.println(state.wrapXb); + System.out.println("wrapXb size: " + state.wrapXb.getSize()); if (shouldUseFinalNormalization()) { + System.out.println("reductionFinalNormalization run!"); unifiedLayer.task("reductionFinalNormalizationFFN", TransformerComputeKernelsLayered::reductionFinalNormalization, context, state.tempFFN, config.dim(), config.rmsNormEps()); } - unifiedLayer.task("mapContextFFN", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN) - .task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, state.wrapXb, state.wrapHb, weights.w1Layered[layerIndex].asHalfFloatArray(), + //unifiedLayer.task("mapContextFFN", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN); + //System.out.println(state.wrapXb); + /*for (int i = 0; i < state.wrapXb.getSize();i++){ + System.out.println("wrapXb i value: " + state.wrapXb.get(i)); + }*/ + /*System.out.println("wrapXb size: " + state.wrapXb.getSize()); + unifiedLayer.task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, state.wrapXb, state.wrapHb, weights.w1Layered[layerIndex].asHalfFloatArray(), + weights.w3Layered[layerIndex].asHalfFloatArray(), config.dim(), config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("projectionTwo", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapHb, state.wrapX, weights.w2Layered[layerIndex].asHalfFloatArray(), config.hiddenDim(), + config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC).persistOnDevice(state.wrapX);*/ + unifiedLayer.task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, state.wrapXb, state.wrapHb, weights.w1Layered[layerIndex].asHalfFloatArray(), weights.w3Layered[layerIndex].asHalfFloatArray(), config.dim(), 
config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .task("projectionTwo", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapHb, state.wrapX, weights.w2Layered[layerIndex].asHalfFloatArray(), config.hiddenDim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC).persistOnDevice(state.wrapX); diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Phi3FP16FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Phi3FP16FFNLayers.java index 9f1c335a..46e92e90 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Phi3FP16FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Phi3FP16FFNLayers.java @@ -166,7 +166,7 @@ TaskGraph setupSinglePhi3FFNLayer(Phi3TornadoWeights weights, int layerIndex) { unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); // RMSNorm for attention input - unifiedLayer.task("reductionsOneBlock", + /*unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, phi3State.temp, @@ -180,7 +180,17 @@ TaskGraph setupSinglePhi3FFNLayer(Phi3TornadoWeights weights, int layerIndex) { phi3State.wrapXb, phi3State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), - phi3State.temp); + phi3State.temp);*/ + unifiedLayer.task("reductionsOneBlock", + TransformerComputeKernelsLayered::reductionOneBlockWithLayer, + context, + phi3State.wrapXb, + phi3State.wrapX, + weights.rms_att_weightLayered[layerIndex].asFloatArray(), + phi3State.temp, + phi3Config.dim(), + phi3Config.rmsNormEps(), + phi3State.localSize); // Combined QKV projection unifiedLayer.task("qkvmatmul", @@ -251,7 +261,7 @@ TaskGraph setupSinglePhi3FFNLayer(Phi3TornadoWeights weights, int layerIndex) { LOCAL_WORK_GROUP_SIZE_ALLOC); // FFN section: RMSNorm - unifiedLayer.task("reductionsOneBlockFFN", + /*unifiedLayer.task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, phi3State.tempFFN, @@ -265,7 +275,17 @@ TaskGraph setupSinglePhi3FFNLayer(Phi3TornadoWeights weights, int layerIndex) { phi3State.wrapXb, phi3State.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), - phi3State.tempFFN); + phi3State.tempFFN);*/ + unifiedLayer.task("reductionsOneBlockFFN", + TransformerComputeKernelsLayered::reductionOneBlockWithLayer, + context, + phi3State.wrapXb, + phi3State.wrapX, + weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), + phi3State.tempFFN, + phi3Config.dim(), + phi3Config.rmsNormEps(), + phi3State.localSize); // FFN: combined Up and Gate projection (outputs 2 * hiddenDim) unifiedLayer.task("wGateUp", diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen2FP16FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen2FP16FFNLayers.java index 858848ea..f122033c 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen2FP16FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen2FP16FFNLayers.java @@ -178,7 +178,7 @@ TaskGraph setupSingleQwen2FFNLayer(Qwen2TornadoWeights weights, int layerIndex) weights.w3Layered[layerIndex].asHalfFloatArray()); // unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); // - unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen2State.temp, qwen2State.wrapX, config.dim(), config.rmsNormEps(), + /*unifiedLayer.task("reductionsOneBlock", 
TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen2State.temp, qwen2State.wrapX, config.dim(), config.rmsNormEps(), qwen2State.localSize) .task("mapContext", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, qwen2State.wrapXb, qwen2State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), qwen2State.temp) @@ -203,6 +203,29 @@ TaskGraph setupSingleQwen2FFNLayer(Qwen2TornadoWeights weights, int layerIndex) qwen2State.tempFFN) .task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, qwen2State.wrapXb, qwen2State.wrapHb, weights.w1Layered[layerIndex].asHalfFloatArray(), weights.w3Layered[layerIndex].asHalfFloatArray(), config.dim(), config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("projectionTwo", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, qwen2State.wrapHb, qwen2State.wrapX, weights.w2Layered[layerIndex].asHalfFloatArray(), + config.hiddenDim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC).persistOnDevice(state.wrapX);*/ + unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen2State.wrapXb, qwen2State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), qwen2State.temp, config.dim(), config.rmsNormEps(), + qwen2State.localSize) + .task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, qwen2State.wrapXb, qwen2State.wrapQ, weights.wqLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), + LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("kmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, qwen2State.wrapXb, qwen2State.wrapK, weights.wkLayered[layerIndex].asHalfFloatArray(), config.dim(), config.kvDim(), + LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("vmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, qwen2State.wrapXb, qwen2State.wrapV, weights.wvLayered[layerIndex].asHalfFloatArray(), config.dim(), config.kvDim(), + LOCAL_WORK_GROUP_SIZE_ALLOC).task("qbias", TransformerComputeKernelsLayered::addInPlace, qwen2State.wrapQ, weights.q_biasLayered[layerIndex].asFloatArray(), config.dim()) + .task("kbias", TransformerComputeKernelsLayered::addInPlace, qwen2State.wrapK, weights.k_biasLayered[layerIndex].asFloatArray(), config.kvDim()) + .task("vbias", TransformerComputeKernelsLayered::addInPlace, qwen2State.wrapV, weights.v_biasLayered[layerIndex].asFloatArray(), config.kvDim()) + .task("rope", Qwen3Kernels::ropeRotation, context, qwen2State.positionHolder, qwen2State.wrapQ, qwen2State.wrapK, config.numberOfKeyValueHeads(), config.headSize()) + .task("copyToCaches", TransformerComputeKernelsLayered::copyToCache, qwen2State.wrapKeyCache, qwen2State.wrapK, qwen2State.wrapValueCache, qwen2State.wrapV, qwen2State.positionHolder, + config.kvDim(), layerIndex, config.contextLength()) + .task("parallel-attention", Qwen2Kernels::processHeadsFlashAttention, context, qwen2State.wrapQ, qwen2State.wrapKeyCache, qwen2State.wrapValueCache, qwen2State.wrapXb, + config.numberOfHeads(), config.headSize(), config.kvDim(), config.kvMul(), qwen2State.positionHolder, layerIndex, config.contextLength()) + .task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, qwen2State.wrapXb, qwen2State.wrapX, weights.woLayered[layerIndex].asHalfFloatArray(), config.dim(), + config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, 
context, qwen2State.wrapXb, qwen2State.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), qwen2State.tempFFN, config.dim(), config.rmsNormEps(), + qwen2State.localSize) + .task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, qwen2State.wrapXb, qwen2State.wrapHb, weights.w1Layered[layerIndex].asHalfFloatArray(), + weights.w3Layered[layerIndex].asHalfFloatArray(), config.dim(), config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .task("projectionTwo", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, qwen2State.wrapHb, qwen2State.wrapX, weights.w2Layered[layerIndex].asHalfFloatArray(), config.hiddenDim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC).persistOnDevice(state.wrapX); diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen3FP16FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen3FP16FFNLayers.java index 379921c3..8c4ef7e6 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen3FP16FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen3FP16FFNLayers.java @@ -193,9 +193,11 @@ TaskGraph setupSingleQwen3FFNLayer(Qwen3TornadoWeights weights, int layerIndex) weights.w3Layered[layerIndex].asHalfFloatArray() // ); unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); - unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen3State.temp, qwen3State.wrapX, // in + /*unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen3State.temp, qwen3State.wrapX, // in qwen3Config.dim(), qwen3Config.rmsNormEps(), qwen3State.localSize).task("mapContext", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, qwen3State.wrapXb, // out - qwen3State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), qwen3State.temp); + qwen3State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), qwen3State.temp);*/ + unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), qwen3State.temp,// in + qwen3Config.dim(), qwen3Config.rmsNormEps(), qwen3State.localSize); int qDim0 = nEmbdHeadK * qwen3Config.numberOfHeads(); int kvDim0 = nEmbdGqa; @@ -247,11 +249,14 @@ TaskGraph setupSingleQwen3FFNLayer(Qwen3TornadoWeights weights, int layerIndex) qwen3Config.dim(), // dim0 = 1024 LOCAL_WORK_GROUP_SIZE_ALLOC); - unifiedLayer.task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen3State.tempFFN, qwen3State.wrapX, qwen3Config.dim(), + /*unifiedLayer.task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen3State.tempFFN, qwen3State.wrapX, qwen3Config.dim(), qwen3Config.rmsNormEps(), qwen3State.localSize) .task("reductionFinalNormalizationFFN", TransformerComputeKernelsLayered::reductionFinalNormalization, context, qwen3State.tempFFN, qwen3Config.dim(), qwen3Config.rmsNormEps()) .task("mapContextFFN", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), - qwen3State.tempFFN); + qwen3State.tempFFN);*/ + //FIXME: restore the reductionFinalNormalizationFFN module later + 
unifiedLayer.task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), qwen3State.tempFFN, qwen3Config.dim(), + qwen3Config.rmsNormEps(), qwen3State.localSize); unifiedLayer.task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, qwen3State.wrapXb, qwen3State.wrapHb, weights.w1Layered[layerIndex].asHalfFloatArray(), weights.w3Layered[layerIndex].asHalfFloatArray(), qwen3Config.dim(), qwen3Config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java index a2d16830..b8e4e3f9 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java @@ -66,12 +66,13 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, weights.woLayered[layerIndex].getScales(), weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), weights.w1Layered[layerIndex].getQuants(), weights.w1Layered[layerIndex].getScales(), weights.w2Layered[layerIndex].getQuants(), weights.w2Layered[layerIndex].getScales(), weights.w3Layered[layerIndex].getQuants(), weights.w3Layered[layerIndex].getScales()); unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); - unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.temp, state.wrapX, config.dim(), config.rmsNormEps(), state.localSize); + //unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.temp, state.wrapX, config.dim(), config.rmsNormEps(), state.localSize); + unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize); if (shouldUseFinalNormalization()) { unifiedLayer.task("reductionFinalNormalization", TransformerComputeKernelsLayered::reductionFinalNormalization, context, state.temp, config.dim(), config.rmsNormEps()); } - unifiedLayer.task("mapContext", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp) + /*unifiedLayer.task("mapContext", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp) .task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapQ, weights.wqLayered[layerIndex].getQuants(), weights.wqLayered[layerIndex].getScales(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .task("kmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapK, weights.wkLayered[layerIndex].getQuants(), @@ -79,20 +80,37 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, .task("vmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapV, weights.wvLayered[layerIndex].getQuants(), weights.wvLayered[layerIndex].getScales(), config.dim(), 
config.kvDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .task("rope", TransformerComputeKernelsLayered::ropeRotation, context, state.positionHolder, state.wrapQ, state.wrapK, config.kvDim(), config.headSize()) + .task("copyToCaches", TransformerComputeKernelsLayered::copyToCache, state.wrapKeyCache, state.wrapK, state.wrapValueCache, state.wrapV, state.positionHolder, config.kvDim(), + layerIndex, config.contextLength());*/ + unifiedLayer.task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapQ, weights.wqLayered[layerIndex].getQuants(), + weights.wqLayered[layerIndex].getScales(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("kmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapK, weights.wkLayered[layerIndex].getQuants(), + weights.wkLayered[layerIndex].getScales(), config.dim(), config.kvDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("vmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapV, weights.wvLayered[layerIndex].getQuants(), + weights.wvLayered[layerIndex].getScales(), config.dim(), config.kvDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("rope", TransformerComputeKernelsLayered::ropeRotation, context, state.positionHolder, state.wrapQ, state.wrapK, config.kvDim(), config.headSize()) .task("copyToCaches", TransformerComputeKernelsLayered::copyToCache, state.wrapKeyCache, state.wrapK, state.wrapValueCache, state.wrapV, state.positionHolder, config.kvDim(), layerIndex, config.contextLength()); configureAttention(unifiedLayer, layerIndex); + /*unifiedLayer.task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapXb, state.wrapX, weights.woLayered[layerIndex].getQuants(), + weights.woLayered[layerIndex].getScales(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.tempFFN, state.wrapX, config.dim(), config.rmsNormEps(), state.localSize);*/ unifiedLayer.task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapXb, state.wrapX, weights.woLayered[layerIndex].getQuants(), weights.woLayered[layerIndex].getScales(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.tempFFN, state.wrapX, config.dim(), config.rmsNormEps(), state.localSize); + .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN, config.dim(), config.rmsNormEps(), state.localSize); if (shouldUseFinalNormalization()) { unifiedLayer.task("reductionFinalNormalizationFFN", TransformerComputeKernelsLayered::reductionFinalNormalization, context, state.tempFFN, config.dim(), config.rmsNormEps()); } - unifiedLayer.task("mapContextFFN", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN) + /*unifiedLayer.task("mapContextFFN", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN) .task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, state.wrapXb, 
state.wrapHb, weights.w1Layered[layerIndex].getQuants(), weights.w1Layered[layerIndex].getScales(), weights.w3Layered[layerIndex].getQuants(), weights.w3Layered[layerIndex].getScales(), config.dim(), config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("projectionTwo", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapHb, state.wrapX, weights.w2Layered[layerIndex].getQuants(), + weights.w2Layered[layerIndex].getScales(), config.hiddenDim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC).persistOnDevice(state.wrapX);*/ + unifiedLayer.task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, state.wrapXb, state.wrapHb, weights.w1Layered[layerIndex].getQuants(), + weights.w1Layered[layerIndex].getScales(), weights.w3Layered[layerIndex].getQuants(), weights.w3Layered[layerIndex].getScales(), config.dim(), config.hiddenDim(), + LOCAL_WORK_GROUP_SIZE_ALLOC) .task("projectionTwo", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapHb, state.wrapX, weights.w2Layered[layerIndex].getQuants(), weights.w2Layered[layerIndex].getScales(), config.hiddenDim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC).persistOnDevice(state.wrapX); return unifiedLayer; diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Phi3Q8_0FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Phi3Q8_0FFNLayers.java index d4328a1d..f10d5a4d 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Phi3Q8_0FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Phi3Q8_0FFNLayers.java @@ -150,7 +150,7 @@ TaskGraph setupSinglePhi3Q8_0FFNLayer(Phi3TornadoWeights weights, int layerIndex unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); // RMSNorm for attention input - unifiedLayer.task("reductionsOneBlock", + /*unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, phi3State.temp, @@ -164,7 +164,17 @@ TaskGraph setupSinglePhi3Q8_0FFNLayer(Phi3TornadoWeights weights, int layerIndex phi3State.wrapXb, phi3State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), - phi3State.temp); + phi3State.temp);*/ + unifiedLayer.task("reductionsOneBlock", + TransformerComputeKernelsLayered::reductionOneBlockWithLayer, + context, + phi3State.wrapXb, + phi3State.wrapX, + weights.rms_att_weightLayered[layerIndex].asFloatArray(), + phi3State.temp, + phi3Config.dim(), + phi3Config.rmsNormEps(), + phi3State.localSize); // Combined QKV projection (quantized) unifiedLayer.task("qkvmatmul", @@ -237,7 +247,7 @@ TaskGraph setupSinglePhi3Q8_0FFNLayer(Phi3TornadoWeights weights, int layerIndex LOCAL_WORK_GROUP_SIZE_ALLOC); // FFN section: RMSNorm - unifiedLayer.task("reductionsOneBlockFFN", + /*unifiedLayer.task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, phi3State.tempFFN, @@ -251,7 +261,17 @@ TaskGraph setupSinglePhi3Q8_0FFNLayer(Phi3TornadoWeights weights, int layerIndex phi3State.wrapXb, phi3State.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), - phi3State.tempFFN); + phi3State.tempFFN);*/ + unifiedLayer.task("reductionsOneBlockFFN", + TransformerComputeKernelsLayered::reductionOneBlockWithLayer, + context, + phi3State.wrapXb, + phi3State.wrapX, + weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), + phi3State.tempFFN, + phi3Config.dim(), + phi3Config.rmsNormEps(), + phi3State.localSize); 
// FFN: combined Up and Gate projection (outputs 2 * hiddenDim, quantized) unifiedLayer.task("wGateUp", diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java index b2d8d773..c05a7aef 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java @@ -187,7 +187,7 @@ TaskGraph setupSingleQwen2Q8_0FFNLayer(Qwen2TornadoWeights weights, int layerInd ); unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); - unifiedLayer.task("reductionsOneBlock" , TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.temp, + /*unifiedLayer.task("reductionsOneBlock" , TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.temp, state.wrapX, config.dim(), config.rmsNormEps(), state.localSize) .task("mapContext", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp) @@ -220,6 +220,36 @@ TaskGraph setupSingleQwen2Q8_0FFNLayer(Qwen2TornadoWeights weights, int layerInd state.wrapHb, state.wrapX, weights.w2Layered[layerIndex].getQuants(), weights.w2Layered[layerIndex].getScales(), config.hiddenDim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .persistOnDevice( state.wrapX + );*/ + unifiedLayer.task("reductionsOneBlock" , TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, + state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize) + .task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, + state.wrapXb, state.wrapQ, weights.wqLayered[layerIndex].getQuants(), weights.wqLayered[layerIndex].getScales(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("kmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, + state.wrapXb, state.wrapK, weights.wkLayered[layerIndex].getQuants(), weights.wkLayered[layerIndex].getScales(), config.dim(), config.kvDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("vmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, + state.wrapXb, state.wrapV, weights.wvLayered[layerIndex].getQuants(), weights.wvLayered[layerIndex].getScales(), config.dim(), config.kvDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("qbias", TransformerComputeKernelsLayered::addInPlace, state.wrapQ, weights.q_biasLayered[layerIndex].asFloatArray(), config.dim()) + .task("kbias", TransformerComputeKernelsLayered::addInPlace, state.wrapK, weights.k_biasLayered[layerIndex].asFloatArray(), config.kvDim()) + .task("vbias", TransformerComputeKernelsLayered::addInPlace, state.wrapV, weights.v_biasLayered[layerIndex].asFloatArray(), config.kvDim()) + .task("rope", Qwen3Kernels::ropeRotation,context, state.positionHolder, state.wrapQ, state.wrapK, config.numberOfKeyValueHeads(), + config.headSize()) + .task("copyToCaches", TransformerComputeKernelsLayered::copyToCache, + state.wrapKeyCache, state.wrapK, state.wrapValueCache, state.wrapV, state.positionHolder, config.kvDim(), layerIndex, config.contextLength()) + .task("parallel-attention", Qwen2Kernels::processHeadsFlashAttention, context, + state.wrapQ, state.wrapKeyCache, state.wrapValueCache, state.wrapXb, + config.numberOfHeads(), config.headSize(), config.kvDim(), 
config.kvMul(), + state.positionHolder, layerIndex, config.contextLength()) + .task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, + state.wrapXb, state.wrapX, weights.woLayered[layerIndex].getQuants(), weights.woLayered[layerIndex].getScales(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, + state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN, config.dim(), config.rmsNormEps(), state.localSize) + .task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, + state.wrapXb, state.wrapHb, weights.w1Layered[layerIndex].getQuants(), weights.w1Layered[layerIndex].getScales(), weights.w3Layered[layerIndex].getQuants(), weights.w3Layered[layerIndex].getScales(), config.dim(), config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) + .task("projectionTwo", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, + state.wrapHb, state.wrapX, weights.w2Layered[layerIndex].getQuants(), weights.w2Layered[layerIndex].getScales(), config.hiddenDim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) + .persistOnDevice( + state.wrapX ); return unifiedLayer; diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen3Q8_0FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen3Q8_0FFNLayers.java index ba090bf5..474d894c 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen3Q8_0FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen3Q8_0FFNLayers.java @@ -186,12 +186,15 @@ TaskGraph setupSingleQwen3FFNLayer(Qwen3TornadoWeights weights, int layerIndex) // RMS norm for attention input - unifiedLayer.task("reductionsOneBlock", + /*unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen3State.temp, qwen3State.wrapX, config.dim(), config.rmsNormEps(), qwen3State.localSize) .task("mapContext", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, - context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), qwen3State.temp); + context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), qwen3State.temp);*/ + unifiedLayer.task("reductionsOneBlock", + TransformerComputeKernelsLayered::reductionOneBlockWithLayer, + context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), qwen3State.temp, config.dim(), config.rmsNormEps(), qwen3State.localSize); // QKV projections with Qwen3 GQA dimensions // Q8_0 weights pass both quants and scales @@ -260,12 +263,15 @@ TaskGraph setupSingleQwen3FFNLayer(Qwen3TornadoWeights weights, int layerIndex) // ========== FEED-FORWARD BLOCK ========== // RMS norm for FFN input - unifiedLayer.task("reductionsOneBlockFFN", + /*unifiedLayer.task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen3State.tempFFN, qwen3State.wrapX, config.dim(), config.rmsNormEps(), qwen3State.localSize) .task("mapContextFFN", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, - context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), qwen3State.tempFFN); + context, qwen3State.wrapXb, qwen3State.wrapX, 
weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), qwen3State.tempFFN);*/ + unifiedLayer.task("reductionsOneBlockFFN", + TransformerComputeKernelsLayered::reductionOneBlockWithLayer, + context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), qwen3State.tempFFN, config.dim(), config.rmsNormEps(), qwen3State.localSize); // Fused FFN: w1(x) ⊗ w3(x) with SiLU activation (Q8_0 weights) unifiedLayer.task("fused_ffn_w1_w3", From 48345ff14b4d101d4ffdd3c5f29e199102b70d09 Mon Sep 17 00:00:00 2001 From: ruiqi Date: Fri, 28 Nov 2025 14:56:13 +0000 Subject: [PATCH 05/14] rms fuse opts --- .../TransformerComputeKernelsLayered.java | 45 ++++++++++++++++--- .../layers/type/fp16/LlamaFP16FFNLayers.java | 39 ++++++---------- 2 files changed, 52 insertions(+), 32 deletions(-) diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java b/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java index f8808a86..5f70a026 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java @@ -70,7 +70,38 @@ public static void reductionOneBlockWithLayer(KernelContext context, FloatArray /*for (int i = 0;i < (size / localMemSize); i++){ localPartSum[i] = localX[0]; }*/ + } //can we remove the if statement here so every thread writes instead of only the first thread (might take up more mem space), and then change the for loop logic index below? + context.globalBarrier(); + + /*float[] localss = context.allocateFloatLocalArray(localMemSize); + localss[lid] = 0.0f; + for (int i = 0; i < (size / localMemSize); i++) { // Assuming 8 workgroups + //ss += localPartSum[i]; + localss[lid] += output.get(i); + //ss += temp.get(i); + } + localss[lid] /= size; + localss[lid] += ermsNorm; + localss[lid] = 1.0f / TornadoMath.sqrt(localss[lid]); + + context.localBarrier(); + + output.set(groupId * groupSize + lid, weights.get(groupId * groupSize + lid) * (localss[lid] * x.get(groupId * groupSize + lid)));*/ + float localss = 0.0f; + for (int i = 0; i < (size / localMemSize); i++) { // Assuming 8 workgroups + //ss += localPartSum[i]; + //localss += output.get(i); + localss += temp.get(i); } + localss /= size; + localss += ermsNorm; + localss = 1.0f / TornadoMath.sqrt(localss); + + //context.localBarrier(); + + output.set(groupId * groupSize + lid, weights.get(groupId * groupSize + lid) * (localss * x.get(groupId * groupSize + lid))); + //output.set(gid, weights.get(gid) * (localss * x.get(gid))); + // Only the first thread in the first workgroup computes the final normalization factor /*if (gid == 0) { @@ -78,8 +109,8 @@ public static void reductionOneBlockWithLayer(KernelContext context, FloatArray float ss = 0.0f; for (int i = 0; i < (size / localMemSize); i++) { // Assuming 8 workgroups //ss += localPartSum[i]; - //ss += output.get(i); - ss += temp.get(i); + ss += output.get(i); + //ss += temp.get(i); } ss /= size; @@ -87,22 +118,24 @@ public static void reductionOneBlockWithLayer(KernelContext context, FloatArray ss = 1.0f / TornadoMath.sqrt(ss); output.set(0, ss); // Store the final scale factor }*/ + //context.localBarrier(); + //output.set(gid, weights.get(gid) * (ss * x.get(gid))); //output.set(gid, 0.0f); //System.out.println(output.get(0)); //float[] ss = context.allocateFloatLocalArray(size); //ss[gid] = 0.0f; - for (int i = 0; i < (size / localMemSize); 
i++) { // Assuming 8 workgroups + /*for (int i = 0; i < (size / localMemSize); i++) { // Assuming 8 workgroups //ss[gid] += temp.get(i); output.set(gid, output.get(gid) + temp.get(i)); - } + }*/ /*ss[gid] /= size; ss[gid] += ermsNorm; ss[gid] = 1.0f / TornadoMath.sqrt(ss[gid]); output.set(gid, weights.get(gid) * (ss[gid] * x.get(gid)));*/ - output.set(gid, output.get(gid) / size); + /*output.set(gid, output.get(gid) / size); output.set(gid, output.get(gid) + ermsNorm); output.set(gid, 1.0f / TornadoMath.sqrt(output.get(gid))); - output.set(gid, weights.get(gid) * (output.get(gid) * x.get(gid))); + output.set(gid, weights.get(gid) * (output.get(gid) * x.get(gid)));*/ //output.set(gid, ss[gid]); // Store the final scale factor } diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java index b731f2a8..50928e9c 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java @@ -112,17 +112,17 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, weights.w3Layered[layerIndex].asHalfFloatArray()); unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); /*unifiedLayer - .task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.temp, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), config.dim(), config.rmsNormEps(), state.localSize); - System.out.println("temp 0 value: " + state.temp.get(0)); - System.out.println("temp size: " + state.temp.getSize());*/ + .task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.temp, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), config.dim(), config.rmsNormEps(), state.localSize);*/ + //System.out.println("temp 0 value: " + state.temp.get(0)); + //System.out.println("temp size: " + state.temp.getSize()); //System.out.printf("%.2f", state.temp.get(0)); unifiedLayer .task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize); - for (int i = 0; i < state.wrapXb.getSize();i++){ + /*for (int i = 0; i < state.wrapXb.getSize();i++){ System.out.println("wrapXb i value: " + state.wrapXb.get(i)); } //System.out.println(state.wrapXb); - System.out.println("wrapXb size: " + state.wrapXb.getSize()); + System.out.println("wrapXb size: " + state.wrapXb.getSize());*/ if (shouldUseFinalNormalization()) { System.out.println("reductionFinalNormalization run!"); unifiedLayer.task("reductionFinalNormalization", TransformerComputeKernelsLayered::reductionFinalNormalization, context, state.temp, @@ -133,16 +133,7 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, System.out.println("wrapXb i value: " + state.wrapXb.get(i)); }*/ //System.out.println(state.wrapXb); - /*System.out.println("wrapXb size: " + state.wrapXb.getSize()); - unifiedLayer.task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapQ, weights.wqLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), - LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("kmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, 
context, state.wrapXb, state.wrapK, weights.wkLayered[layerIndex].asHalfFloatArray(), config.dim(), config.kvDim(), - LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("vmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapV, weights.wvLayered[layerIndex].asHalfFloatArray(), config.dim(), config.kvDim(), - LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("rope", TransformerComputeKernelsLayered::ropeRotation, context, state.positionHolder, state.wrapQ, state.wrapK, config.kvDim(), config.headSize()) - .task("copyToCaches", TransformerComputeKernelsLayered::copyToCache, state.wrapKeyCache, state.wrapK, state.wrapValueCache, state.wrapV, state.positionHolder, config.kvDim(), - layerIndex, config.contextLength());*/ + //System.out.println("wrapXb size: " + state.wrapXb.getSize()); unifiedLayer.task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapQ, weights.wqLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .task("kmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapK, weights.wkLayered[layerIndex].asHalfFloatArray(), config.dim(), config.kvDim(), @@ -155,17 +146,17 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, configureAttention(unifiedLayer, layerIndex); /*unifiedLayer.task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapXb, state.wrapX, weights.woLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.tempFFN, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), config.dim(), config.rmsNormEps(), state.localSize); - System.out.println("tempFFN 0 size: " + state.tempFFN.get(0)); - System.out.println("tempFFN size: " + state.tempFFN.getSize());*/ + .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.tempFFN, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), config.dim(), config.rmsNormEps(), state.localSize);*/ + //System.out.println("tempFFN 0 size: " + state.tempFFN.get(0)); + //System.out.println("tempFFN size: " + state.tempFFN.getSize()); unifiedLayer.task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapXb, state.wrapX, weights.woLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN, config.dim(), config.rmsNormEps(), state.localSize); - for (int i = 0; i < state.wrapXb.getSize();i++){ + .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize); + /*for (int i = 0; i < state.wrapXb.getSize();i++){ System.out.println("wrapXb i value: " + state.wrapXb.get(i)); } //System.out.println(state.wrapXb); - System.out.println("wrapXb size: " + state.wrapXb.getSize()); + System.out.println("wrapXb size: " + state.wrapXb.getSize());*/ if (shouldUseFinalNormalization()) { System.out.println("reductionFinalNormalization run!"); 
unifiedLayer.task("reductionFinalNormalizationFFN", TransformerComputeKernelsLayered::reductionFinalNormalization, context, state.tempFFN, config.dim(), config.rmsNormEps()); @@ -175,11 +166,7 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, /*for (int i = 0; i < state.wrapXb.getSize();i++){ System.out.println("wrapXb i value: " + state.wrapXb.get(i)); }*/ - /*System.out.println("wrapXb size: " + state.wrapXb.getSize()); - unifiedLayer.task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, state.wrapXb, state.wrapHb, weights.w1Layered[layerIndex].asHalfFloatArray(), - weights.w3Layered[layerIndex].asHalfFloatArray(), config.dim(), config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("projectionTwo", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapHb, state.wrapX, weights.w2Layered[layerIndex].asHalfFloatArray(), config.hiddenDim(), - config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC).persistOnDevice(state.wrapX);*/ + //System.out.println("wrapXb size: " + state.wrapXb.getSize()); unifiedLayer.task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, state.wrapXb, state.wrapHb, weights.w1Layered[layerIndex].asHalfFloatArray(), weights.w3Layered[layerIndex].asHalfFloatArray(), config.dim(), config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .task("projectionTwo", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapHb, state.wrapX, weights.w2Layered[layerIndex].asHalfFloatArray(), config.hiddenDim(), From f69e9d90f2f98cf572a76a7d656f2a83c7932003 Mon Sep 17 00:00:00 2001 From: ruiqi Date: Mon, 1 Dec 2025 14:36:03 +0000 Subject: [PATCH 06/14] remove comments, refactor host code to reflect the merge of reduction and context mapping kernels --- .../TransformerComputeKernelsLayered.java | 62 +------------------ .../layers/type/fp16/LlamaFP16FFNLayers.java | 41 ------------ .../layers/type/fp16/Phi3FP16FFNLayers.java | 30 --------- .../layers/type/fp16/Qwen2FP16FFNLayers.java | 27 -------- .../layers/type/fp16/Qwen3FP16FFNLayers.java | 8 --- .../layers/type/q8_0/LlamaQ8_0FFNLayers.java | 28 --------- .../layers/type/q8_0/Phi3Q8_0FFNLayers.java | 30 --------- .../layers/type/q8_0/Qwen2Q8_0FFNLayers.java | 34 ---------- .../layers/type/q8_0/Qwen3Q8_0FFNLayers.java | 12 ---- 9 files changed, 1 insertion(+), 271 deletions(-) diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java b/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java index 5f70a026..41dfa81f 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java @@ -43,7 +43,6 @@ public static void reductionOneBlockWithLayer(KernelContext context, FloatArray // Allocate local memory with the provided size float[] localX = context.allocateFloatLocalArray(localMemSize); - //float[] localPartSum = context.allocateFloatLocalArray(size / localMemSize); // Load input value and compute square if (gid < size) { @@ -64,79 +63,20 @@ public static void reductionOneBlockWithLayer(KernelContext context, FloatArray // Each workgroup stores its partial sum in a different location if (lid == 0) { // Store the partial sum from each workgroup - //output.set(groupId, localX[0]); temp.set(groupId, localX[0]); - //localPartSum[groupId] = 
localX[0]; - /*for (int i = 0;i < (size / localMemSize); i++){ - localPartSum[i] = localX[0]; - }*/ - } //can we remove the if statement here so every thread writes instead of only the first thread (might take up more mem space), and then change the for loop logic index below? - context.globalBarrier(); - - /*float[] localss = context.allocateFloatLocalArray(localMemSize); - localss[lid] = 0.0f; - for (int i = 0; i < (size / localMemSize); i++) { // Assuming 8 workgroups - //ss += localPartSum[i]; - localss[lid] += output.get(i); - //ss += temp.get(i); } - localss[lid] /= size; - localss[lid] += ermsNorm; - localss[lid] = 1.0f / TornadoMath.sqrt(localss[lid]); - context.localBarrier(); + context.globalBarrier(); - output.set(groupId * groupSize + lid, weights.get(groupId * groupSize + lid) * (localss[lid] * x.get(groupId * groupSize + lid)));*/ float localss = 0.0f; for (int i = 0; i < (size / localMemSize); i++) { // Assuming 8 workgroups - //ss += localPartSum[i]; - //localss += output.get(i); localss += temp.get(i); } localss /= size; localss += ermsNorm; localss = 1.0f / TornadoMath.sqrt(localss); - //context.localBarrier(); - output.set(groupId * groupSize + lid, weights.get(groupId * groupSize + lid) * (localss * x.get(groupId * groupSize + lid))); - //output.set(gid, weights.get(gid) * (localss * x.get(gid))); - - - // Only the first thread in the first workgroup computes the final normalization factor - /*if (gid == 0) { - // Combine partial sums from all workgroups - float ss = 0.0f; - for (int i = 0; i < (size / localMemSize); i++) { // Assuming 8 workgroups - //ss += localPartSum[i]; - ss += output.get(i); - //ss += temp.get(i); - } - - ss /= size; - ss += ermsNorm; - ss = 1.0f / TornadoMath.sqrt(ss); - output.set(0, ss); // Store the final scale factor - }*/ - //context.localBarrier(); - //output.set(gid, weights.get(gid) * (ss * x.get(gid))); - //output.set(gid, 0.0f); - //System.out.println(output.get(0)); - //float[] ss = context.allocateFloatLocalArray(size); - //ss[gid] = 0.0f; - /*for (int i = 0; i < (size / localMemSize); i++) { // Assuming 8 workgroups - //ss[gid] += temp.get(i); - output.set(gid, output.get(gid) + temp.get(i)); - }*/ - /*ss[gid] /= size; - ss[gid] += ermsNorm; - ss[gid] = 1.0f / TornadoMath.sqrt(ss[gid]); - output.set(gid, weights.get(gid) * (ss[gid] * x.get(gid)));*/ - /*output.set(gid, output.get(gid) / size); - output.set(gid, output.get(gid) + ermsNorm); - output.set(gid, 1.0f / TornadoMath.sqrt(output.get(gid))); - output.set(gid, weights.get(gid) * (output.get(gid) * x.get(gid)));*/ - //output.set(gid, ss[gid]); // Store the final scale factor } /** diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java index 50928e9c..0d975f33 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java @@ -111,29 +111,8 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, weights.w2Layered[layerIndex].asHalfFloatArray(), weights.w3Layered[layerIndex].asHalfFloatArray()); unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); - /*unifiedLayer - .task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.temp, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), config.dim(), 
config.rmsNormEps(), state.localSize);*/ - //System.out.println("temp 0 value: " + state.temp.get(0)); - //System.out.println("temp size: " + state.temp.getSize()); - //System.out.printf("%.2f", state.temp.get(0)); unifiedLayer .task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize); - /*for (int i = 0; i < state.wrapXb.getSize();i++){ - System.out.println("wrapXb i value: " + state.wrapXb.get(i)); - } - //System.out.println(state.wrapXb); - System.out.println("wrapXb size: " + state.wrapXb.getSize());*/ - if (shouldUseFinalNormalization()) { - System.out.println("reductionFinalNormalization run!"); - unifiedLayer.task("reductionFinalNormalization", TransformerComputeKernelsLayered::reductionFinalNormalization, context, state.temp, - config.dim(), config.rmsNormEps()); - } - //unifiedLayer.task("mapContext", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp); - /*for (int i = 0; i < state.wrapXb.getSize();i++){ - System.out.println("wrapXb i value: " + state.wrapXb.get(i)); - }*/ - //System.out.println(state.wrapXb); - //System.out.println("wrapXb size: " + state.wrapXb.getSize()); unifiedLayer.task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapQ, weights.wqLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .task("kmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapK, weights.wkLayered[layerIndex].asHalfFloatArray(), config.dim(), config.kvDim(), @@ -144,29 +123,9 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, .task("copyToCaches", TransformerComputeKernelsLayered::copyToCache, state.wrapKeyCache, state.wrapK, state.wrapValueCache, state.wrapV, state.positionHolder, config.kvDim(), layerIndex, config.contextLength()); configureAttention(unifiedLayer, layerIndex); - /*unifiedLayer.task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapXb, state.wrapX, weights.woLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), - LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.tempFFN, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), config.dim(), config.rmsNormEps(), state.localSize);*/ - //System.out.println("tempFFN 0 size: " + state.tempFFN.get(0)); - //System.out.println("tempFFN size: " + state.tempFFN.getSize()); unifiedLayer.task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapXb, state.wrapX, weights.woLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize); - /*for (int i = 0; i < state.wrapXb.getSize();i++){ - System.out.println("wrapXb i value: " + state.wrapXb.get(i)); - } - //System.out.println(state.wrapXb); - System.out.println("wrapXb size: " + state.wrapXb.getSize());*/ - if 
(shouldUseFinalNormalization()) { - System.out.println("reductionFinalNormalization run!"); - unifiedLayer.task("reductionFinalNormalizationFFN", TransformerComputeKernelsLayered::reductionFinalNormalization, context, state.tempFFN, config.dim(), config.rmsNormEps()); - } - //unifiedLayer.task("mapContextFFN", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN); - //System.out.println(state.wrapXb); - /*for (int i = 0; i < state.wrapXb.getSize();i++){ - System.out.println("wrapXb i value: " + state.wrapXb.get(i)); - }*/ - //System.out.println("wrapXb size: " + state.wrapXb.getSize()); unifiedLayer.task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, state.wrapXb, state.wrapHb, weights.w1Layered[layerIndex].asHalfFloatArray(), weights.w3Layered[layerIndex].asHalfFloatArray(), config.dim(), config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .task("projectionTwo", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapHb, state.wrapX, weights.w2Layered[layerIndex].asHalfFloatArray(), config.hiddenDim(), diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Phi3FP16FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Phi3FP16FFNLayers.java index 46e92e90..a2f0830c 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Phi3FP16FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Phi3FP16FFNLayers.java @@ -166,21 +166,6 @@ TaskGraph setupSinglePhi3FFNLayer(Phi3TornadoWeights weights, int layerIndex) { unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); // RMSNorm for attention input - /*unifiedLayer.task("reductionsOneBlock", - TransformerComputeKernelsLayered::reductionOneBlockWithLayer, - context, - phi3State.temp, - phi3State.wrapX, - phi3Config.dim(), - phi3Config.rmsNormEps(), - phi3State.localSize) - .task("mapContext", - TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, - context, - phi3State.wrapXb, - phi3State.wrapX, - weights.rms_att_weightLayered[layerIndex].asFloatArray(), - phi3State.temp);*/ unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, @@ -261,21 +246,6 @@ TaskGraph setupSinglePhi3FFNLayer(Phi3TornadoWeights weights, int layerIndex) { LOCAL_WORK_GROUP_SIZE_ALLOC); // FFN section: RMSNorm - /*unifiedLayer.task("reductionsOneBlockFFN", - TransformerComputeKernelsLayered::reductionOneBlockWithLayer, - context, - phi3State.tempFFN, - phi3State.wrapX, - phi3Config.dim(), - phi3Config.rmsNormEps(), - phi3State.localSize) - .task("mapContextFFN", - TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, - context, - phi3State.wrapXb, - phi3State.wrapX, - weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), - phi3State.tempFFN);*/ unifiedLayer.task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen2FP16FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen2FP16FFNLayers.java index f122033c..efe76355 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen2FP16FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen2FP16FFNLayers.java @@ -178,33 +178,6 @@ TaskGraph 
setupSingleQwen2FFNLayer(Qwen2TornadoWeights weights, int layerIndex) weights.w3Layered[layerIndex].asHalfFloatArray()); // unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); // - /*unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen2State.temp, qwen2State.wrapX, config.dim(), config.rmsNormEps(), - qwen2State.localSize) - .task("mapContext", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, qwen2State.wrapXb, qwen2State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), - qwen2State.temp) - .task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, qwen2State.wrapXb, qwen2State.wrapQ, weights.wqLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), - LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("kmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, qwen2State.wrapXb, qwen2State.wrapK, weights.wkLayered[layerIndex].asHalfFloatArray(), config.dim(), config.kvDim(), - LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("vmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, qwen2State.wrapXb, qwen2State.wrapV, weights.wvLayered[layerIndex].asHalfFloatArray(), config.dim(), config.kvDim(), - LOCAL_WORK_GROUP_SIZE_ALLOC).task("qbias", TransformerComputeKernelsLayered::addInPlace, qwen2State.wrapQ, weights.q_biasLayered[layerIndex].asFloatArray(), config.dim()) - .task("kbias", TransformerComputeKernelsLayered::addInPlace, qwen2State.wrapK, weights.k_biasLayered[layerIndex].asFloatArray(), config.kvDim()) - .task("vbias", TransformerComputeKernelsLayered::addInPlace, qwen2State.wrapV, weights.v_biasLayered[layerIndex].asFloatArray(), config.kvDim()) - .task("rope", Qwen3Kernels::ropeRotation, context, qwen2State.positionHolder, qwen2State.wrapQ, qwen2State.wrapK, config.numberOfKeyValueHeads(), config.headSize()) - .task("copyToCaches", TransformerComputeKernelsLayered::copyToCache, qwen2State.wrapKeyCache, qwen2State.wrapK, qwen2State.wrapValueCache, qwen2State.wrapV, qwen2State.positionHolder, - config.kvDim(), layerIndex, config.contextLength()) - .task("parallel-attention", Qwen2Kernels::processHeadsFlashAttention, context, qwen2State.wrapQ, qwen2State.wrapKeyCache, qwen2State.wrapValueCache, qwen2State.wrapXb, - config.numberOfHeads(), config.headSize(), config.kvDim(), config.kvMul(), qwen2State.positionHolder, layerIndex, config.contextLength()) - .task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, qwen2State.wrapXb, qwen2State.wrapX, weights.woLayered[layerIndex].asHalfFloatArray(), config.dim(), - config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen2State.tempFFN, qwen2State.wrapX, config.dim(), config.rmsNormEps(), - qwen2State.localSize) - .task("mapContextFFN", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, qwen2State.wrapXb, qwen2State.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), - qwen2State.tempFFN) - .task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, qwen2State.wrapXb, qwen2State.wrapHb, weights.w1Layered[layerIndex].asHalfFloatArray(), - weights.w3Layered[layerIndex].asHalfFloatArray(), config.dim(), config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("projectionTwo", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, qwen2State.wrapHb, 
qwen2State.wrapX, weights.w2Layered[layerIndex].asHalfFloatArray(), - config.hiddenDim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC).persistOnDevice(state.wrapX);*/ unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen2State.wrapXb, qwen2State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), qwen2State.temp, config.dim(), config.rmsNormEps(), qwen2State.localSize) .task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, qwen2State.wrapXb, qwen2State.wrapQ, weights.wqLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen3FP16FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen3FP16FFNLayers.java index 8c4ef7e6..e045d3c5 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen3FP16FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen3FP16FFNLayers.java @@ -193,9 +193,6 @@ TaskGraph setupSingleQwen3FFNLayer(Qwen3TornadoWeights weights, int layerIndex) weights.w3Layered[layerIndex].asHalfFloatArray() // ); unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); - /*unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen3State.temp, qwen3State.wrapX, // in - qwen3Config.dim(), qwen3Config.rmsNormEps(), qwen3State.localSize).task("mapContext", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, qwen3State.wrapXb, // out - qwen3State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), qwen3State.temp);*/ unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), qwen3State.temp,// in qwen3Config.dim(), qwen3Config.rmsNormEps(), qwen3State.localSize); @@ -249,11 +246,6 @@ TaskGraph setupSingleQwen3FFNLayer(Qwen3TornadoWeights weights, int layerIndex) qwen3Config.dim(), // dim0 = 1024 LOCAL_WORK_GROUP_SIZE_ALLOC); - /*unifiedLayer.task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen3State.tempFFN, qwen3State.wrapX, qwen3Config.dim(), - qwen3Config.rmsNormEps(), qwen3State.localSize) - .task("reductionFinalNormalizationFFN", TransformerComputeKernelsLayered::reductionFinalNormalization, context, qwen3State.tempFFN, qwen3Config.dim(), qwen3Config.rmsNormEps()) - .task("mapContextFFN", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), - qwen3State.tempFFN);*/ //FIXME: restore the reductionFinalNormalizationFFN module later unifiedLayer.task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), qwen3State.tempFFN, qwen3Config.dim(), qwen3Config.rmsNormEps(), qwen3State.localSize); diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java index b8e4e3f9..f8c87c7d 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java +++ 
b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java @@ -66,22 +66,7 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, weights.woLayered[layerIndex].getScales(), weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), weights.w1Layered[layerIndex].getQuants(), weights.w1Layered[layerIndex].getScales(), weights.w2Layered[layerIndex].getQuants(), weights.w2Layered[layerIndex].getScales(), weights.w3Layered[layerIndex].getQuants(), weights.w3Layered[layerIndex].getScales()); unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); - //unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.temp, state.wrapX, config.dim(), config.rmsNormEps(), state.localSize); unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize); - if (shouldUseFinalNormalization()) { - unifiedLayer.task("reductionFinalNormalization", TransformerComputeKernelsLayered::reductionFinalNormalization, context, state.temp, - config.dim(), config.rmsNormEps()); - } - /*unifiedLayer.task("mapContext", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp) - .task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapQ, weights.wqLayered[layerIndex].getQuants(), - weights.wqLayered[layerIndex].getScales(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("kmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapK, weights.wkLayered[layerIndex].getQuants(), - weights.wkLayered[layerIndex].getScales(), config.dim(), config.kvDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("vmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapV, weights.wvLayered[layerIndex].getQuants(), - weights.wvLayered[layerIndex].getScales(), config.dim(), config.kvDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("rope", TransformerComputeKernelsLayered::ropeRotation, context, state.positionHolder, state.wrapQ, state.wrapK, config.kvDim(), config.headSize()) - .task("copyToCaches", TransformerComputeKernelsLayered::copyToCache, state.wrapKeyCache, state.wrapK, state.wrapValueCache, state.wrapV, state.positionHolder, config.kvDim(), - layerIndex, config.contextLength());*/ unifiedLayer.task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapQ, weights.wqLayered[layerIndex].getQuants(), weights.wqLayered[layerIndex].getScales(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .task("kmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapK, weights.wkLayered[layerIndex].getQuants(), @@ -92,22 +77,9 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, .task("copyToCaches", TransformerComputeKernelsLayered::copyToCache, state.wrapKeyCache, state.wrapK, state.wrapValueCache, state.wrapV, state.positionHolder, config.kvDim(), layerIndex, config.contextLength()); configureAttention(unifiedLayer, layerIndex); - /*unifiedLayer.task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapXb, state.wrapX, 
weights.woLayered[layerIndex].getQuants(), - weights.woLayered[layerIndex].getScales(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.tempFFN, state.wrapX, config.dim(), config.rmsNormEps(), state.localSize);*/ unifiedLayer.task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapXb, state.wrapX, weights.woLayered[layerIndex].getQuants(), weights.woLayered[layerIndex].getScales(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN, config.dim(), config.rmsNormEps(), state.localSize); - if (shouldUseFinalNormalization()) { - unifiedLayer.task("reductionFinalNormalizationFFN", TransformerComputeKernelsLayered::reductionFinalNormalization, context, state.tempFFN, - config.dim(), config.rmsNormEps()); - } - /*unifiedLayer.task("mapContextFFN", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN) - .task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, state.wrapXb, state.wrapHb, weights.w1Layered[layerIndex].getQuants(), - weights.w1Layered[layerIndex].getScales(), weights.w3Layered[layerIndex].getQuants(), weights.w3Layered[layerIndex].getScales(), config.dim(), config.hiddenDim(), - LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("projectionTwo", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapHb, state.wrapX, weights.w2Layered[layerIndex].getQuants(), - weights.w2Layered[layerIndex].getScales(), config.hiddenDim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC).persistOnDevice(state.wrapX);*/ unifiedLayer.task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, state.wrapXb, state.wrapHb, weights.w1Layered[layerIndex].getQuants(), weights.w1Layered[layerIndex].getScales(), weights.w3Layered[layerIndex].getQuants(), weights.w3Layered[layerIndex].getScales(), config.dim(), config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Phi3Q8_0FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Phi3Q8_0FFNLayers.java index f10d5a4d..4960c090 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Phi3Q8_0FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Phi3Q8_0FFNLayers.java @@ -150,21 +150,6 @@ TaskGraph setupSinglePhi3Q8_0FFNLayer(Phi3TornadoWeights weights, int layerIndex unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); // RMSNorm for attention input - /*unifiedLayer.task("reductionsOneBlock", - TransformerComputeKernelsLayered::reductionOneBlockWithLayer, - context, - phi3State.temp, - phi3State.wrapX, - phi3Config.dim(), - phi3Config.rmsNormEps(), - phi3State.localSize) - .task("mapContext", - TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, - context, - phi3State.wrapXb, - phi3State.wrapX, - weights.rms_att_weightLayered[layerIndex].asFloatArray(), - phi3State.temp);*/ unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, @@ -247,21 +232,6 @@ 
TaskGraph setupSinglePhi3Q8_0FFNLayer(Phi3TornadoWeights weights, int layerIndex LOCAL_WORK_GROUP_SIZE_ALLOC); // FFN section: RMSNorm - /*unifiedLayer.task("reductionsOneBlockFFN", - TransformerComputeKernelsLayered::reductionOneBlockWithLayer, - context, - phi3State.tempFFN, - phi3State.wrapX, - phi3Config.dim(), - phi3Config.rmsNormEps(), - phi3State.localSize) - .task("mapContextFFN", - TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, - context, - phi3State.wrapXb, - phi3State.wrapX, - weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), - phi3State.tempFFN);*/ unifiedLayer.task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java index c05a7aef..f016604a 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java @@ -187,40 +187,6 @@ TaskGraph setupSingleQwen2Q8_0FFNLayer(Qwen2TornadoWeights weights, int layerInd ); unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); - /*unifiedLayer.task("reductionsOneBlock" , TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.temp, - state.wrapX, config.dim(), config.rmsNormEps(), state.localSize) - .task("mapContext", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, - state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp) - .task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, - state.wrapXb, state.wrapQ, weights.wqLayered[layerIndex].getQuants(), weights.wqLayered[layerIndex].getScales(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("kmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, - state.wrapXb, state.wrapK, weights.wkLayered[layerIndex].getQuants(), weights.wkLayered[layerIndex].getScales(), config.dim(), config.kvDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("vmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, - state.wrapXb, state.wrapV, weights.wvLayered[layerIndex].getQuants(), weights.wvLayered[layerIndex].getScales(), config.dim(), config.kvDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("qbias", TransformerComputeKernelsLayered::addInPlace, state.wrapQ, weights.q_biasLayered[layerIndex].asFloatArray(), config.dim()) - .task("kbias", TransformerComputeKernelsLayered::addInPlace, state.wrapK, weights.k_biasLayered[layerIndex].asFloatArray(), config.kvDim()) - .task("vbias", TransformerComputeKernelsLayered::addInPlace, state.wrapV, weights.v_biasLayered[layerIndex].asFloatArray(), config.kvDim()) - .task("rope", Qwen3Kernels::ropeRotation,context, state.positionHolder, state.wrapQ, state.wrapK, config.numberOfKeyValueHeads(), - config.headSize()) - .task("copyToCaches", TransformerComputeKernelsLayered::copyToCache, - state.wrapKeyCache, state.wrapK, state.wrapValueCache, state.wrapV, state.positionHolder, config.kvDim(), layerIndex, config.contextLength()) - .task("parallel-attention", Qwen2Kernels::processHeadsFlashAttention, context, - state.wrapQ, state.wrapKeyCache, state.wrapValueCache, state.wrapXb, - config.numberOfHeads(), config.headSize(), config.kvDim(), config.kvMul(), - state.positionHolder, layerIndex, config.contextLength()) - .task("matmul1", 
TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, - state.wrapXb, state.wrapX, weights.woLayered[layerIndex].getQuants(), weights.woLayered[layerIndex].getScales(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.tempFFN, - state.wrapX, config.dim(), config.rmsNormEps(), state.localSize) - .task("mapContextFFN", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, - state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN) - .task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, - state.wrapXb, state.wrapHb, weights.w1Layered[layerIndex].getQuants(), weights.w1Layered[layerIndex].getScales(), weights.w3Layered[layerIndex].getQuants(), weights.w3Layered[layerIndex].getScales(), config.dim(), config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("projectionTwo", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, - state.wrapHb, state.wrapX, weights.w2Layered[layerIndex].getQuants(), weights.w2Layered[layerIndex].getScales(), config.hiddenDim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .persistOnDevice( - state.wrapX - );*/ unifiedLayer.task("reductionsOneBlock" , TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize) .task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen3Q8_0FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen3Q8_0FFNLayers.java index 474d894c..2274959a 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen3Q8_0FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen3Q8_0FFNLayers.java @@ -186,12 +186,6 @@ TaskGraph setupSingleQwen3FFNLayer(Qwen3TornadoWeights weights, int layerIndex) // RMS norm for attention input - /*unifiedLayer.task("reductionsOneBlock", - TransformerComputeKernelsLayered::reductionOneBlockWithLayer, - context, qwen3State.temp, qwen3State.wrapX, config.dim(), config.rmsNormEps(), qwen3State.localSize) - .task("mapContext", - TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, - context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), qwen3State.temp);*/ unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), qwen3State.temp, config.dim(), config.rmsNormEps(), qwen3State.localSize); @@ -263,12 +257,6 @@ TaskGraph setupSingleQwen3FFNLayer(Qwen3TornadoWeights weights, int layerIndex) // ========== FEED-FORWARD BLOCK ========== // RMS norm for FFN input - /*unifiedLayer.task("reductionsOneBlockFFN", - TransformerComputeKernelsLayered::reductionOneBlockWithLayer, - context, qwen3State.tempFFN, qwen3State.wrapX, config.dim(), config.rmsNormEps(), qwen3State.localSize) - .task("mapContextFFN", - TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, - context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), qwen3State.tempFFN);*/ 
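// Editorial sketch, not part of the patch: the commented-out block removed above wired RMS
// normalization as two tasks (a reduction into tempFFN followed by a mapContextFFN pass), and the
// single fused task kept below has to reproduce the same result. A minimal pure-Java reference of
// that two-phase contract, assuming plain float[] buffers instead of TornadoVM FloatArray:
final class RmsNormReference {
    // Phase 1: reduce x to a single scale factor 1 / sqrt(mean(x^2) + eps).
    static float reduce(float[] x, float eps) {
        float ss = 0.0f;
        for (float v : x) {
            ss += v * v;
        }
        ss = ss / x.length + eps;
        return (float) (1.0 / Math.sqrt(ss));
    }

    // Phase 2: apply the scale to every element, weighted per dimension.
    static void map(float[] out, float[] x, float[] weights, float scale) {
        for (int i = 0; i < x.length; i++) {
            out[i] = weights[i] * (scale * x[i]);
        }
    }
}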
unifiedLayer.task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), qwen3State.tempFFN, config.dim(), config.rmsNormEps(), qwen3State.localSize); From 7c1a63ef3c34777bbf36bbc7c8740326ba5957a7 Mon Sep 17 00:00:00 2001 From: ruiqi Date: Tue, 9 Dec 2025 11:55:26 +0000 Subject: [PATCH 07/14] fix the fused reduction kernel and refactor the host code accordingly, now it should work on both OpenCL and PTX backends, with all the FP16 and Q8 models --- .../TransformerComputeKernelsLayered.java | 207 +++++++++++------- .../layers/type/fp16/LlamaFP16FFNLayers.java | 9 +- .../layers/type/fp16/Phi3FP16FFNLayers.java | 27 ++- .../layers/type/fp16/Qwen2FP16FFNLayers.java | 6 +- .../layers/type/fp16/Qwen3FP16FFNLayers.java | 7 +- .../layers/type/q8_0/LlamaQ8_0FFNLayers.java | 7 +- .../layers/type/q8_0/Phi3Q8_0FFNLayers.java | 6 +- .../layers/type/q8_0/Qwen2Q8_0FFNLayers.java | 6 +- .../layers/type/q8_0/Qwen3Q8_0FFNLayers.java | 6 +- 9 files changed, 160 insertions(+), 121 deletions(-) diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java b/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java index 41dfa81f..1a80edc5 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java @@ -35,7 +35,8 @@ public TransformerComputeKernelsLayered() { * @param localMemSize * Size of local memory allocation (must match work group size) */ - public static void reductionOneBlockWithLayer(KernelContext context, FloatArray output, FloatArray x, FloatArray weights, FloatArray temp, int size, float ermsNorm, int localMemSize) { + + public static void reductionOneBlockWithLayer(KernelContext context, FloatArray output, FloatArray x, int size, float ermsNorm, int localMemSize) { int gid = context.globalIdx; int lid = context.localIdx; int groupId = context.groupIdx; @@ -63,37 +64,111 @@ public static void reductionOneBlockWithLayer(KernelContext context, FloatArray // Each workgroup stores its partial sum in a different location if (lid == 0) { // Store the partial sum from each workgroup - temp.set(groupId, localX[0]); + output.set(groupId + 1, localX[0]); } - context.globalBarrier(); + // Only the first thread in the first workgroup computes the final normalization factor + if (gid == 0) { + // Combine partial sums from all workgroups + float ss = 0.0f; + for (int i = 1; i <= (size / localMemSize); i++) { // Assuming 8 workgroups + ss += output.get(i); + } - float localss = 0.0f; - for (int i = 0; i < (size / localMemSize); i++) { // Assuming 8 workgroups - localss += temp.get(i); + ss /= size; + ss += ermsNorm; + ss = 1.0f / TornadoMath.sqrt(ss); + output.set(0, ss); // Store the final scale factor } - localss /= size; - localss += ermsNorm; - localss = 1.0f / TornadoMath.sqrt(localss); - - output.set(groupId * groupSize + lid, weights.get(groupId * groupSize + lid) * (localss * x.get(groupId * groupSize + lid))); } /** - * Applies the computed normalization factor to input and weight elements. This is the second phase of RMS normalization. + * Performs RMS (Root Mean Square) normalization using parallel reduction. 
It first computes the variance and scaling factor across all work groups, + * then it applies the computed normalization factor to input and weight elements. * + *
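 * Illustrative example (editorial, made-up numbers): for x = {1, 2, 2}, size = 3 and
 * ermsNorm = 1e-5, the mean of squares is (1 + 4 + 4) / 3 = 3.0, the scale factor is
 * 1 / sqrt(3.0 + 1e-5) ≈ 0.577, and each element becomes output[i] = weights[i] * 0.577 * x[i].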

* Formula: output[i] = weight[i] * (normalizationFactor * x[i]) * + * Algorithm: 1. Each thread computes square of its input element 2. Work group performs parallel reduction of squares 3. Partial sums stored per work group 4. All thread combines all partial + * sums and computes normalization factor 5. Applies the computed normalization factor to input and weight elements. + * * @param context * Kernel execution context * @param output - * Array for normalized output + * Array to store partial sums and final normalization factor * @param x - * Input values to normalize + * Input array to normalize * @param weights * Weight values for each element * @param temp * Temporary array containing normalization factor at index 0 + * @param size + * Number of elements to process + * @param ermsNorm + * Epsilon value squared for numerical stability + * @param localMemSize + * Size of local memory allocation (must match work group size) + */ + + public static void reductionOneBlockWithLayerFuse(KernelContext context, FloatArray output, FloatArray x, FloatArray weights, FloatArray temp, int size, float ermsNorm, int localMemSize) { + int gid = context.globalIdx; + int lid = context.localIdx; + int groupId = context.groupIdx; + int groupSize = context.localGroupSizeX; + + // Allocate local memory with the provided size + float[] localX = context.allocateFloatLocalArray(localMemSize); + + // Load input value and compute square + if (gid < size) { + float v = x.get(gid); + localX[lid] = v * v; + } else { + localX[lid] = 0.0f; + } + + // Perform parallel reduction within the work group + for (int stride = (groupSize / 2); stride > 0; stride /= 2) { + context.localBarrier(); + if (lid < stride) { + localX[lid] += localX[lid + stride]; + } + } + + // Each workgroup stores its partial sum in a different location + if (lid == 0) { + // Store the partial sum from each workgroup + temp.set(groupId, localX[0]); + } + + context.globalBarrier(); + + float localss = 0.0f; + int numGroups = (size + groupSize - 1) / groupSize; + for (int i = 0; i < numGroups; i++) { // Assuming 8 workgroups + localss += temp.get(i); + } + localss /= size; + localss += ermsNorm; + localss = 1.0f / TornadoMath.sqrt(localss); + + if (gid < size) { + float in = x.get(gid); + float w = weights.get(gid); + output.set(gid, w * (localss * in)); + } + } + + /** + * Applies the computed normalization factor to input and weight elements. This is the second phase of RMS normalization. + *

+ * Formula: output[i] = weight[i] * (normalizationFactor * x[i]) + * + * @param context Kernel execution context + * @param output Array for normalized output + * @param x Input values to normalize + * @param weights Weight values for each element + * @param temp Temporary array containing normalization factor at index 0 */ public static void reductionOneBlock2WithLayer(KernelContext context, FloatArray output, FloatArray x, FloatArray weights, FloatArray temp) { int gid = context.globalIdx; @@ -104,25 +179,17 @@ public static void reductionOneBlock2WithLayer(KernelContext context, FloatArray /** * Copies keys and values into the key-value cache for attention computation. Enables efficient access to past key-value pairs during autoregressive generation. - * + *
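// Editorial sketch, not part of the patch: one plausible flattening that matches the
// [layer][position][dimension] cache layout documented just below; the authoritative indexing is
// whatever copyToCache itself computes. Hypothetical helper, plain Java:
static int kvCacheIndex(int layer, int position, int d, int contextLength, int kvDim) {
    // layer-major block, then one kvDim-sized slot per position, then the dimension within it
    return layer * contextLength * kvDim + position * kvDim + d;
}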

* Cache layout: [layer][position][dimension] - Each layer has its own key and value cache - Each position in sequence has a key and value vector * - * @param destKeyCache - * Destination array for key cache - * @param srcKey - * Source keys to copy - * @param destValueCache - * Destination array for value cache - * @param srcValue - * Source values to copy - * @param positioNlayer - * Array containing current position - * @param kvDim - * Dimension of key/value vectors - * @param layer - * Current transformer layer index - * @param contextLength - * Maximum sequence length + * @param destKeyCache Destination array for key cache + * @param srcKey Source keys to copy + * @param destValueCache Destination array for value cache + * @param srcValue Source values to copy + * @param positioNlayer Array containing current position + * @param kvDim Dimension of key/value vectors + * @param layer Current transformer layer index + * @param contextLength Maximum sequence length */ public static void copyToCache(FloatArray destKeyCache, FloatArray srcKey, FloatArray destValueCache, FloatArray srcValue, IntArray positioNlayer, int kvDim, int layer, int contextLength) { @@ -158,21 +225,15 @@ public static void splitQKV(FloatArray qkv, FloatArray q, FloatArray k, FloatArr /** * Applies Rotary Position Encoding (RoPE) to query and key vectors. RoPE rotates pairs of dimensions based on their position in the sequence, enabling the model to learn relative positional * information. - * + *
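// Editorial sketch, not part of the patch: a pure-Java reference for the per-pair rotation
// described just below. The base-10000 frequency schedule is an assumption (the common choice in
// Llama-family models); the kernel's actual frequency computation is the one in ropeRotation.
static void rotatePair(float[] vec, int i, int position, int headSize) {
    int headDim = i % headSize;                                   // index of the pair within its head
    double freq = 1.0 / Math.pow(10000.0, (double) headDim / headSize);
    double angle = position * freq;
    float cos = (float) Math.cos(angle);
    float sin = (float) Math.sin(angle);
    float v0 = vec[i];
    float v1 = vec[i + 1];
    vec[i] = v0 * cos - v1 * sin;                                 // 2D rotation of the (even, odd) pair
    vec[i + 1] = v0 * sin + v1 * cos;
}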

* For each pair of dimensions (2*i, 2*i+1): - Compute rotation angle based on position and frequency - Apply 2D rotation to the pair * - * @param context - * Kernel execution context - * @param positionHolder - * Array containing current position - * @param sq - * Query vectors to rotate - * @param sk - * Key vectors to rotate - * @param kv_dim - * Dimension of key/value vectors - * @param head_size - * Dimension of each attention head + * @param context Kernel execution context + * @param positionHolder Array containing current position + * @param sq Query vectors to rotate + * @param sk Key vectors to rotate + * @param kv_dim Dimension of key/value vectors + * @param head_size Dimension of each attention head */ public static void ropeRotation(KernelContext context, IntArray positionHolder, FloatArray sq, FloatArray sk, int kv_dim, int head_size) { int i = context.globalIdx * 2; @@ -247,31 +308,20 @@ public static void ropeRotationPhi3(KernelContext context, IntArray positionHold /** * Computes attention for a single head. Implements scaled dot-product attention with softmax normalization. - * + *
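// Editorial sketch, not part of the patch: a sequential reference for the three steps listed just
// below, on plain float[] buffers for one head and one query position; keys[t] and values[t] stand
// in for the cached key/value vectors at timestep t.
static void attendOneHead(float[] q, float[][] keys, float[][] values, float[] out, int pos, int headSize) {
    float[] scores = new float[pos + 1];
    float max = Float.NEGATIVE_INFINITY;
    for (int t = 0; t <= pos; t++) {                              // 1. scores = Q.K / sqrt(headSize)
        float s = 0.0f;
        for (int d = 0; d < headSize; d++) {
            s += q[d] * keys[t][d];
        }
        scores[t] = s / (float) Math.sqrt(headSize);
        max = Math.max(max, scores[t]);
    }
    float sum = 0.0f;
    for (int t = 0; t <= pos; t++) {                              // 2. softmax with max subtraction
        scores[t] = (float) Math.exp(scores[t] - max);
        sum += scores[t];
    }
    java.util.Arrays.fill(out, 0, headSize, 0.0f);
    for (int t = 0; t <= pos; t++) {                              // 3. weighted sum of values
        float weight = scores[t] / sum;
        for (int d = 0; d < headSize; d++) {
            out[d] += weight * values[t][d];
        }
    }
}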

* Steps: 1. Compute attention scores: Q·K / sqrt(head_size) 2. Apply softmax (with max subtraction for numerical stability) 3. Compute weighted sum of values * - * @param allQ - * All query vectors - * @param key_cache - * Cached keys - * @param value_cache - * Cached values - * @param allXb - * Output buffer - * @param h - * Head index to process - * @param headSize - * Dimension per head - * @param kvDim - * Key/value dimension - * @param kvMul - * Key multiplier for grouped attention - * @param loff - * Layer offset in cache - * @param pos - * Current position - * @param wrapAtt - * Attention weights buffer + * @param allQ All query vectors + * @param key_cache Cached keys + * @param value_cache Cached values + * @param allXb Output buffer + * @param h Head index to process + * @param headSize Dimension per head + * @param kvDim Key/value dimension + * @param kvMul Key multiplier for grouped attention + * @param loff Layer offset in cache + * @param pos Current position + * @param wrapAtt Attention weights buffer */ private static void processHeadTornado(FloatArray allQ, FloatArray key_cache, FloatArray value_cache, FloatArray allXb, int h, int headSize, int kvDim, int kvMul, long loff, int pos, FloatArray wrapAtt) { @@ -627,23 +677,16 @@ public static void processHeadsFlashAttentionOpt(KernelContext context, FloatArr /** * Performs optimized matrix-vector multiplication where each work group processes one row of the matrix. - * + *
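// Editorial sketch, not part of the patch: the sequential contract that the work-group kernel
// described just below parallelises, assuming a row-major weight matrix of shape d x n stored as a
// flat array (w[row * n + col]).
static void matVecRowMajor(float[] x, float[] hb, float[] w, int n, int d) {
    for (int row = 0; row < d; row++) {
        float acc = 0.0f;
        for (int col = 0; col < n; col++) {
            acc += w[row * n + col] * x[col];
        }
        hb[row] = acc;                                            // one output element per matrix row
    }
}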

* Algorithm: 1. Each work group handles one output dimension 2. Threads in work group compute partial dot products 3. Parallel reduction yields final row result * - * @param context - * Kernel execution context - * @param x - * Input vector - * @param hb - * Output vector - * @param w - * Weight matrix (row-major) - * @param n - * Input dimension - * @param d - * Output dimension - * @param localWorkGroupSize - * Number of threads per work group + * @param context Kernel execution context + * @param x Input vector + * @param hb Output vector + * @param w Weight matrix (row-major) + * @param n Input dimension + * @param d Output dimension + * @param localWorkGroupSize Number of threads per work group */ public static void matrixVectorGeneric(KernelContext context, FloatArray x, FloatArray hb, FloatArray w, int n, int d, int localWorkGroupSize) { // One row per workgroup (not per thread) diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java index 0d975f33..6ddcdd18 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java @@ -31,6 +31,7 @@ public LlamaFP16FFNLayers(String taskGraph, State state, Weights weights, Config public GridScheduler updateGridScheduler(GridScheduler tornadoForwardScheduler) { WorkerGrid ropeWorker = WorkerGridFactory.genericWorker(config.dim()/2, 128); WorkerGrid rmsNormWorker = WorkerGridFactory.createRmsNormWorker(config.dim(), 256); + //System.out.println("llama config dim: " + config.dim()); int configDimRowMajorGlobal = config.dim() * LOCAL_WORK_GROUP_SIZE_ALLOC; WorkerGrid configDimRowMajorGlobalWorker = WorkerGridFactory.genericWorker(configDimRowMajorGlobal, LOCAL_WORK_GROUP_SIZE_ALLOC); @@ -54,9 +55,7 @@ public GridScheduler updateGridScheduler(GridScheduler tornadoForwardScheduler) tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".projectionTwo", configDimRowMajorGlobalWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".fused_ffn_w1_w3", configHiddenDimRowMajorWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlock", rmsNormWorker); - tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContext", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlockFFN", rmsNormWorker); - tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContextFFN", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".parallel-attention", parallelAttentionWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".copyToCaches", copyToCachesWorker); } @@ -112,7 +111,7 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, weights.w3Layered[layerIndex].asHalfFloatArray()); unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); unifiedLayer - .task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize); + .task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize); unifiedLayer.task("qmatmul", 
TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapQ, weights.wqLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .task("kmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapK, weights.wkLayered[layerIndex].asHalfFloatArray(), config.dim(), config.kvDim(), @@ -125,8 +124,8 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, configureAttention(unifiedLayer, layerIndex); unifiedLayer.task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapXb, state.wrapX, weights.woLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize); - unifiedLayer.task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, state.wrapXb, state.wrapHb, weights.w1Layered[layerIndex].asHalfFloatArray(), + .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN, config.dim(), config.rmsNormEps(), state.localSize); + unifiedLayer.task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, state.wrapXb, state.wrapHb, weights.w1Layered[layerIndex].asHalfFloatArray(), weights.w3Layered[layerIndex].asHalfFloatArray(), config.dim(), config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .task("projectionTwo", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapHb, state.wrapX, weights.w2Layered[layerIndex].asHalfFloatArray(), config.hiddenDim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC).persistOnDevice(state.wrapX); diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Phi3FP16FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Phi3FP16FFNLayers.java index 7efa73e6..66628a0c 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Phi3FP16FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Phi3FP16FFNLayers.java @@ -63,7 +63,6 @@ public Phi3FP16FFNLayers(String taskGraphName, Phi3State state, Phi3TornadoWeigh public GridScheduler updateGridScheduler(GridScheduler gridScheduler) { // RMS norm worker WorkerGrid rmsNormWorker = WorkerGridFactory.createRmsNormWorker(config.dim(), state.localSize); - // Combined QKV matmul worker int matmulQkvGlobal = opSize * LOCAL_WORK_GROUP_SIZE_ALLOC; WorkerGrid matmulQkvRowMajorWorker = WorkerGridFactory.genericWorker(matmulQkvGlobal, LOCAL_WORK_GROUP_SIZE_ALLOC); @@ -169,13 +168,18 @@ TaskGraph setupSinglePhi3FFNLayer(Phi3TornadoWeights weights, int layerIndex) { unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, - phi3State.wrapXb, - phi3State.wrapX, - weights.rms_att_weightLayered[layerIndex].asFloatArray(), phi3State.temp, + phi3State.wrapX, phi3Config.dim(), phi3Config.rmsNormEps(), - phi3State.localSize); + phi3State.localSize) + .task("mapContext", + TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, + context, + phi3State.wrapXb, + phi3State.wrapX, + 
weights.rms_att_weightLayered[layerIndex].asFloatArray(), + phi3State.temp); // Combined QKV projection unifiedLayer.task("qkvmatmul", @@ -249,13 +253,18 @@ TaskGraph setupSinglePhi3FFNLayer(Phi3TornadoWeights weights, int layerIndex) { unifiedLayer.task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, - phi3State.wrapXb, - phi3State.wrapX, - weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), phi3State.tempFFN, + phi3State.wrapX, phi3Config.dim(), phi3Config.rmsNormEps(), - phi3State.localSize); + phi3State.localSize) + .task("mapContextFFN", + TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, + context, + phi3State.wrapXb, + phi3State.wrapX, + weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), + phi3State.tempFFN); // FFN: combined Up and Gate projection (outputs 2 * hiddenDim) unifiedLayer.task("wGateUp", diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen2FP16FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen2FP16FFNLayers.java index efe76355..d5400789 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen2FP16FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen2FP16FFNLayers.java @@ -108,9 +108,7 @@ public GridScheduler updateGridScheduler(GridScheduler tornadoForwardScheduler) tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".projectionTwo", configDimRowMajorGlobalWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".fused_ffn_w1_w3", configHiddenDimRowMajorWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlock", rmsNormWorker); - tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContext", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlockFFN", rmsNormWorker); - tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContextFFN", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".parallel-attention", parallelAttentionWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".copyToCaches", copyToCachesWorker); } @@ -178,7 +176,7 @@ TaskGraph setupSingleQwen2FFNLayer(Qwen2TornadoWeights weights, int layerIndex) weights.w3Layered[layerIndex].asHalfFloatArray()); // unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); // - unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen2State.wrapXb, qwen2State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), qwen2State.temp, config.dim(), config.rmsNormEps(), + unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, qwen2State.wrapXb, qwen2State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), qwen2State.temp, config.dim(), config.rmsNormEps(), qwen2State.localSize) .task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, qwen2State.wrapXb, qwen2State.wrapQ, weights.wqLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) @@ -195,7 +193,7 @@ TaskGraph setupSingleQwen2FFNLayer(Qwen2TornadoWeights weights, int layerIndex) config.numberOfHeads(), config.headSize(), config.kvDim(), config.kvMul(), qwen2State.positionHolder, layerIndex, config.contextLength()) .task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, qwen2State.wrapXb, qwen2State.wrapX, 
weights.woLayered[layerIndex].asHalfFloatArray(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen2State.wrapXb, qwen2State.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), qwen2State.tempFFN, config.dim(), config.rmsNormEps(), + .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, qwen2State.wrapXb, qwen2State.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), qwen2State.tempFFN, config.dim(), config.rmsNormEps(), qwen2State.localSize) .task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, qwen2State.wrapXb, qwen2State.wrapHb, weights.w1Layered[layerIndex].asHalfFloatArray(), weights.w3Layered[layerIndex].asHalfFloatArray(), config.dim(), config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen3FP16FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen3FP16FFNLayers.java index e045d3c5..e9302361 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen3FP16FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/Qwen3FP16FFNLayers.java @@ -103,7 +103,6 @@ public GridScheduler updateGridScheduler(GridScheduler gridScheduler) { // Map workers to tasks for each layer for (int i = 0; i < config.numberOfLayers(); i++) { gridScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlock", rmsNormWorker); - gridScheduler.addWorkerGrid("layer_" + i + ".mapContext", rmsNormWorker); gridScheduler.addWorkerGrid("layer_" + i + ".qmatmul", matmulQRowMajorWorker); gridScheduler.addWorkerGrid("layer_" + i + ".kmatmul", matmulKVRowMajorWorker); @@ -121,7 +120,6 @@ public GridScheduler updateGridScheduler(GridScheduler gridScheduler) { gridScheduler.addWorkerGrid("layer_" + i + ".matmul1", matmul1Worker); gridScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlockFFN", rmsNormWorker); - gridScheduler.addWorkerGrid("layer_" + i + ".mapContextFFN", rmsNormWorker); gridScheduler.addWorkerGrid("layer_" + i + ".fused_ffn_w1_w3", fusedFFNW1W3Worker); gridScheduler.addWorkerGrid("layer_" + i + ".projectionTwo", projectionTwoWorker); } @@ -193,7 +191,7 @@ TaskGraph setupSingleQwen3FFNLayer(Qwen3TornadoWeights weights, int layerIndex) weights.w3Layered[layerIndex].asHalfFloatArray() // ); unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); - unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), qwen3State.temp,// in + unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), qwen3State.temp,// in qwen3Config.dim(), qwen3Config.rmsNormEps(), qwen3State.localSize); int qDim0 = nEmbdHeadK * qwen3Config.numberOfHeads(); @@ -246,8 +244,7 @@ TaskGraph setupSingleQwen3FFNLayer(Qwen3TornadoWeights weights, int layerIndex) qwen3Config.dim(), // dim0 = 1024 LOCAL_WORK_GROUP_SIZE_ALLOC); - //FIXME: restore the reductionFinalNormalizationFFN module later - unifiedLayer.task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, qwen3State.wrapXb, qwen3State.wrapX, 
weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), qwen3State.tempFFN, qwen3Config.dim(), + unifiedLayer.task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), qwen3State.tempFFN, qwen3Config.dim(), qwen3Config.rmsNormEps(), qwen3State.localSize); unifiedLayer.task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, qwen3State.wrapXb, qwen3State.wrapHb, weights.w1Layered[layerIndex].asHalfFloatArray(), diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java index f8c87c7d..172cab0c 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java @@ -66,7 +66,7 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, weights.woLayered[layerIndex].getScales(), weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), weights.w1Layered[layerIndex].getQuants(), weights.w1Layered[layerIndex].getScales(), weights.w2Layered[layerIndex].getQuants(), weights.w2Layered[layerIndex].getScales(), weights.w3Layered[layerIndex].getQuants(), weights.w3Layered[layerIndex].getScales()); unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); - unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize); + unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize); unifiedLayer.task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapQ, weights.wqLayered[layerIndex].getQuants(), weights.wqLayered[layerIndex].getScales(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .task("kmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapK, weights.wkLayered[layerIndex].getQuants(), @@ -79,7 +79,7 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, configureAttention(unifiedLayer, layerIndex); unifiedLayer.task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapXb, state.wrapX, weights.woLayered[layerIndex].getQuants(), weights.woLayered[layerIndex].getScales(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN, config.dim(), config.rmsNormEps(), state.localSize); + .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN, config.dim(), config.rmsNormEps(), state.localSize); unifiedLayer.task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, 
state.wrapXb, state.wrapHb, weights.w1Layered[layerIndex].getQuants(), weights.w1Layered[layerIndex].getScales(), weights.w3Layered[layerIndex].getQuants(), weights.w3Layered[layerIndex].getScales(), config.dim(), config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) @@ -114,6 +114,7 @@ protected TaskGraph configureLayerDataTransfers(TaskGraph unifiedLayer, int laye public GridScheduler updateGridScheduler(GridScheduler tornadoForwardScheduler) { WorkerGrid ropeWorker = WorkerGridFactory.genericWorker(config.dim() / 2, 128); WorkerGrid rmsNormWorker = WorkerGridFactory.createRmsNormWorker(config.dim(), 256); + //System.out.println("llama config dim: " + config.dim()); int configDimRowMajorGlobal = config.dim() * LOCAL_WORK_GROUP_SIZE_ALLOC; WorkerGrid configDimRowMajorGlobalWorker = WorkerGridFactory.genericWorker(configDimRowMajorGlobal, LOCAL_WORK_GROUP_SIZE_ALLOC); @@ -135,9 +136,7 @@ public GridScheduler updateGridScheduler(GridScheduler tornadoForwardScheduler) tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".projectionTwo", configDimRowMajorGlobalWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".fused_ffn_w1_w3", configHiddenDimRowMajorWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlock", rmsNormWorker); - tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContext", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlockFFN", rmsNormWorker); - tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContextFFN", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".parallel-attention", parallelAttentionWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".copyToCaches", copyToCachesWorker); } diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Phi3Q8_0FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Phi3Q8_0FFNLayers.java index 4960c090..f72faa85 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Phi3Q8_0FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Phi3Q8_0FFNLayers.java @@ -77,9 +77,7 @@ public GridScheduler updateGridScheduler(GridScheduler tornadoForwardScheduler) tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".wDown", configDimRowMajorGlobalWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".wGateUp", wgetHiddenDimRowMajorWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlock", rmsNormWorker); - tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContext", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlockFFN", rmsNormWorker); - tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContextFFN", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".parallel-attention", parallelAttentionWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".copyToCaches", copyToCachesWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".gateUpSiLU", splitGateUpSiLUWorker); @@ -151,7 +149,7 @@ TaskGraph setupSinglePhi3Q8_0FFNLayer(Phi3TornadoWeights weights, int layerIndex // RMSNorm for attention input unifiedLayer.task("reductionsOneBlock", - TransformerComputeKernelsLayered::reductionOneBlockWithLayer, + TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, phi3State.wrapXb, phi3State.wrapX, @@ -233,7 +231,7 @@ TaskGraph setupSinglePhi3Q8_0FFNLayer(Phi3TornadoWeights weights, int layerIndex // FFN section: RMSNorm 
unifiedLayer.task("reductionsOneBlockFFN", - TransformerComputeKernelsLayered::reductionOneBlockWithLayer, + TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, phi3State.wrapXb, phi3State.wrapX, diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java index f016604a..e62441ce 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java @@ -111,9 +111,7 @@ public GridScheduler updateGridScheduler(GridScheduler tornadoForwardScheduler) tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".projectionTwo", configDimRowMajorGlobalWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".fused_ffn_w1_w3", configHiddenDimRowMajorWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlock", rmsNormWorker); - tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContext", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlockFFN", rmsNormWorker); - tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContextFFN", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".parallel-attention", parallelAttentionWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".copyToCaches", copyToCachesWorker); } @@ -187,7 +185,7 @@ TaskGraph setupSingleQwen2Q8_0FFNLayer(Qwen2TornadoWeights weights, int layerInd ); unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); - unifiedLayer.task("reductionsOneBlock" , TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, + unifiedLayer.task("reductionsOneBlock" , TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize) .task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGeneric, context, state.wrapXb, state.wrapQ, weights.wqLayered[layerIndex].getQuants(), weights.wqLayered[layerIndex].getScales(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) @@ -208,7 +206,7 @@ TaskGraph setupSingleQwen2Q8_0FFNLayer(Qwen2TornadoWeights weights, int layerInd state.positionHolder, layerIndex, config.contextLength()) .task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidual, context, state.wrapXb, state.wrapX, weights.woLayered[layerIndex].getQuants(), weights.woLayered[layerIndex].getScales(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.wrapXb, + .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN, config.dim(), config.rmsNormEps(), state.localSize) .task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivation, context, state.wrapXb, state.wrapHb, weights.w1Layered[layerIndex].getQuants(), weights.w1Layered[layerIndex].getScales(), weights.w3Layered[layerIndex].getQuants(), weights.w3Layered[layerIndex].getScales(), config.dim(), config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) diff --git 
a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen3Q8_0FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen3Q8_0FFNLayers.java index 2274959a..52c7aee5 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen3Q8_0FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen3Q8_0FFNLayers.java @@ -93,7 +93,6 @@ public GridScheduler updateGridScheduler(GridScheduler tornadoForwardScheduler) for (int i = 0; i < config.numberOfLayers(); i++) { tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlock", rmsNormWorker); - tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContext", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".qmatmul", matmulQRowMajorWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".kmatmul", matmulKVRowMajorWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".vmatmul", matmulKVRowMajorWorker); @@ -106,7 +105,6 @@ public GridScheduler updateGridScheduler(GridScheduler tornadoForwardScheduler) tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".parallel-attention", parallelAttentionWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".matmul1", matmul1Worker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlockFFN", rmsNormWorker); - tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContextFFN", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".fused_ffn_w1_w3", fusedFFNW1W3Worker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".projectionTwo", projectionTwoWorker); } @@ -187,7 +185,7 @@ TaskGraph setupSingleQwen3FFNLayer(Qwen3TornadoWeights weights, int layerIndex) // RMS norm for attention input unifiedLayer.task("reductionsOneBlock", - TransformerComputeKernelsLayered::reductionOneBlockWithLayer, + TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), qwen3State.temp, config.dim(), config.rmsNormEps(), qwen3State.localSize); // QKV projections with Qwen3 GQA dimensions @@ -258,7 +256,7 @@ TaskGraph setupSingleQwen3FFNLayer(Qwen3TornadoWeights weights, int layerIndex) // RMS norm for FFN input unifiedLayer.task("reductionsOneBlockFFN", - TransformerComputeKernelsLayered::reductionOneBlockWithLayer, + TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, qwen3State.wrapXb, qwen3State.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), qwen3State.tempFFN, config.dim(), config.rmsNormEps(), qwen3State.localSize); // Fused FFN: w1(x) ⊗ w3(x) with SiLU activation (Q8_0 weights) From 884d1979282c4ef118c92e44f65deb42c689dbb2 Mon Sep 17 00:00:00 2001 From: ruiqi Date: Wed, 10 Dec 2025 11:32:51 +0000 Subject: [PATCH 08/14] update on reduction fuse --- .../layers/type/q8_0/LlamaQ8_0FFNLayers.java | 18 ++++-------------- .../layers/type/q8_0/Qwen2Q8_0FFNLayers.java | 16 ++++++---------- 2 files changed, 10 insertions(+), 24 deletions(-) diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java index 385b626f..44a2ceab 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java @@ -71,12 +71,7 @@ TaskGraph 
setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, weights.w2Layered[layerIndex].asByteArray(), weights.w3Layered[layerIndex].asByteArray()); unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); - unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.temp, state.wrapX, config.dim(), config.rmsNormEps(), state.localSize); - if (shouldUseFinalNormalization()) { - unifiedLayer.task("reductionFinalNormalization", TransformerComputeKernelsLayered::reductionFinalNormalization, context, state.temp, - config.dim(), config.rmsNormEps()); - } - unifiedLayer.task("mapContext", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp) + unifiedLayer.task("reductionsOneBlock", TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize) .task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGenericQ8Byte, context, state.wrapXb, state.wrapQ, weights.wqLayered[layerIndex].asByteArray(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .task("kmatmul", TransformerComputeKernelsLayered::matrixVectorGenericQ8Byte, context, state.wrapXb, state.wrapK, @@ -89,12 +84,7 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, configureAttention(unifiedLayer, layerIndex); unifiedLayer.task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidualQ8_0Byte, context, state.wrapXb, state.wrapX, weights.woLayered[layerIndex].asByteArray(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.tempFFN, state.wrapX, config.dim(), config.rmsNormEps(), state.localSize); - if (shouldUseFinalNormalization()) { - unifiedLayer.task("reductionFinalNormalizationFFN", TransformerComputeKernelsLayered::reductionFinalNormalization, context, state.tempFFN, - config.dim(), config.rmsNormEps()); - } - unifiedLayer.task("mapContextFFN", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN) + .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, state.wrapXb, state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN, config.dim(), config.rmsNormEps(), state.localSize) .task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivationQ8_0Byte, context, state.wrapXb, state.wrapHb, weights.w1Layered[layerIndex].asByteArray(), weights.w3Layered[layerIndex].asByteArray(), config.dim(), config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) @@ -150,9 +140,9 @@ public GridScheduler updateGridScheduler(GridScheduler tornadoForwardScheduler) tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".projectionTwo", configDimRowMajorGlobalWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".fused_ffn_w1_w3", configHiddenDimRowMajorWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlock", rmsNormWorker); - tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContext", rmsNormWorker); + //tornadoForwardScheduler.addWorkerGrid("layer_" + i + 
".mapContext", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlockFFN", rmsNormWorker); - tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContextFFN", rmsNormWorker); + //tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContextFFN", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".parallel-attention", parallelAttentionWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".copyToCaches", copyToCachesWorker); } diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java index d21f3509..2259c840 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java @@ -111,9 +111,9 @@ public GridScheduler updateGridScheduler(GridScheduler tornadoForwardScheduler) tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".projectionTwo", configDimRowMajorGlobalWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".fused_ffn_w1_w3", configHiddenDimRowMajorWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlock", rmsNormWorker); - tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContext", rmsNormWorker); + //tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContext", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlockFFN", rmsNormWorker); - tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContextFFN", rmsNormWorker); + //tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContextFFN", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".parallel-attention", parallelAttentionWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".copyToCaches", copyToCachesWorker); } @@ -180,10 +180,8 @@ TaskGraph setupSingleQwen2Q8_0FFNLayer(Qwen2TornadoWeights weights, int layerInd ); unifiedLayer = configureLayerDataTransfers(unifiedLayer, layerIndex); - unifiedLayer.task("reductionsOneBlock" , TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.temp, - state.wrapX, config.dim(), config.rmsNormEps(), state.localSize) - .task("mapContext", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, - state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp) + unifiedLayer.task("reductionsOneBlock" , TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, state.wrapXb, + state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize) .task("qmatmul", TransformerComputeKernelsLayered::matrixVectorGenericQ8Byte, context, state.wrapXb, state.wrapQ, weights.wqLayered[layerIndex].asByteArray(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .task("kmatmul", TransformerComputeKernelsLayered::matrixVectorGenericQ8Byte, context, @@ -203,10 +201,8 @@ TaskGraph setupSingleQwen2Q8_0FFNLayer(Qwen2TornadoWeights weights, int layerInd state.positionHolder, layerIndex, config.contextLength()) .task("matmul1", TransformerComputeKernelsLayered::matrixVectorGenericWithResidualQ8_0Byte, context, state.wrapXb, state.wrapX, weights.woLayered[layerIndex].asByteArray(), config.dim(), config.dim(), LOCAL_WORK_GROUP_SIZE_ALLOC) - .task("reductionsOneBlockFFN", 
TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.tempFFN, - state.wrapX, config.dim(), config.rmsNormEps(), state.localSize) - .task("mapContextFFN", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, - state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN) + .task("reductionsOneBlockFFN", TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, state.wrapXb, + state.wrapX, weights.rms_ffn_weightLayered[layerIndex].asFloatArray(), state.tempFFN, config.dim(), config.rmsNormEps(), state.localSize) .task("fused_ffn_w1_w3", TransformerComputeKernelsLayered::fusedFeedForwardWithSiLUAndGLUActivationQ8_0Byte, context, state.wrapXb, state.wrapHb, weights.w1Layered[layerIndex].asByteArray(), weights.w3Layered[layerIndex].asByteArray(), config.dim(), config.hiddenDim(), LOCAL_WORK_GROUP_SIZE_ALLOC) .task("projectionTwo", TransformerComputeKernelsLayered::matrixVectorGenericWithResidualQ8_0Byte, context, From 9336dc879c26f77b6187fc10369bf6c2f9bd3c73 Mon Sep 17 00:00:00 2001 From: ruiqi Date: Wed, 10 Dec 2025 11:35:43 +0000 Subject: [PATCH 09/14] remove comments --- .../tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java | 2 -- .../tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java | 2 -- 2 files changed, 4 deletions(-) diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java index 44a2ceab..e62950fd 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java @@ -140,9 +140,7 @@ public GridScheduler updateGridScheduler(GridScheduler tornadoForwardScheduler) tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".projectionTwo", configDimRowMajorGlobalWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".fused_ffn_w1_w3", configHiddenDimRowMajorWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlock", rmsNormWorker); - //tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContext", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlockFFN", rmsNormWorker); - //tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContextFFN", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".parallel-attention", parallelAttentionWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".copyToCaches", copyToCachesWorker); } diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java index 2259c840..1add4843 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/Qwen2Q8_0FFNLayers.java @@ -111,9 +111,7 @@ public GridScheduler updateGridScheduler(GridScheduler tornadoForwardScheduler) tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".projectionTwo", configDimRowMajorGlobalWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".fused_ffn_w1_w3", configHiddenDimRowMajorWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlock", rmsNormWorker); - //tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContext", rmsNormWorker); 
tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".reductionsOneBlockFFN", rmsNormWorker); - //tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".mapContextFFN", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".parallel-attention", parallelAttentionWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".copyToCaches", copyToCachesWorker); } From 7b1d1724995e5ac7e8586c06ed49e9daee6a5556 Mon Sep 17 00:00:00 2001 From: ruiqi Date: Mon, 15 Dec 2025 12:11:35 +0000 Subject: [PATCH 10/14] reduction fuse opt in RMS normalization layer for llama after the recent half float update --- .../TransformerComputeKernelsLayered.java | 78 +++++++++++++++++++ .../layers/type/fp16/LlamaFP16FFNLayers.java | 8 +- .../layers/type/q8_0/LlamaQ8_0FFNLayers.java | 8 +- 3 files changed, 90 insertions(+), 4 deletions(-) diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java b/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java index 3fb1d0f1..803f38f4 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/kernels/TransformerComputeKernelsLayered.java @@ -408,6 +408,84 @@ public static void reductionOneBlockWithLayerFuse(KernelContext context, FloatAr } } + /** + * Performs RMS (Root Mean Square) normalization using parallel reduction. It first computes the variance and scaling factor across all work groups, + * then it applies the computed normalization factor to input and weight elements. + * + *
+ * Formula: output[i] = weight[i] * (normalizationFactor * x[i]) + * + * Algorithm: 1. Each thread computes square of its input element 2. Work group performs parallel reduction of squares 3. Partial sums stored per work group 4. All thread combines all partial + * sums and computes normalization factor 5. Applies the computed normalization factor to input and weight elements. + * + * @param context + * Kernel execution context + * @param outputFP16 + * Half float array to store partial sums and final normalization factor + * @param x + * Input array to normalize + * @param weights + * Weight values for each element + * @param temp + * Temporary array containing normalization factor at index 0 + * @param size + * Number of elements to process + * @param ermsNorm + * Epsilon value squared for numerical stability + * @param localMemSize + * Size of local memory allocation (must match work group size) + */ + + public static void reductionOneBlockWithLayerFuseFP16(KernelContext context, HalfFloatArray outputFP16, FloatArray x, FloatArray weights, FloatArray temp, int size, float ermsNorm, int localMemSize) { + int gid = context.globalIdx; + int lid = context.localIdx; + int groupId = context.groupIdx; + int groupSize = context.localGroupSizeX; + + // Allocate local memory with the provided size + float[] localX = context.allocateFloatLocalArray(localMemSize); + + // Load input value and compute square + if (gid < size) { + float v = x.get(gid); + localX[lid] = v * v; + } else { + localX[lid] = 0.0f; + } + + // Perform parallel reduction within the work group + for (int stride = (groupSize / 2); stride > 0; stride /= 2) { + context.localBarrier(); + if (lid < stride) { + localX[lid] += localX[lid + stride]; + } + } + + // Each workgroup stores its partial sum in a different location + if (lid == 0) { + // Store the partial sum from each workgroup + temp.set(groupId, localX[0]); + } + + context.globalBarrier(); + + float localss = 0.0f; + int numGroups = (size + groupSize - 1) / groupSize; + for (int i = 0; i < numGroups; i++) { // Assuming 8 workgroups + localss += temp.get(i); + } + localss /= size; + localss += ermsNorm; + localss = 1.0f / TornadoMath.sqrt(localss); + + if (gid < size) { + float in = x.get(gid); + float w = weights.get(gid); + outputFP16.set(gid, new HalfFloat(w * (localss * in))); + } + } + + /** * Applies the computed normalization factor to input and weight elements. This is the second phase of RMS normalization. *
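For reference, the quantity computed by reductionOneBlockWithLayerFuseFP16 above can be spelled out from its javadoc and body (with n = size and ermsNorm the stabilizing epsilon term; this restatement is not part of the patch):

    s         = 1 / sqrt( (1/n) * (x[0]^2 + x[1]^2 + ... + x[n-1]^2) + ermsNorm )
    output[i] = weights[i] * (s * x[i])

Each work group writes one partial sum of squares into temp; every thread then folds all partial sums into s and scales its own element, which is what lets the former reductionsOneBlock/mapContext task pair collapse into this single fused task.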
diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java index 8d105e89..a11f3e4a 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java @@ -50,7 +50,7 @@ public GridScheduler updateGridScheduler(GridScheduler tornadoForwardScheduler) for (int i = 0; i < config.numberOfLayers(); i++) { // === Attention Block === tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".attn_rms_reduce", rmsNormWorker); - tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".attn_rms_apply_fp16", rmsNormWorker); + //tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".attn_rms_apply_fp16", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".qkv_projection", fusedQKVWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".rope_and_kv_cache", ropeWithCacheWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".attention", parallelAttentionWorker); @@ -199,6 +199,10 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, // === Attention Block === // RMS Normalization unifiedLayer.task("attn_rms_reduce", + TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuseFP16, + context, state.wrapXbFP16, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp, + config.dim(), config.rmsNormEps(), state.localSize); + /*unifiedLayer.task("attn_rms_reduce", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.temp, state.wrapX, config.dim(), config.rmsNormEps(), state.localSize); @@ -212,7 +216,7 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, unifiedLayer.task("attn_rms_apply_fp16", TransformerComputeKernels::mapContextWithQuantize, context, state.wrapXbFP16, state.wrapX, - weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp); + weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp);*/ // QKV Projection (fused) unifiedLayer.task("qkv_projection", diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java index ba1b6a79..7e771246 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java @@ -161,6 +161,10 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, // === Attention Block === // RMS Normalization unifiedLayer.task("attn_rms_reduce", + TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, + context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp, + config.dim(), config.rmsNormEps(), state.localSize); + /*unifiedLayer.task("attn_rms_reduce", TransformerComputeKernelsLayered::reductionOneBlockWithLayer, context, state.temp, state.wrapX, config.dim(), config.rmsNormEps(), state.localSize); @@ -174,7 +178,7 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, unifiedLayer.task("attn_rms_apply", TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, context, state.wrapXb, state.wrapX, - weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp); + 
weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp);*/ // QKV Projection (fused with Q8 dequantization) unifiedLayer.task("qkv_projection", @@ -306,7 +310,7 @@ public GridScheduler updateGridScheduler(GridScheduler tornadoForwardScheduler) // --- Attention Block --- // RMS Normalization tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".attn_rms_reduce", rmsNormWorker); - tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".attn_rms_apply", rmsNormWorker); + //tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".attn_rms_apply", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".qkv_projection", fusedQkvWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".rope_and_kv_cache", ropeWithCacheWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".attention", parallelAttentionWorker); From 3f542b7251546c59e267246cef4bf59aa82be691 Mon Sep 17 00:00:00 2001 From: ruiqi Date: Mon, 15 Dec 2025 12:13:08 +0000 Subject: [PATCH 11/14] remove comments --- .../layers/type/fp16/LlamaFP16FFNLayers.java | 16 ---------------- .../layers/type/q8_0/LlamaQ8_0FFNLayers.java | 16 ---------------- 2 files changed, 32 deletions(-) diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java index a11f3e4a..b22ff923 100644 --- a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/fp16/LlamaFP16FFNLayers.java @@ -50,7 +50,6 @@ public GridScheduler updateGridScheduler(GridScheduler tornadoForwardScheduler) for (int i = 0; i < config.numberOfLayers(); i++) { // === Attention Block === tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".attn_rms_reduce", rmsNormWorker); - //tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".attn_rms_apply_fp16", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".qkv_projection", fusedQKVWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".rope_and_kv_cache", ropeWithCacheWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".attention", parallelAttentionWorker); @@ -202,21 +201,6 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuseFP16, context, state.wrapXbFP16, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize); - /*unifiedLayer.task("attn_rms_reduce", - TransformerComputeKernelsLayered::reductionOneBlockWithLayer, - context, state.temp, state.wrapX, - config.dim(), config.rmsNormEps(), state.localSize); - - if (shouldUseFinalNormalization()) { - unifiedLayer.task("attn_rms_finalize", - TransformerComputeKernelsLayered::reductionFinalNormalization, - context, state.temp, config.dim(), config.rmsNormEps()); - } - - unifiedLayer.task("attn_rms_apply_fp16", - TransformerComputeKernels::mapContextWithQuantize, - context, state.wrapXbFP16, state.wrapX, - weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp);*/ // QKV Projection (fused) unifiedLayer.task("qkv_projection", diff --git a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java index 7e771246..c170b039 100644 --- 
a/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java +++ b/src/main/java/org/beehive/gpullama3/tornadovm/layers/type/q8_0/LlamaQ8_0FFNLayers.java @@ -164,21 +164,6 @@ TaskGraph setupSingleFFNLayer(LlamaTornadoWeights weights, Configuration config, TransformerComputeKernelsLayered::reductionOneBlockWithLayerFuse, context, state.wrapXb, state.wrapX, weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp, config.dim(), config.rmsNormEps(), state.localSize); - /*unifiedLayer.task("attn_rms_reduce", - TransformerComputeKernelsLayered::reductionOneBlockWithLayer, - context, state.temp, state.wrapX, - config.dim(), config.rmsNormEps(), state.localSize); - - if (shouldUseFinalNormalization()) { - unifiedLayer.task("attn_rms_finalize", - TransformerComputeKernelsLayered::reductionFinalNormalization, - context, state.temp, config.dim(), config.rmsNormEps()); - } - - unifiedLayer.task("attn_rms_apply", - TransformerComputeKernelsLayered::reductionOneBlock2WithLayer, - context, state.wrapXb, state.wrapX, - weights.rms_att_weightLayered[layerIndex].asFloatArray(), state.temp);*/ // QKV Projection (fused with Q8 dequantization) unifiedLayer.task("qkv_projection", @@ -310,7 +295,6 @@ public GridScheduler updateGridScheduler(GridScheduler tornadoForwardScheduler) // --- Attention Block --- // RMS Normalization tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".attn_rms_reduce", rmsNormWorker); - //tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".attn_rms_apply", rmsNormWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".qkv_projection", fusedQkvWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".rope_and_kv_cache", ropeWithCacheWorker); tornadoForwardScheduler.addWorkerGrid("layer_" + i + ".attention", parallelAttentionWorker); From ccbc2ea4fe074da60a36a4fab883f5c9db3494da Mon Sep 17 00:00:00 2001 From: ruiqi Date: Mon, 15 Dec 2025 13:13:57 +0000 Subject: [PATCH 12/14] revert unnecessary changes --- llama-tornado | 2 +- set_paths | 10 +++------- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/llama-tornado b/llama-tornado index 4f4f695a..34d3f405 100755 --- a/llama-tornado +++ b/llama-tornado @@ -422,7 +422,7 @@ def create_parser() -> argparse.ArgumentParser: ) debug_group.add_argument( "--profiler-dump-dir", - default="/home/ruiqi/GPULlama3.java/prof.json", + default=None, help="Directory for profiler output", ) diff --git a/set_paths b/set_paths index c61d735f..d5909616 100644 --- a/set_paths +++ b/set_paths @@ -6,19 +6,15 @@ # Resolve root of this project (LLaMA3) and TornadoVM export LLAMA_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -export TORNADO_ROOT="/home/ruiqi/TornadoVM_OCL/TornadoVM" - -# Set the path to TornadoVM SDK binaries -export TORNADO_SDK="/home/ruiqi/TornadoVM_OCL/TornadoVM/bin/sdk" # Add TornadoVM and LLaMA bin directories to PATH -export PATH="${PATH}:${TORNADO_SDK}:${LLAMA_ROOT}" +export PATH="${PATH}:${TORNADO_SDK}/bin:${LLAMA_ROOT}" # Optional: Set JAVA_HOME if required # export JAVA_HOME=/path/to/graalvm # export PATH="${JAVA_HOME}/bin:${PATH}" -echo "[INFO] Environment configured for LLaMA3 with TornadoVM at: $TORNADO_ROOT" +echo "[INFO] Environment configured for LLaMA3 with TornadoVM at: $TORNADO_SDK" # ===== Notes ===== # After sourcing this script: # 1. TornadoVM will be available for GPU computation @@ -26,4 +22,4 @@ echo "[INFO] Environment configured for LLaMA3 with TornadoVM at: $TORNADO_ROOT" # 3. 
You can run LLaMA3 with GPU acceleration using TornadoVM # # To use this script: source ./setup_environment.sh -# or: . ./setup_environment.sh +# or: . ./setup_environment.sh \ No newline at end of file From b330bcf7bba221acde0db807b46241d4a4807dc8 Mon Sep 17 00:00:00 2001 From: ruiqi Date: Mon, 15 Dec 2025 17:16:00 +0000 Subject: [PATCH 13/14] remove external folder --- external/tornadovm | 1 - 1 file changed, 1 deletion(-) delete mode 160000 external/tornadovm diff --git a/external/tornadovm b/external/tornadovm deleted file mode 160000 index 6e29a5be..00000000 --- a/external/tornadovm +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 6e29a5be7d5e8a70dc780ad9ec5b140a0a09c9c6 From e4cb5fb04e6720ecf6234d9a13265806c382d5bc Mon Sep 17 00:00:00 2001 From: ruiqi Date: Tue, 16 Dec 2025 09:32:40 +0000 Subject: [PATCH 14/14] revert changes --- llama-tornado | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/llama-tornado b/llama-tornado index 34d3f405..c98090f8 100755 --- a/llama-tornado +++ b/llama-tornado @@ -498,6 +498,11 @@ def main(): parser = create_parser() args = parser.parse_args() + # Set default profiler log path relative to LLAMA_ROOT + if args.profiler_dump_dir is None: + llama_root = os.environ.get("LLAMA_ROOT") + args.profiler_dump_dir = os.path.join(llama_root, "profiler-log.json") + # Set default seed if not provided if args.seed is None: args.seed = int(time.time())
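One caveat on the PATCH 14 hunk above: os.environ.get("LLAMA_ROOT") returns None when set_paths has not been sourced, and os.path.join(None, ...) then raises a TypeError before the profiler path is set. A minimal guarded variant is sketched below; default_profiler_dump_dir is an illustrative helper name, not something defined in llama-tornado.

    import os

    def default_profiler_dump_dir() -> str:
        # Prefer the project root exported by set_paths; otherwise fall back to the current directory.
        llama_root = os.environ.get("LLAMA_ROOT") or os.getcwd()
        return os.path.join(llama_root, "profiler-log.json")

    # Usage, mirroring the argument handling in main():
    # if args.profiler_dump_dir is None:
    #     args.profiler_dump_dir = default_profiler_dump_dir()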