diff --git a/.github/actions/lava_job_render/action.yml b/.github/actions/lava_job_render/action.yml
index be9b2587685e4..df6a62f676f12 100644
--- a/.github/actions/lava_job_render/action.yml
+++ b/.github/actions/lava_job_render/action.yml
@@ -1,154 +1,154 @@
-name: Test Action
-inputs:
- docker_image:
- description: Docker image
- required: true
- default: kmake-image:latest
-
-runs:
- using: "composite"
- steps:
- - name: Process presigned_urls.json
- id: process_urls
- uses: actions/github-script@v7
- with:
- script: |
- const fs = require('fs');
- const p = require('path');
- // Helper function to find URL by filename
- function findUrlByFilename(filename) {
- for (const [path, url] of Object.entries(data)) {
- if (path.endsWith(filename)) {
- return url;
- }
- }
- return null;
- }
- const filePath = p.join(process.env.GITHUB_WORKSPACE, 'presigned_urls.json');
- if (fs.existsSync(filePath)) {
- console.log("File exists");
- } else {
- console.log("File does not exist");
- core.setFailed(`File not found: ${filePath}`);
- }
- // Read the JSON file
- const data = JSON.parse(fs.readFileSync(filePath, 'utf-8'));
- // Extract URLs into variables
- const modulesTarUrl = findUrlByFilename('modules.tar.xz');
- const imageUrl = findUrlByFilename('Image');
- const vmlinuxUrl = findUrlByFilename('vmlinux');
- const dtbUrl = findUrlByFilename('qcs6490-rb3gen2.dtb');
- // Set outputs
- core.setOutput('modules_url', modulesTarUrl);
- core.setOutput('image_url', imageUrl);
- core.setOutput('vmlinux_url', vmlinuxUrl);
- core.setOutput('dtb_url', dtbUrl);
- console.log(`Modules URL: ${modulesTarUrl}`);
- console.log(`Image URL: ${imageUrl}`);
- console.log(`Vmlinux URL: ${vmlinuxUrl}`);
- console.log(`Dtb URL: ${dtbUrl}`);
-
- - name: Create metadata.json
- id: create_metadata
- shell: bash
- run: |
- echo "Creating job definition"
- # Create the job definition using the processed URLs
- cd ../job_render
- docker run -i --rm \
- --user "$(id -u):$(id -g)" \
- --workdir="$PWD" \
- -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
- -e dtb_url="${{ steps.process_urls.outputs.dtb_url }}" \
- ${{ inputs.docker_image }} \
- jq '.artifacts["dtbs/qcom/qcs6490-rb3gen2.dtb"] = env.dtb_url' data/metadata.json > temp.json && mv temp.json data/metadata.json
-
- - name: Upload metadata.json
- id: upload_metadata
- uses: ./.github/actions/aws_s3_helper
- with:
- local_file: ../job_render/data/metadata.json
- s3_bucket: qli-prd-kernel-gh-artifacts
- mode: single-upload
-
- - name: Create template json
- shell: bash
- run: |
- echo "Creating job definition"
- metadata_url="${{ steps.upload_metadata.outputs.presigned_url }}"
- vmlinux_url="${{ steps.process_urls.outputs.vmlinux_url }}"
- image_url="${{ steps.process_urls.outputs.image_url }}"
- modules_url="${{ steps.process_urls.outputs.modules_url }}"
- # Create the job definition using the processed URLs
- cd ../job_render
- # using metadata_url
- docker run -i --rm \
- --user "$(id -u):$(id -g)" \
- --workdir="$PWD" \
- -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
- -e metadata_url="$metadata_url" \
- ${{ inputs.docker_image }} \
- jq '.artifacts.metadata = env.metadata_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
- # using image_url
- docker run -i --rm \
- --user "$(id -u):$(id -g)" \
- --workdir="$PWD" \
- -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
- -e image_url="$image_url" \
- ${{ inputs.docker_image }} \
- jq '.artifacts.kernel = env.image_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
- # using vmlinux_url
- docker run -i --rm \
- --user "$(id -u):$(id -g)" \
- --workdir="$PWD" \
- -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
- -e vmlinux_url="$vmlinux_url" \
- ${{ inputs.docker_image }} \
- jq '.artifacts.vmlinux = env.vmlinux_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
- # using modules_url
- docker run -i --rm \
- --user "$(id -u):$(id -g)" \
- --workdir="$PWD" \
- -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
- -e modules_url="$modules_url" \
- ${{ inputs.docker_image }} \
- jq '.artifacts.modules = env.modules_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
-
- - name: Update firmware and ramdisk
- shell: bash
- run: |
- cd ../job_render
- ramdisk_url="$(aws s3 presign s3://qli-prd-kernel-gh-artifacts/meta-qcom/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz --expires 7600)"
- firmware_url="$(aws s3 presign s3://qli-prd-kernel-gh-artifacts/meta-qcom/initramfs-firmware-rb3gen2-image-qcom-armv8a.cpio.gz --expires 7600)"
- # using ramdisk_url
- docker run -i --rm \
- --user "$(id -u):$(id -g)" \
- --workdir="$PWD" \
- -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
- -e ramdisk_url="$ramdisk_url" \
- ${{ inputs.docker_image }} \
- jq '.artifacts.ramdisk = env.ramdisk_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
-
- # using firmware_url
- docker run -i --rm \
- --user "$(id -u):$(id -g)" \
- --workdir="$PWD" \
- -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
- -e firmware_url="$firmware_url" \
- ${{ inputs.docker_image }} \
- jq '.artifacts.firmware = env.firmware_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
-
- - name: Create lava_job_definition
- shell: bash
- run: |
- cd ../job_render
- mkdir renders
- docker run -i --rm \
- --user "$(id -u):$(id -g)" \
- --workdir="$PWD" \
- -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
- ${{ inputs.docker_image }} \
- sh -c 'export BOOT_METHOD=fastboot && \
- export TARGET=qcs6490-rb3gen2 && \
- export TARGET_DTB=qcs6490-rb3gen2 && \
- python3 lava_Job_definition_generator.py --localjson ./data/cloudData.json'
+name: Test Action
+inputs:
+ docker_image:
+ description: Docker image
+ required: true
+ default: kmake-image:latest
+
+runs:
+ using: "composite"
+ steps:
+ - name: Process presigned_urls.json
+ id: process_urls
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const fs = require('fs');
+ const p = require('path');
+ // Helper function to find URL by filename
+ function findUrlByFilename(filename) {
+ for (const [path, url] of Object.entries(data)) {
+ if (path.endsWith(filename)) {
+ return url;
+ }
+ }
+ return null;
+ }
+ const filePath = p.join(process.env.GITHUB_WORKSPACE, 'presigned_urls.json');
+ if (fs.existsSync(filePath)) {
+ console.log("File exists");
+ } else {
+ console.log("File does not exist");
+ core.setFailed(`File not found: ${filePath}`);
+ }
+ // Read the JSON file
+ const data = JSON.parse(fs.readFileSync(filePath, 'utf-8'));
+ // Extract URLs into variables
+ const modulesTarUrl = findUrlByFilename('modules.tar.xz');
+ const imageUrl = findUrlByFilename('Image');
+ const vmlinuxUrl = findUrlByFilename('vmlinux');
+ const dtbUrl = findUrlByFilename('qcs6490-rb3gen2.dtb');
+ // Set outputs
+ core.setOutput('modules_url', modulesTarUrl);
+ core.setOutput('image_url', imageUrl);
+ core.setOutput('vmlinux_url', vmlinuxUrl);
+ core.setOutput('dtb_url', dtbUrl);
+ console.log(`Modules URL: ${modulesTarUrl}`);
+ console.log(`Image URL: ${imageUrl}`);
+ console.log(`Vmlinux URL: ${vmlinuxUrl}`);
+ console.log(`Dtb URL: ${dtbUrl}`);
+
+ - name: Create metadata.json
+ id: create_metadata
+ shell: bash
+ run: |
+ echo "Creating job definition"
+ # Create the job definition using the processed URLs
+ cd ../job_render
+ docker run -i --rm \
+ --user "$(id -u):$(id -g)" \
+ --workdir="$PWD" \
+ -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+ -e dtb_url="${{ steps.process_urls.outputs.dtb_url }}" \
+ ${{ inputs.docker_image }} \
+ jq '.artifacts["dtbs/qcom/qcs6490-rb3gen2.dtb"] = env.dtb_url' data/metadata.json > temp.json && mv temp.json data/metadata.json
+
+ - name: Upload metadata.json
+ id: upload_metadata
+ uses: ./.github/actions/aws_s3_helper
+ with:
+ local_file: ../job_render/data/metadata.json
+ s3_bucket: qli-prd-kernel-gh-artifacts
+ mode: single-upload
+
+ - name: Create template json
+ shell: bash
+ run: |
+ echo "Creating job definition"
+ metadata_url="${{ steps.upload_metadata.outputs.presigned_url }}"
+ vmlinux_url="${{ steps.process_urls.outputs.vmlinux_url }}"
+ image_url="${{ steps.process_urls.outputs.image_url }}"
+ modules_url="${{ steps.process_urls.outputs.modules_url }}"
+ # Create the job definition using the processed URLs
+ cd ../job_render
+ # using metadata_url
+ docker run -i --rm \
+ --user "$(id -u):$(id -g)" \
+ --workdir="$PWD" \
+ -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+ -e metadata_url="$metadata_url" \
+ ${{ inputs.docker_image }} \
+ jq '.artifacts.metadata = env.metadata_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+ # using image_url
+ docker run -i --rm \
+ --user "$(id -u):$(id -g)" \
+ --workdir="$PWD" \
+ -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+ -e image_url="$image_url" \
+ ${{ inputs.docker_image }} \
+ jq '.artifacts.kernel = env.image_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+ # using vmlinux_url
+ docker run -i --rm \
+ --user "$(id -u):$(id -g)" \
+ --workdir="$PWD" \
+ -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+ -e vmlinux_url="$vmlinux_url" \
+ ${{ inputs.docker_image }} \
+ jq '.artifacts.vmlinux = env.vmlinux_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+ # using modules_url
+ docker run -i --rm \
+ --user "$(id -u):$(id -g)" \
+ --workdir="$PWD" \
+ -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+ -e modules_url="$modules_url" \
+ ${{ inputs.docker_image }} \
+ jq '.artifacts.modules = env.modules_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+
+ - name: Update firmware and ramdisk
+ shell: bash
+ run: |
+ cd ../job_render
+ ramdisk_url="$(aws s3 presign s3://qli-prd-kernel-gh-artifacts/meta-qcom/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz --expires 7600)"
+ firmware_url="$(aws s3 presign s3://qli-prd-kernel-gh-artifacts/meta-qcom/initramfs-firmware-rb3gen2-image-qcom-armv8a.cpio.gz --expires 7600)"
+ # using ramdisk_url
+ docker run -i --rm \
+ --user "$(id -u):$(id -g)" \
+ --workdir="$PWD" \
+ -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+ -e ramdisk_url="$ramdisk_url" \
+ ${{ inputs.docker_image }} \
+ jq '.artifacts.ramdisk = env.ramdisk_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+
+ # using firmware_url
+ docker run -i --rm \
+ --user "$(id -u):$(id -g)" \
+ --workdir="$PWD" \
+ -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+ -e firmware_url="$firmware_url" \
+ ${{ inputs.docker_image }} \
+ jq '.artifacts.firmware = env.firmware_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+
+ - name: Create lava_job_definition
+ shell: bash
+ run: |
+ cd ../job_render
+        mkdir -p renders
+ docker run -i --rm \
+ --user "$(id -u):$(id -g)" \
+ --workdir="$PWD" \
+ -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+ ${{ inputs.docker_image }} \
+ sh -c 'export BOOT_METHOD=fastboot && \
+ export TARGET=qcs6490-rb3gen2 && \
+ export TARGET_DTB=qcs6490-rb3gen2 && \
+ python3 lava_Job_definition_generator.py --localjson ./data/cloudData.json --KernelCI_PreMerge'
\ No newline at end of file
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 669e35beb347f..c5d19c97ed718 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,98 +1,98 @@
-name: _test
-description: Run tests on LAVA
-
-on:
- workflow_call:
- inputs:
- docker_image:
- description: Docker image
- type: string
- required: true
- default: kmake-image:latest
-
-jobs:
- test:
- runs-on:
- group: GHA-Kernel-SelfHosted-RG
- labels: [ self-hosted, kernel-prd-u2404-x64-large-od-ephem ]
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ github.ref }}
- fetch-depth: 0
-
- - name: Pull docker image
- uses: ./.github/actions/pull_docker_image
- with:
- image: ${{ inputs.docker_image }}
- github_token: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Download URLs list
- uses: actions/download-artifact@v4
- with:
- name: presigned_urls.json
- path: ${{ github.workspace }}
-
- - name: Clone lava job render scripts
- run: cd .. && git clone https://github.com/qualcomm-linux/job_render
-
- - name: Create lava job definition
- uses: ./.github/actions/lava_job_render
- id: create_job_definition
- with:
- docker_image: ${{ inputs.docker_image }}
-
- - name: Submit lava job
- id: submit_job
- run: |
- cd ../job_render
- job_id=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs submit ./renders/lava_job_definition.yaml")
- job_url="https://lava-oss.qualcomm.com/scheduler/job/$job_id"
- echo "job_id=$job_id" >> $GITHUB_OUTPUT
- echo "job_url=$job_url" >> $GITHUB_OUTPUT
- echo "Lava Job: $job_url"
- echo "JOB_ID=$job_id" >> $GITHUB_ENV
-
- - name: Check lava job results
- id: check_job
- run: |
- STATE=""
- while [ "$STATE" != "Finished" ]; do
- state=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs show $JOB_ID" | grep state)
- STATE=$(echo "$state" | cut -d':' -f2 | sed 's/^ *//;s/ *$//')
- echo "Current status: $STATE"
- sleep 30
- done
- health=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs show $JOB_ID" | grep Health)
- HEALTH=$(echo "$health" | cut -d':' -f2 | sed 's/^ *//;s/ *$//')
- if [[ "$HEALTH" == "Complete" ]]; then
- echo "Lava job passed."
- summary=":heavy_check_mark: Lava job passed."
- echo "summary=$summary" >> $GITHUB_OUTPUT
- exit 0
- else
- echo "Lava job failed."
- summary=":x: Lava job failed."
- echo "summary=$summary" >> $GITHUB_OUTPUT
- exit 1
- fi
-
- - name: Update summary
- if: success() || failure()
- shell: bash
- run: |
- if [ "${{ steps.create_job_definition.conclusion }}" == 'failure' ]; then
- status=":x: Test job failed"
- else
- status="${{ steps.check_job.outputs.summary }}"
- job_url="${{ steps.submit_job.outputs.job_url }}"
- job_id="${{ steps.submit_job.outputs.job_id }}"
- fi
- SUMMARY='
- '${status}'
-
- JOB ID: '${job_id}'
-
- '
+name: _test
+description: Run tests on LAVA
+
+on:
+ workflow_call:
+ inputs:
+ docker_image:
+ description: Docker image
+ type: string
+ required: true
+ default: kmake-image:latest
+
+jobs:
+ test:
+ runs-on:
+ group: GHA-Kernel-SelfHosted-RG
+ labels: [ self-hosted, kernel-prd-u2404-x64-large-od-ephem ]
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ github.ref }}
+ fetch-depth: 0
+
+ - name: Pull docker image
+ uses: ./.github/actions/pull_docker_image
+ with:
+ image: ${{ inputs.docker_image }}
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Download URLs list
+ uses: actions/download-artifact@v4
+ with:
+ name: presigned_urls.json
+ path: ${{ github.workspace }}
+
+ - name: Clone lava job render scripts
+      run: cd .. && git clone https://github.com/qualcomm-linux/job_render
+
+ - name: Create lava job definition
+ uses: ./.github/actions/lava_job_render
+ id: create_job_definition
+ with:
+ docker_image: ${{ inputs.docker_image }}
+
+ - name: Submit lava job
+ id: submit_job
+ run: |
+ cd ../job_render
+ job_id=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs submit ./renders/lava_job_definition.yaml")
+ job_url="https://lava-oss.qualcomm.com/scheduler/job/$job_id"
+ echo "job_id=$job_id" >> $GITHUB_OUTPUT
+ echo "job_url=$job_url" >> $GITHUB_OUTPUT
+ echo "Lava Job: $job_url"
+ echo "JOB_ID=$job_id" >> $GITHUB_ENV
+
+ - name: Check lava job results
+ id: check_job
+ run: |
+ STATE=""
+ while [ "$STATE" != "Finished" ]; do
+ state=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs show $JOB_ID" | grep state)
+ STATE=$(echo "$state" | cut -d':' -f2 | sed 's/^ *//;s/ *$//')
+ echo "Current status: $STATE"
+ sleep 30
+ done
+ health=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs show $JOB_ID" | grep Health)
+ HEALTH=$(echo "$health" | cut -d':' -f2 | sed 's/^ *//;s/ *$//')
+ if [[ "$HEALTH" == "Complete" ]]; then
+ echo "Lava job passed."
+ summary=":heavy_check_mark: Lava job passed."
+ echo "summary=$summary" >> $GITHUB_OUTPUT
+ exit 0
+ else
+ echo "Lava job failed."
+ summary=":x: Lava job failed."
+ echo "summary=$summary" >> $GITHUB_OUTPUT
+ exit 1
+ fi
+
+ - name: Update summary
+ if: success() || failure()
+ shell: bash
+ run: |
+ if [ "${{ steps.create_job_definition.conclusion }}" == 'failure' ]; then
+ status=":x: Test job failed"
+ else
+ status="${{ steps.check_job.outputs.summary }}"
+ job_url="${{ steps.submit_job.outputs.job_url }}"
+ job_id="${{ steps.submit_job.outputs.job_id }}"
+ fi
+ SUMMARY='
+ '${status}'
+
+ JOB ID: '${job_id}'
+
+ '
echo -e "$SUMMARY" >> $GITHUB_STEP_SUMMARY
\ No newline at end of file
diff --git a/drivers/clk/qcom/camcc-sa8775p.c b/drivers/clk/qcom/camcc-sa8775p.c
index 11bd2e2348119..bd75f59d3ffeb 100644
--- a/drivers/clk/qcom/camcc-sa8775p.c
+++ b/drivers/clk/qcom/camcc-sa8775p.c
@@ -10,7 +10,7 @@
#include
#include
-#include <dt-bindings/clock/qcom,sa8775p-camcc.h>
+#include <dt-bindings/clock/qcom,qcs8300-camcc.h>
#include "clk-alpha-pll.h"
#include "clk-branch.h"
@@ -1681,6 +1681,24 @@ static struct clk_branch cam_cc_sm_obs_clk = {
},
};
+static struct clk_branch cam_cc_titan_top_accu_shift_clk = {
+ .halt_reg = 0x131f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x131f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_titan_top_accu_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc cam_cc_titan_top_gdsc = {
.gdscr = 0x131bc,
.en_rest_wait_val = 0x2,
@@ -1775,6 +1793,7 @@ static struct clk_regmap *cam_cc_sa8775p_clocks[] = {
[CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
[CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
[CAM_CC_SM_OBS_CLK] = &cam_cc_sm_obs_clk.clkr,
+ [CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK] = NULL,
[CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
[CAM_CC_QDSS_DEBUG_XO_CLK] = &cam_cc_qdss_debug_xo_clk.clkr,
};
@@ -1811,6 +1830,7 @@ static const struct qcom_cc_desc cam_cc_sa8775p_desc = {
};
static const struct of_device_id cam_cc_sa8775p_match_table[] = {
+ { .compatible = "qcom,qcs8300-camcc" },
{ .compatible = "qcom,sa8775p-camcc" },
{ }
};
@@ -1841,10 +1861,83 @@ static int cam_cc_sa8775p_probe(struct platform_device *pdev)
clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
- /* Keep some clocks always enabled */
- qcom_branch_set_clk_en(regmap, 0x13194); /* CAM_CC_CAMNOC_XO_CLK */
- qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_GDSC_CLK */
- qcom_branch_set_clk_en(regmap, 0x13208); /* CAM_CC_SLEEP_CLK */
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcs8300-camcc")) {
+ cam_cc_camnoc_axi_clk_src.cmd_rcgr = 0x13154;
+ cam_cc_camnoc_axi_clk.halt_reg = 0x1316c;
+ cam_cc_camnoc_axi_clk.clkr.enable_reg = 0x1316c;
+ cam_cc_camnoc_dcd_xo_clk.halt_reg = 0x13174;
+ cam_cc_camnoc_dcd_xo_clk.clkr.enable_reg = 0x13174;
+
+ cam_cc_csi0phytimer_clk_src.cmd_rcgr = 0x15054;
+ cam_cc_csi1phytimer_clk_src.cmd_rcgr = 0x15078;
+ cam_cc_csi2phytimer_clk_src.cmd_rcgr = 0x15098;
+ cam_cc_csid_clk_src.cmd_rcgr = 0x13134;
+
+ cam_cc_mclk0_clk_src.cmd_rcgr = 0x15000;
+ cam_cc_mclk1_clk_src.cmd_rcgr = 0x1501c;
+ cam_cc_mclk2_clk_src.cmd_rcgr = 0x15038;
+
+ cam_cc_fast_ahb_clk_src.cmd_rcgr = 0x13104;
+ cam_cc_slow_ahb_clk_src.cmd_rcgr = 0x1311c;
+ cam_cc_xo_clk_src.cmd_rcgr = 0x131b8;
+ cam_cc_sleep_clk_src.cmd_rcgr = 0x131d4;
+
+ cam_cc_core_ahb_clk.halt_reg = 0x131b4;
+ cam_cc_core_ahb_clk.clkr.enable_reg = 0x131b4;
+
+ cam_cc_cpas_ahb_clk.halt_reg = 0x130f4;
+ cam_cc_cpas_ahb_clk.clkr.enable_reg = 0x130f4;
+ cam_cc_cpas_fast_ahb_clk.halt_reg = 0x130fc;
+ cam_cc_cpas_fast_ahb_clk.clkr.enable_reg = 0x130fc;
+
+ cam_cc_csi0phytimer_clk.halt_reg = 0x1506c;
+ cam_cc_csi0phytimer_clk.clkr.enable_reg = 0x1506c;
+ cam_cc_csi1phytimer_clk.halt_reg = 0x15090;
+ cam_cc_csi1phytimer_clk.clkr.enable_reg = 0x15090;
+ cam_cc_csi2phytimer_clk.halt_reg = 0x150b0;
+ cam_cc_csi2phytimer_clk.clkr.enable_reg = 0x150b0;
+ cam_cc_csid_clk.halt_reg = 0x1314c;
+ cam_cc_csid_clk.clkr.enable_reg = 0x1314c;
+ cam_cc_csid_csiphy_rx_clk.halt_reg = 0x15074;
+ cam_cc_csid_csiphy_rx_clk.clkr.enable_reg = 0x15074;
+ cam_cc_csiphy0_clk.halt_reg = 0x15070;
+ cam_cc_csiphy0_clk.clkr.enable_reg = 0x15070;
+ cam_cc_csiphy1_clk.halt_reg = 0x15094;
+ cam_cc_csiphy1_clk.clkr.enable_reg = 0x15094;
+ cam_cc_csiphy2_clk.halt_reg = 0x150b4;
+ cam_cc_csiphy2_clk.clkr.enable_reg = 0x150b4;
+
+ cam_cc_mclk0_clk.halt_reg = 0x15018;
+ cam_cc_mclk0_clk.clkr.enable_reg = 0x15018;
+ cam_cc_mclk1_clk.halt_reg = 0x15034;
+ cam_cc_mclk1_clk.clkr.enable_reg = 0x15034;
+ cam_cc_mclk2_clk.halt_reg = 0x15050;
+ cam_cc_mclk2_clk.clkr.enable_reg = 0x15050;
+ cam_cc_qdss_debug_xo_clk.halt_reg = 0x1319c;
+ cam_cc_qdss_debug_xo_clk.clkr.enable_reg = 0x1319c;
+
+ cam_cc_titan_top_gdsc.gdscr = 0x131a0;
+
+ cam_cc_sa8775p_clocks[CAM_CC_CCI_3_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CCI_3_CLK_SRC] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CSI3PHYTIMER_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CSI3PHYTIMER_CLK_SRC] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CSIPHY3_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_MCLK3_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_MCLK3_CLK_SRC] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK] =
+ &cam_cc_titan_top_accu_shift_clk.clkr;
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x13178); /* CAM_CC_CAMNOC_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131d0); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_SLEEP_CLK */
+ } else {
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x13194); /* CAM_CC_CAMNOC_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x13208); /* CAM_CC_SLEEP_CLK */
+ }
ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sa8775p_desc, regmap);
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs615.c b/drivers/pinctrl/qcom/pinctrl-qcs615.c
index 23015b055f6a9..17ca743c2210f 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs615.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs615.c
@@ -1062,7 +1062,7 @@ static const struct msm_pinctrl_soc_data qcs615_tlmm = {
.nfunctions = ARRAY_SIZE(qcs615_functions),
.groups = qcs615_groups,
.ngroups = ARRAY_SIZE(qcs615_groups),
- .ngpios = 123,
+ .ngpios = 124,
.tiles = qcs615_tiles,
.ntiles = ARRAY_SIZE(qcs615_tiles),
.wakeirq_map = qcs615_pdc_map,