diff --git a/docs/domain_support.md b/docs/domain_support.md index 65b61427166..b34e43aa8da 100644 --- a/docs/domain_support.md +++ b/docs/domain_support.md @@ -41,6 +41,7 @@ has following details: * **name** - Name of this domain * **assigned_harts** - HARTs assigned to this domain * **possible_harts** - HARTs possible in this domain +* **hartindex_to_context_table** - Contexts corresponding to possible HARTs * **regions** - Array of memory regions terminated by a memory region with order zero * **boot_hartid** - HART id of the HART booting this domain. The domain @@ -80,6 +81,7 @@ following manner: platform support * **possible_harts** - All valid HARTs of a RISC-V platform are possible HARTs of the ROOT domain +* **hartindex_to_context_table** - Contexts corresponding to ROOT domain's possible HARTs * **regions** - Two memory regions available to the ROOT domain: **A)** A memory region to protect OpenSBI firmware from S-mode and U-mode **B)** A memory region of **order=__riscv_xlen** allowing S-mode and diff --git a/firmware/payloads/objects.mk b/firmware/payloads/objects.mk index 21e0185a8d5..91373aa42e9 100644 --- a/firmware/payloads/objects.mk +++ b/firmware/payloads/objects.mk @@ -10,7 +10,7 @@ firmware-bins-$(FW_PAYLOAD) += payloads/test.bin test-y += test_head.o -test-y += test_main.o +test-y += test_main.o test_sse.o %/test.o: $(foreach obj,$(test-y),%/$(obj)) $(call merge_objs,$@,$^) diff --git a/firmware/payloads/test_main.c b/firmware/payloads/test_main.c index 194dbbe6c50..80f39e1c0a9 100644 --- a/firmware/payloads/test_main.c +++ b/firmware/payloads/test_main.c @@ -40,7 +40,7 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, return ret; } -static inline void sbi_ecall_console_puts(const char *str) +void sbi_ecall_console_puts(const char *str) { sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE, sbi_strlen(str), (unsigned long)str, 0, 0, 0, 0); @@ -51,10 +51,14 @@ static inline void sbi_ecall_console_puts(const char *str) __asm__ __volatile__("wfi" ::: "memory"); \ } while (0) +void test_sse(void); + void test_main(unsigned long a0, unsigned long a1) { sbi_ecall_console_puts("\nTest payload running\n"); + test_sse(); + while (1) wfi(); } diff --git a/firmware/payloads/test_sse.c b/firmware/payloads/test_sse.c new file mode 100644 index 00000000000..b94c63aa7d6 --- /dev/null +++ b/firmware/payloads/test_sse.c @@ -0,0 +1,226 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2019 Western Digital Corporation or its affiliates. 
+ * + * Authors: + * Anup Patel + */ +#include +#include + +struct sse_entry_state { + /** Entry program counter */ + unsigned long pc; + /** ra register state */ + unsigned long ra; + /** sp register state */ + unsigned long sp; + /** gp register state */ + unsigned long gp; + /** tp register state */ + unsigned long tp; + /** t0 register state */ + unsigned long t0; + /** t1 register state */ + unsigned long t1; + /** t2 register state */ + unsigned long t2; + /** s0 register state */ + unsigned long s0; + /** s1 register state */ + unsigned long s1; + /** a0 register state */ + unsigned long a0; + /** a1 register state */ + unsigned long a1; + /** a2 register state */ + unsigned long a2; + /** a3 register state */ + unsigned long a3; + /** a4 register state */ + unsigned long a4; + /** a5 register state */ + unsigned long a5; + /** a6 register state */ + unsigned long a6; + /** a7 register state */ + unsigned long a7; + /** s2 register state */ + unsigned long s2; + /** s3 register state */ + unsigned long s3; + /** s4 register state */ + unsigned long s4; + /** s5 register state */ + unsigned long s5; + /** s6 register state */ + unsigned long s6; + /** s7 register state */ + unsigned long s7; + /** s8 register state */ + unsigned long s8; + /** s9 register state */ + unsigned long s9; + /** s10 register state */ + unsigned long s10; + /** s11 register state */ + unsigned long s11; + /** t3 register state */ + unsigned long t3; + /** t4 register state */ + unsigned long t4; + /** t5 register state */ + unsigned long t5; + /** t6 register state */ + unsigned long t6; +} __packed; + +struct sse_interrupted_state { + /** Interrupted program counter */ + unsigned long pc; + /** ra register state */ + unsigned long ra; + /** sp register state */ + unsigned long sp; + /** gp register state */ + unsigned long gp; + /** tp register state */ + unsigned long tp; + /** t0 register state */ + unsigned long t0; + /** t1 register state */ + unsigned long t1; + /** t2 register state */ + unsigned long t2; + /** s0 register state */ + unsigned long s0; + /** s1 register state */ + unsigned long s1; + /** a0 register state */ + unsigned long a0; + /** a1 register state */ + unsigned long a1; + /** a2 register state */ + unsigned long a2; + /** a3 register state */ + unsigned long a3; + /** a4 register state */ + unsigned long a4; + /** a5 register state */ + unsigned long a5; + /** a6 register state */ + unsigned long a6; + /** a7 register state */ + unsigned long a7; + /** s2 register state */ + unsigned long s2; + /** s3 register state */ + unsigned long s3; + /** s4 register state */ + unsigned long s4; + /** s5 register state */ + unsigned long s5; + /** s6 register state */ + unsigned long s6; + /** s7 register state */ + unsigned long s7; + /** s8 register state */ + unsigned long s8; + /** s9 register state */ + unsigned long s9; + /** s10 register state */ + unsigned long s10; + /** s11 register state */ + unsigned long s11; + /** t3 register state */ + unsigned long t3; + /** t4 register state */ + unsigned long t4; + /** t5 register state */ + unsigned long t5; + /** t6 register state */ + unsigned long t6; + /** Exception mode */ + unsigned long exc_mode; +}; + +struct sbi_sse_handler_ctx { + struct sse_entry_state entry; + struct sse_interrupted_state interrupted; +}; + + +#define SBI_ECALL_OUTVAL(__eid, __fid, __a0, __a1, __a2, __outval) \ + ({ \ + register unsigned long a0 asm("a0") = (unsigned long)(__a0); \ + register unsigned long a1 asm("a1") = (unsigned long)(__a1); \ + register 
unsigned long a2 asm("a2") = (unsigned long)(__a2); \ + register unsigned long a6 asm("a6") = (unsigned long)(__fid); \ + register unsigned long a7 asm("a7") = (unsigned long)(__eid); \ + asm volatile("ecall" \ + : "+r"(a0) \ + : "r"(a1), "r"(a2), "r"(a6), "r"(a7) \ + : "memory"); \ + __outval = a1; \ + a0; \ + }) + +void sbi_ecall_console_puts(const char *str); + +static u8 sse_stack[2][1024]; +static int first_time = 1; + +static void sse_test_handler(void *arg) +{ + unsigned long out; + sbi_ecall_console_puts("Handler invoked!\n"); + + if (first_time) { + first_time = 0; + SBI_ECALL_OUTVAL(SBI_EXT_SSE, SBI_EXT_SSE_INJECT, + SBI_SSE_EVENT_LOCAL_RAS_0, 0, 0, out); + } + + SBI_ECALL_OUTVAL(SBI_EXT_SSE, SBI_EXT_SSE_COMPLETE, + SBI_SSE_EVENT_LOCAL_RAS_0, 0, 0, out); + + (void)out; /* value returned by COMPLETE is intentionally unused */ +} + +void test_sse(void) +{ + struct sbi_sse_handler_ctx ctx; + unsigned long out, ret; + + sbi_memset(&ctx, 0, sizeof(ctx)); + ctx.entry.pc = (unsigned long)sse_test_handler; + ctx.entry.sp = (unsigned long)sse_stack[0]; + + sbi_ecall_console_puts("Starting SSE test\n"); + + ret = SBI_ECALL_OUTVAL(SBI_EXT_SSE, SBI_EXT_SSE_REGISTER, + SBI_SSE_EVENT_LOCAL_RAS_0, &ctx, 0, out); + if (ret) { + sbi_ecall_console_puts("SSE Register failed\n"); + return; + } + + ret = SBI_ECALL_OUTVAL(SBI_EXT_SSE, SBI_EXT_SSE_ENABLE, + SBI_SSE_EVENT_LOCAL_RAS_0, 0, 0, out); + if (ret) { + sbi_ecall_console_puts("SSE Enable failed\n"); + return; + } + + ret = SBI_ECALL_OUTVAL(SBI_EXT_SSE, SBI_EXT_SSE_INJECT, + SBI_SSE_EVENT_LOCAL_RAS_0, 0, 0, out); + if (ret) { + sbi_ecall_console_puts("SSE Inject failed\n"); + return; + } + + (void)out; /* silence set-but-unused warning */ + + sbi_ecall_console_puts("Finished SSE test\n"); +} diff --git a/include/sbi/riscv_encoding.h b/include/sbi/riscv_encoding.h index e74cc0df1c4..de953f2345a 100644 --- a/include/sbi/riscv_encoding.h +++ b/include/sbi/riscv_encoding.h @@ -91,6 +91,7 @@ #define IRQ_M_EXT 11 #define IRQ_S_GEXT 12 #define IRQ_PMU_OVF 13 +#define IRQ_RASHP_INT 43 #define MIP_SSIP (_UL(1) << IRQ_S_SOFT) #define MIP_VSSIP (_UL(1) << IRQ_VS_SOFT) @@ -103,6 +104,11 @@ #define MIP_MEIP (_UL(1) << IRQ_M_EXT) #define MIP_SGEIP (_UL(1) << IRQ_S_GEXT) #define MIP_LCOFIP (_UL(1) << IRQ_PMU_OVF) +#if __riscv_xlen == 64 +#define MIP_RASHP_INTP (_UL(1) << IRQ_RASHP_INT) +#else +#define MIPH_RASHP_INTP (_UL(1) << (IRQ_RASHP_INT - 32)) +#endif #define SIP_SSIP MIP_SSIP #define SIP_STIP MIP_STIP diff --git a/include/sbi/sbi_domain.h b/include/sbi/sbi_domain.h index c88dbac63d5..3cf58894d64 100644 --- a/include/sbi/sbi_domain.h +++ b/include/sbi/sbi_domain.h @@ -12,6 +12,8 @@ #include #include +#include +#include struct sbi_scratch; @@ -176,6 +178,10 @@ struct sbi_domain { char name[64]; /** Possible HARTs in this domain */ const struct sbi_hartmask *possible_harts; + /** Contexts for possible HARTs indexed by hartindex */ + struct sbi_context *hartindex_to_context_table[SBI_HARTMASK_MAX_BITS]; + /** rpxy state for possible HARTs indexed by hartindex */ + struct rpxy_state *hartindex_to_rs_table[SBI_HARTMASK_MAX_BITS]; /** Array of memory regions terminated by a region with order zero */ struct sbi_domain_memregion *regions; /** HART id of the HART booting this domain */ @@ -200,6 +206,9 @@ extern struct sbi_domain root; /** Get pointer to sbi_domain from HART index */ struct sbi_domain *sbi_hartindex_to_domain(u32 hartindex); +/** Update HART local pointer to point to specified domain */ +void sbi_update_hartindex_to_domain(u32 hartindex, struct sbi_domain *dom); + /** Get pointer to sbi_domain for current HART */ #define 
sbi_domain_thishart_ptr() \ sbi_hartindex_to_domain(sbi_hartid_to_hartindex(current_hartid())) diff --git a/include/sbi/sbi_domain_context.h b/include/sbi/sbi_domain_context.h new file mode 100755 index 00000000000..edba764fa21 --- /dev/null +++ b/include/sbi/sbi_domain_context.h @@ -0,0 +1,77 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) IPADS@SJTU 2023. All rights reserved. + */ + +#ifndef __SBI_DOMAIN_CONTEXT_H__ +#define __SBI_DOMAIN_CONTEXT_H__ + +#include +#include +#include + +/** Context representation for a hart within a domain */ +struct sbi_context { + /** Trap-related states such as GPRs, mepc, and mstatus */ + struct sbi_trap_regs regs; + + /** Supervisor status register */ + unsigned long sstatus; + /** Supervisor interrupt enable register */ + unsigned long sie; + /** Supervisor trap vector base address register */ + unsigned long stvec; + /** Supervisor scratch register for temporary storage */ + unsigned long sscratch; + /** Supervisor exception program counter register */ + unsigned long sepc; + /** Supervisor cause register */ + unsigned long scause; + /** Supervisor trap value register */ + unsigned long stval; + /** Supervisor interrupt pending register */ + unsigned long sip; + /** Supervisor address translation and protection register */ + unsigned long satp; + /** Counter-enable register */ + unsigned long scounteren; + /** Supervisor environment configuration register */ + unsigned long senvcfg; + + /** Reference to the owning domain */ + struct sbi_domain *dom; + /** Previous context (caller) to jump to during context exits */ + struct sbi_context *prev_ctx; + /** Is context initialized and runnable */ + bool initialized; +}; + +/** Get the context pointer for a given hart index and domain */ +#define sbi_hartindex_to_domain_context(__hartindex, __d) \ + (__d)->hartindex_to_context_table[__hartindex] + +/** Macro to obtain the current hart's context pointer */ +#define sbi_domain_context_thishart_ptr() \ + sbi_hartindex_to_domain_context( \ + sbi_hartid_to_hartindex(current_hartid()), \ + sbi_domain_thishart_ptr()) + +/** + * Enter a specific domain context synchronously + * @param dom pointer to domain + * + * @return 0 on success and negative error code on failure + */ +int sbi_domain_context_enter(struct sbi_domain *dom); + +/** + * Exit the current domain context, and then return to the caller + * of sbi_domain_context_enter or attempt to start the next domain + * context to be initialized + * + * @return 0 on success and negative error code on failure + */ +int sbi_domain_context_exit(void); + +#endif // __SBI_DOMAIN_CONTEXT_H__ diff --git a/include/sbi/sbi_ecall_interface.h b/include/sbi/sbi_ecall_interface.h index 690c31bc561..025cf07633e 100644 --- a/include/sbi/sbi_ecall_interface.h +++ b/include/sbi/sbi_ecall_interface.h @@ -33,6 +33,8 @@ #define SBI_EXT_SUSP 0x53555350 #define SBI_EXT_CPPC 0x43505043 #define SBI_EXT_DBTR 0x44425452 +#define SBI_EXT_RPXY 0x52505859 +#define SBI_EXT_SSE 0x535345 /* SBI function IDs for BASE extension*/ #define SBI_EXT_BASE_GET_SPEC_VERSION 0x0 @@ -304,6 +306,45 @@ enum sbi_cppc_reg_id { SBI_CPPC_NON_ACPI_LAST = SBI_CPPC_TRANSITION_LATENCY, }; +/* SBI function IDs for RPXY extension */ +#define SBI_EXT_RPXY_PROBE 0x0 +#define SBI_EXT_RPXY_SET_SHMEM 0x1 +#define SBI_EXT_RPXY_SEND_NORMAL_MESSAGE 0x2 +#define SBI_EXT_RPXY_SEND_POSTED_MESSAGE 0x3 +#define SBI_EXT_RPXY_GET_NOTIFICATION_EVENTS 0x4 + +/* SBI Function IDs for SSE extension */ +#define SBI_EXT_SSE_GET_ATTR 0x00000000 +#define 
SBI_EXT_SSE_SET_ATTR 0x00000001 +#define SBI_EXT_SSE_REGISTER 0x00000002 +#define SBI_EXT_SSE_UNREGISTER 0x00000003 +#define SBI_EXT_SSE_ENABLE 0x00000004 +#define SBI_EXT_SSE_DISABLE 0x00000005 +#define SBI_EXT_SSE_COMPLETE 0x00000006 +#define SBI_EXT_SSE_INJECT 0x00000007 + +/* SBI SSE Event Attributes. */ +#define SBI_SSE_ATTR_STATE 0x00000000 +#define SBI_SSE_ATTR_PRIO 0x00000001 +#define SBI_SSE_ATTR_ALLOW_INJECT 0x00000002 +#define SBI_SSE_ATTR_HART_ID 0x00000003 +#define SBI_SSE_ATTR_PENDING 0x00000004 + +/* SBI SSE Event IDs. */ +enum sbi_sse_event_id { + SBI_SSE_EVENT_LOCAL_RAS_0, + SBI_SSE_EVENT_LOCAL_RAS_1, + SBI_SSE_EVENT_LOCAL_RAS_RSVD = 0x00000100, + SBI_SSE_EVENT_LOCAL_PMU, + SBI_SSE_EVENT_LOCAL_ASYNC_PF, + SBI_SSE_EVENT_LOCAL_DEBUG = 0x7fffffff, + SBI_SSE_EVENT_GLOBAL_RAS = 0x80000000, + SBI_SSE_EVENT_GLOBAL_RAS_RSVC = 0x80000100, + SBI_SSE_EVENT_GLOBAL_DEBUG = 0xffffffff, +}; + +#define SBI_SSE_COMPLETE_FLAG_EVENT_DISABLE (1 << 0) + /* SBI base specification related macros */ #define SBI_SPEC_VERSION_MAJOR_OFFSET 24 #define SBI_SPEC_VERSION_MAJOR_MASK 0x7f @@ -324,8 +365,11 @@ enum sbi_cppc_reg_id { #define SBI_ERR_ALREADY_STARTED -7 #define SBI_ERR_ALREADY_STOPPED -8 #define SBI_ERR_NO_SHMEM -9 +#define SBI_ERR_INVALID_STATE -10 +#define SBI_ERR_BAD_RANGE -11 +#define SBI_ERR_BUSY -12 -#define SBI_LAST_ERR SBI_ERR_NO_SHMEM +#define SBI_LAST_ERR SBI_ERR_BUSY /* clang-format on */ diff --git a/include/sbi/sbi_error.h b/include/sbi/sbi_error.h index a77e3f8bb3c..f1a8724ef78 100644 --- a/include/sbi/sbi_error.h +++ b/include/sbi/sbi_error.h @@ -24,6 +24,9 @@ #define SBI_EALREADY_STARTED SBI_ERR_ALREADY_STARTED #define SBI_EALREADY_STOPPED SBI_ERR_ALREADY_STOPPED #define SBI_ENO_SHMEM SBI_ERR_NO_SHMEM +#define SBI_EINVALID_STATE SBI_ERR_INVALID_STATE +#define SBI_EBAD_RANGE SBI_ERR_BAD_RANGE +#define SBI_EBUSY SBI_ERR_BUSY #define SBI_ENODEV -1000 #define SBI_ENOSYS -1001 @@ -34,6 +37,7 @@ #define SBI_ENOMEM -1006 #define SBI_EUNKNOWN -1007 #define SBI_ENOENT -1008 +#define SBI_EJUMP -1009 /* clang-format on */ diff --git a/include/sbi/sbi_hsm.h b/include/sbi/sbi_hsm.h index 4b5601ba40c..54086b3a7f3 100644 --- a/include/sbi/sbi_hsm.h +++ b/include/sbi/sbi_hsm.h @@ -39,8 +39,12 @@ struct sbi_hsm_device { * * For successful non-retentive suspend, the hart will resume from * the warm boot entry point. + * + * NOTE: mmode_resume_addr (resume address) is optional and + * may or may not be honored by the platform. If it is not + * honored, SBI will resume from the predefined warm-boot address */ - int (*hart_suspend)(u32 suspend_type); + int (*hart_suspend)(u32 suspend_type, ulong mmode_resume_addr); /** * Perform platform-specific actions to resume from a suspended state. diff --git a/include/sbi/sbi_ipi.h b/include/sbi/sbi_ipi.h index d396233487e..0b321946e75 100644 --- a/include/sbi/sbi_ipi.h +++ b/include/sbi/sbi_ipi.h @@ -11,6 +11,7 @@ #define __SBI_IPI_H__ #include +#include /* clang-format off */ @@ -68,7 +69,8 @@ struct sbi_ipi_event_ops { * Note: This is a mandatory callback and it is called on the * remote HART after IPI is triggered. 
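For platform HSM drivers, the hart_suspend callback above now receives the warm-boot address as a second parameter. A minimal sketch of how a driver might honor it; all myplat_* hooks are hypothetical stubs, and returning SBI_ENOTSUPP for the default retentive type deliberately falls back to OpenSBI's generic WFI path (see the sbi_hsm.c hunk later in this series):

#include <sbi/sbi_ecall_interface.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_types.h>

/* Hypothetical SoC hooks, stubbed for illustration */
static bool myplat_has_wakeup_vector(void) { return false; }
static void myplat_set_wakeup_vector(ulong addr) { (void)addr; }
static int myplat_enter_deep_sleep(void) { return 0; }

static int myplat_hart_suspend(u32 suspend_type, ulong mmode_resume_addr)
{
	/* Let OpenSBI's generic WFI path handle the default retentive type */
	if (suspend_type == SBI_HSM_SUSPEND_RET_DEFAULT)
		return SBI_ENOTSUPP;

	/*
	 * Non-retentive suspend: honor mmode_resume_addr if the hardware
	 * has a programmable wakeup vector; otherwise it is ignored and
	 * the hart resumes from the predefined warm-boot address.
	 */
	if (myplat_has_wakeup_vector())
		myplat_set_wakeup_vector(mmode_resume_addr);

	return myplat_enter_deep_sleep();
}

static const struct sbi_hsm_device myplat_hsm = {
	.name		= "myplat-hsm",
	.hart_suspend	= myplat_hart_suspend,
};

/* Passed to sbi_hsm_set_device() during platform early init */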
*/ - void (* process)(struct sbi_scratch *scratch); + void (* process)(struct sbi_scratch *scratch, + struct sbi_trap_regs *regs); }; int sbi_ipi_send_many(ulong hmask, ulong hbase, u32 event, void *data); @@ -83,7 +85,7 @@ void sbi_ipi_clear_smode(void); int sbi_ipi_send_halt(ulong hmask, ulong hbase); -void sbi_ipi_process(void); +void sbi_ipi_process(struct sbi_trap_regs *regs); int sbi_ipi_raw_send(u32 hartindex); diff --git a/include/sbi/sbi_platform.h b/include/sbi/sbi_platform.h index 2fb33e16e4e..bc774ff9aea 100644 --- a/include/sbi/sbi_platform.h +++ b/include/sbi/sbi_platform.h @@ -133,6 +133,9 @@ struct sbi_platform_operations { /** Exit platform timer for current HART */ void (*timer_exit)(void); + /** Initialize the platform RPMI proxy service groups */ + int (*rpxy_init)(void); + /** Check if SBI vendor extension is implemented or not */ bool (*vendor_ext_check)(void); /** platform specific SBI extension implementation provider */ @@ -635,6 +638,20 @@ static inline void sbi_platform_timer_exit(const struct sbi_platform *plat) sbi_platform_ops(plat)->timer_exit(); } +/** + * Initialize the platform RPMI proxy service groups + * + * @param plat pointer to struct sbi_platform + * + * @return 0 on success and negative error code on failure + */ +static inline int sbi_platform_rpxy_init(const struct sbi_platform *plat) +{ + if (plat && sbi_platform_ops(plat)->rpxy_init) + return sbi_platform_ops(plat)->rpxy_init(); + return 0; +} + /** * Check if SBI vendor extension is implemented or not. * diff --git a/include/sbi/sbi_ras.h b/include/sbi/sbi_ras.h new file mode 100644 index 00000000000..890caf04d89 --- /dev/null +++ b/include/sbi/sbi_ras.h @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems, Inc. + * + * Author(s): + * Himanshu Chauhan + */ + +#ifndef __SBI_RAS_H__ +#define __SBI_RAS_H__ + +#include + +/** RAS Agent */ +struct sbi_ras_agent { + /** Name of the RAS agent */ + char name[32]; + + /** probe - returns register width if implemented, 0 otherwise */ + int (*ras_probe)(void); + + /** synchronize CPU errors */ + int (*ras_sync_hart_errs)(u32 *pending_vectors, u32 *nr_pending, + u32 *nr_remaining); + + /** synchronize device errors */ + int (*ras_sync_dev_errs)(u32 *pending_vectors, u32 *nr_pending, + u32 *nr_remaining); +}; + +int sbi_ras_probe(void); +int sbi_ras_sync_hart_errs(u32 *pending_vectors, u32 *nr_pending, + u32 *nr_remaining); +int sbi_ras_sync_dev_errs(u32 *pending_vectors, u32 *nr_pending, + u32 *nr_remaining); + +const struct sbi_ras_agent *sbi_ras_get_agent(void); +void sbi_ras_set_agent(const struct sbi_ras_agent *agent); + +#endif diff --git a/include/sbi/sbi_rpxy.h b/include/sbi/sbi_rpxy.h new file mode 100644 index 00000000000..de656b3124d --- /dev/null +++ b/include/sbi/sbi_rpxy.h @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. 
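The RAS agent interface above is a thin dispatch table. A minimal sketch of registering one, assuming a hypothetical myplat error-bank driver; the callback contract (counts reported through the out parameters) follows the prototypes above:

#include <sbi/sbi_ras.h>
#include <sbi/sbi_types.h>

static int myplat_ras_probe(void)
{
	return 64; /* register width when implemented, 0 otherwise */
}

static int myplat_ras_sync_hart_errs(u32 *pending_vectors, u32 *nr_pending,
				     u32 *nr_remaining)
{
	/* A real agent would drain its hart error banks here */
	*nr_pending = 0;
	*nr_remaining = 0;
	return 0;
}

static const struct sbi_ras_agent myplat_ras = {
	.name		    = "myplat-ras",
	.ras_probe	    = myplat_ras_probe,
	.ras_sync_hart_errs = myplat_ras_sync_hart_errs,
	.ras_sync_dev_errs  = myplat_ras_sync_hart_errs, /* shared stub */
};

void myplat_ras_init(void)
{
	/* Typically called from the platform's cold-boot path */
	sbi_ras_set_agent(&myplat_ras);
}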
+ * + * Authors: + * Anup Patel + */ + +#ifndef __SBI_RPXY_H__ +#define __SBI_RPXY_H__ + +#include + +struct sbi_scratch; + +/** A RPMI proxy service accessible through SBI interface */ +struct sbi_rpxy_service { + u8 id; + u32 min_tx_len; + u32 max_tx_len; + u32 min_rx_len; + u32 max_rx_len; +}; + +/** A RPMI proxy service group accessible through SBI interface */ +struct sbi_rpxy_service_group { + /** List head to a set of service groups */ + struct sbi_dlist head; + + /** Details identifying this service group */ + u32 transport_id; + u32 service_group_id; + unsigned long max_message_data_len; + + /** Array of supported services */ + int num_services; + struct sbi_rpxy_service *services; + + /** + * Send a normal/posted message for this service group + * NOTE: For posted message, ack_len == NULL + */ + int (*send_message)(struct sbi_rpxy_service_group *grp, + struct sbi_rpxy_service *srv, + void *tx, u32 tx_len, + void *rx, u32 rx_len, + unsigned long *ack_len); + + /** Get notification events for this service group */ + int (*get_notification_events)(struct sbi_rpxy_service_group *grp, + void *output_data, + u32 output_data_len, + unsigned long *events_len); +}; + +/** A RPXY state structure */ +struct rpxy_state { + unsigned long shmem_size; + unsigned long shmem_addr; +}; + +/** Get the context pointer for a given hart index and domain */ +#define sbi_hartindex_to_domain_rs(__hartindex, __d) \ + (__d)->hartindex_to_rs_table[__hartindex] + +/** Macro to obtain the current hart's context pointer */ +#define sbi_domain_rs_thishart_ptr() \ + sbi_hartindex_to_domain_rs( \ + sbi_hartid_to_hartindex(current_hartid()), \ + sbi_domain_thishart_ptr()) + +/** Check if some RPMI proxy service group is available */ +bool sbi_rpxy_service_group_available(void); + +/** Probe RPMI proxy service group */ +int sbi_rpxy_probe(u32 transport_id, u32 service_group_id, + unsigned long *out_max_data_len); + +/** Set RPMI proxy shared memory on the calling HART */ +int sbi_rpxy_set_shmem(unsigned long shmem_size, + unsigned long shmem_phys_lo, + unsigned long shmem_phys_hi, + unsigned long flags); + +/** Send a normal/posted RPMI proxy message */ +int sbi_rpxy_send_message(u32 transport_id, + u32 service_group_id, + u8 service_id, + unsigned long message_data_len, + unsigned long *ack_data_len); + +/** Get RPMI proxy notification events */ +int sbi_rpxy_get_notification_events(u32 transport_id, u32 service_group_id, + unsigned long *events_len); + +/** Register a RPMI proxy service group */ +int sbi_rpxy_register_service_group(struct sbi_rpxy_service_group *grp); + +/** Initialize RPMI proxy subsystem */ +int sbi_rpxy_init(struct sbi_scratch *scratch); + +#endif diff --git a/include/sbi/sbi_sse.h b/include/sbi/sbi_sse.h new file mode 100644 index 00000000000..ca16db0f2f3 --- /dev/null +++ b/include/sbi/sbi_sse.h @@ -0,0 +1,188 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Rivos Systems. 
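To make a service group reachable through the new SBI extension, a transport driver fills in one of these structures and registers it. A sketch with a single hypothetical service; the IDs mirror the RPMI clock group purely as an example, and the send_message body is left as a stub:

#include <sbi/sbi_error.h>
#include <sbi/sbi_rpxy.h>
#include <sbi/sbi_types.h>

static struct sbi_rpxy_service clk_services[] = {
	{
		.id		= 0x02,	/* hypothetical service id */
		.min_tx_len	= sizeof(u32),
		.max_tx_len	= 64,
		.min_rx_len	= sizeof(u32),
		.max_rx_len	= 64,
	},
};

static int clk_send_message(struct sbi_rpxy_service_group *grp,
			    struct sbi_rpxy_service *srv,
			    void *tx, u32 tx_len, void *rx, u32 rx_len,
			    unsigned long *ack_len)
{
	/* Forward to the underlying transport; posted when ack_len == NULL */
	return SBI_ENOTSUPP;
}

static struct sbi_rpxy_service_group clk_group = {
	.transport_id		= 0,		/* hypothetical */
	.service_group_id	= 0x00007,	/* e.g. RPMI_SRVGRP_CLOCK */
	.max_message_data_len	= 64,
	.num_services		= sizeof(clk_services) / sizeof(clk_services[0]),
	.services		= clk_services,
	.send_message		= clk_send_message,
};

/* In the driver's init path: rc = sbi_rpxy_register_service_group(&clk_group); */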
+ */ + +#ifndef __SBI_SSE_H__ +#define __SBI_SSE_H__ + +#include +#include +#include + +struct sbi_scratch; + +#define EXC_MODE_PP _UL(1 << 0) +#define EXC_MODE_PP_SHIFT 0 +#define EXC_MODE_PV _UL(1 << 1) +#define EXC_MODE_PV_SHIFT 1 +#define EXC_MODE_SSTATUS_SPIE _UL(1 << 2) +#define EXC_MODE_SSTATUS_SPIE_SHIFT 2 + +struct sse_entry_state { + /** Entry program counter */ + unsigned long pc; + /** ra register state */ + unsigned long ra; + /** sp register state */ + unsigned long sp; + /** gp register state */ + unsigned long gp; + /** tp register state */ + unsigned long tp; + /** t0 register state */ + unsigned long t0; + /** t1 register state */ + unsigned long t1; + /** t2 register state */ + unsigned long t2; + /** s0 register state */ + unsigned long s0; + /** s1 register state */ + unsigned long s1; + /** a0 register state */ + unsigned long a0; + /** a1 register state */ + unsigned long a1; + /** a2 register state */ + unsigned long a2; + /** a3 register state */ + unsigned long a3; + /** a4 register state */ + unsigned long a4; + /** a5 register state */ + unsigned long a5; + /** a6 register state */ + unsigned long a6; + /** a7 register state */ + unsigned long a7; + /** s2 register state */ + unsigned long s2; + /** s3 register state */ + unsigned long s3; + /** s4 register state */ + unsigned long s4; + /** s5 register state */ + unsigned long s5; + /** s6 register state */ + unsigned long s6; + /** s7 register state */ + unsigned long s7; + /** s8 register state */ + unsigned long s8; + /** s9 register state */ + unsigned long s9; + /** s10 register state */ + unsigned long s10; + /** s11 register state */ + unsigned long s11; + /** t3 register state */ + unsigned long t3; + /** t4 register state */ + unsigned long t4; + /** t5 register state */ + unsigned long t5; + /** t6 register state */ + unsigned long t6; +} __packed; + +struct sse_interrupted_state { + /** Interrupted program counter */ + unsigned long pc; + /** ra register state */ + unsigned long ra; + /** sp register state */ + unsigned long sp; + /** gp register state */ + unsigned long gp; + /** tp register state */ + unsigned long tp; + /** t0 register state */ + unsigned long t0; + /** t1 register state */ + unsigned long t1; + /** t2 register state */ + unsigned long t2; + /** s0 register state */ + unsigned long s0; + /** s1 register state */ + unsigned long s1; + /** a0 register state */ + unsigned long a0; + /** a1 register state */ + unsigned long a1; + /** a2 register state */ + unsigned long a2; + /** a3 register state */ + unsigned long a3; + /** a4 register state */ + unsigned long a4; + /** a5 register state */ + unsigned long a5; + /** a6 register state */ + unsigned long a6; + /** a7 register state */ + unsigned long a7; + /** s2 register state */ + unsigned long s2; + /** s3 register state */ + unsigned long s3; + /** s4 register state */ + unsigned long s4; + /** s5 register state */ + unsigned long s5; + /** s6 register state */ + unsigned long s6; + /** s7 register state */ + unsigned long s7; + /** s8 register state */ + unsigned long s8; + /** s9 register state */ + unsigned long s9; + /** s10 register state */ + unsigned long s10; + /** s11 register state */ + unsigned long s11; + /** t3 register state */ + unsigned long t3; + /** t4 register state */ + unsigned long t4; + /** t5 register state */ + unsigned long t5; + /** t6 register state */ + unsigned long t6; + /** Exception mode */ + unsigned long exc_mode; +} __packed; + +struct sbi_sse_handler_ctx { + struct sse_entry_state entry; + 
struct sse_interrupted_state interrupted; +} __packed; + +enum sbi_sse_state { + SSE_STATE_UNUSED = 0, + SSE_STATE_REGISTERED = 1, + SSE_STATE_ENABLED = 2, + SSE_STATE_RUNNING = 3, +}; +typedef void (*set_hartid_cb_t)(uint32_t event_id, unsigned long val); + +int sbi_sse_event_set_hartid_cb(uint32_t event_id, set_hartid_cb_t set_attr_cb); +int sbi_sse_init(struct sbi_scratch *scratch, bool cold_boot); +void sbi_sse_exit(struct sbi_scratch *scratch); + +int sbi_sse_get_attr(uint32_t event_id, uint32_t attr_id, unsigned long *out_val); +int sbi_sse_set_attr(uint32_t event_id, uint32_t attr_id, unsigned long value); +int sbi_sse_register(uint32_t event_id, unsigned long phys_hi, unsigned long phys_lo); +int sbi_sse_unregister(uint32_t event_id); +int sbi_sse_enable(uint32_t event_id); +int sbi_sse_disable(uint32_t event_id); +int sbi_sse_complete(uint32_t event_id, uint32_t status, uint32_t flags, + struct sbi_trap_regs *regs); +int sbi_sse_inject_from_ecall(uint32_t event_id, unsigned long hart_id, + struct sbi_trap_regs *regs); +int sbi_sse_inject_event(uint32_t event_id, struct sbi_trap_regs *regs); + +#endif diff --git a/include/sbi_utils/cppc/fdt_cppc.h b/include/sbi_utils/cppc/fdt_cppc.h new file mode 100644 index 00000000000..9dae501eebd --- /dev/null +++ b/include/sbi_utils/cppc/fdt_cppc.h @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. + * + * Authors: + * Anup Patel + */ + +#ifndef __FDT_CPPC_H__ +#define __FDT_CPPC_H__ + +#include + +#ifdef CONFIG_FDT_CPPC + +struct fdt_cppc { + const struct fdt_match *match_table; + int (*cold_init)(void *fdt, int nodeoff, const struct fdt_match *match); + int (*warm_init)(void); + void (*exit)(void); +}; + +void fdt_cppc_exit(void); + +int fdt_cppc_init(bool cold_boot); + +#else + +static inline void fdt_cppc_exit(void) { } +static inline int fdt_cppc_init(bool cold_boot) { return 0; } + +#endif + +#endif diff --git a/include/sbi_utils/hsm/fdt_hsm.h b/include/sbi_utils/hsm/fdt_hsm.h new file mode 100644 index 00000000000..acab04f0648 --- /dev/null +++ b/include/sbi_utils/hsm/fdt_hsm.h @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. + * + * Authors: + * Anup Patel + */ + +#ifndef __FDT_HSM_H__ +#define __FDT_HSM_H__ + +#include + +#ifdef CONFIG_FDT_HSM + +struct fdt_hsm { + const struct fdt_match *match_table; + int (*fdt_fixup)(void *fdt); + int (*cold_init)(void *fdt, int nodeoff, const struct fdt_match *match); + int (*warm_init)(void); + void (*exit)(void); +}; + +int fdt_hsm_fixup(void *fdt); + +void fdt_hsm_exit(void); + +int fdt_hsm_init(bool cold_boot); + +#else + +static inline int fdt_hsm_fixup(void *fdt) { return 0; } +static inline void fdt_hsm_exit(void) { } +static inline int fdt_hsm_init(bool cold_boot) { return 0; } + +#endif + +#endif diff --git a/include/sbi_utils/mailbox/fdt_mailbox.h b/include/sbi_utils/mailbox/fdt_mailbox.h new file mode 100644 index 00000000000..0398e09d196 --- /dev/null +++ b/include/sbi_utils/mailbox/fdt_mailbox.h @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2022 Ventana Micro Systems Inc. 
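The fdt_cppc and fdt_hsm wrappers above share one driver shape: a match table plus init/exit hooks. A skeleton of a hypothetical fdt_hsm driver instance; how instances get collected into the framework's driver list (e.g. via a carray) is build glue outside this sketch:

#include <sbi_utils/fdt/fdt_helper.h>
#include <sbi_utils/hsm/fdt_hsm.h>

static const struct fdt_match myplat_hsm_match[] = {
	{ .compatible = "myvendor,myplat-hsm" },	/* hypothetical */
	{ /* sentinel */ },
};

static int myplat_hsm_cold_init(void *fdt, int nodeoff,
				const struct fdt_match *match)
{
	/* Parse DT resources and register the sbi_hsm_device here */
	return 0;
}

struct fdt_hsm fdt_hsm_myplat = {
	.match_table = myplat_hsm_match,
	.cold_init   = myplat_hsm_cold_init,
};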
+ * + * Authors: + * Anup Patel + */ + +#ifndef __FDT_MAILBOX_H__ +#define __FDT_MAILBOX_H__ + +#include + +struct fdt_phandle_args; + +/** FDT based mailbox driver */ +struct fdt_mailbox { + const struct fdt_match *match_table; + int (*xlate)(struct mbox_controller *mbox, + const struct fdt_phandle_args *pargs, + u32 *out_chan_args); + int (*init)(void *fdt, int nodeoff, u32 phandle, + const struct fdt_match *match); +}; + +/** Request a mailbox channel using "mboxes" DT property of client DT node */ +int fdt_mailbox_request_chan(void *fdt, int nodeoff, int index, + struct mbox_chan **out_chan); + +/** Simple xlate function to convert one mailbox FDT cell into channel args */ +int fdt_mailbox_simple_xlate(struct mbox_controller *mbox, + const struct fdt_phandle_args *pargs, + u32 *out_chan_args); + +#endif diff --git a/include/sbi_utils/mailbox/mailbox.h b/include/sbi_utils/mailbox/mailbox.h new file mode 100644 index 00000000000..daf32ce1cd5 --- /dev/null +++ b/include/sbi_utils/mailbox/mailbox.h @@ -0,0 +1,170 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2022 Ventana Micro Systems Inc. + * + * Authors: + * Anup Patel + */ + +#ifndef __MAILBOX_H__ +#define __MAILBOX_H__ + +#include +#include +#include + +/** Representation of a mailbox channel */ +struct mbox_chan { + /** List head */ + struct sbi_dlist node; + /** Pointer to the mailbox controller */ + struct mbox_controller *mbox; + /** + * Arguments (or parameters) to identify a mailbox channel + * within a mailbox controller. + */ +#define MBOX_CHAN_MAX_ARGS 2 + u32 chan_args[MBOX_CHAN_MAX_ARGS]; +}; + +#define to_mbox_chan(__node) \ + container_of((__node), struct mbox_chan, node) + +/** + * Representation of a mailbox data transfer + * + * NOTE: If both "tx" and "rx" are non-NULL then Tx is done before Rx. + */ +struct mbox_xfer { +#define MBOX_XFER_SEQ (1UL << 0) + /** Transfer flags */ + unsigned long flags; + /** Transfer arguments (or parameters) */ + void *args; + /** + * Sequence number + * + * If MBOX_XFER_SEQ is not set in flags then mbox_chan_xfer() + * will generate a unique sequence number and update this field + * else mbox_chan_xfer() will blindly use the sequence number + * specified by this field. + */ + long seq; + /** Send data pointer */ + void *tx; + /** Send data length (valid only if tx != NULL) */ + unsigned long tx_len; + /** + * Send timeout milliseconds (valid only if tx != NULL) + * + * If this field is non-zero along with tx != NULL then the + * mailbox controller driver will wait specified milliseconds + * for send data transfer to complete else the mailbox controller + * driver will not wait. + */ + unsigned long tx_timeout; + /** Receive data pointer */ + void *rx; + /** Receive data length (valid only if rx != NULL) */ + unsigned long rx_len; + /** + * Receive timeout milliseconds (valid only if rx != NULL) + * + * If this field is non-zero along with rx != NULL then the + * mailbox controller driver will wait specified milliseconds + * for receive data transfer to complete else the mailbox + * controller driver will not wait. 
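Client drivers do not walk the mailbox list themselves; they resolve a channel from their own DT node. A sketch, assuming a hypothetical client node carrying an "mboxes" property:

#include <sbi_utils/mailbox/fdt_mailbox.h>
#include <sbi_utils/mailbox/mailbox.h>

static struct mbox_chan *myclient_chan;

static int myclient_init(void *fdt, int nodeoff)
{
	/* Index 0 picks the first phandle in the node's "mboxes" property */
	return fdt_mailbox_request_chan(fdt, nodeoff, 0, &myclient_chan);
}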
+ */ + unsigned long rx_timeout; +}; + +#define mbox_xfer_init_tx(__p, __a, __t, __t_len, __t_tim) \ +do { \ + (__p)->flags = 0; \ + (__p)->args = (__a); \ + (__p)->tx = (__t); \ + (__p)->tx_len = (__t_len); \ + (__p)->tx_timeout = (__t_tim); \ + (__p)->rx = NULL; \ + (__p)->rx_len = 0; \ + (__p)->rx_timeout = 0; \ +} while (0) + +#define mbox_xfer_init_rx(__p, __a, __r, __r_len, __r_tim) \ +do { \ + (__p)->flags = 0; \ + (__p)->args = (__a); \ + (__p)->tx = NULL; \ + (__p)->tx_len = 0; \ + (__p)->tx_timeout = 0; \ + (__p)->rx = (__r); \ + (__p)->rx_len = (__r_len); \ + (__p)->rx_timeout = (__r_tim); \ +} while (0) + +#define mbox_xfer_init_txrx(__p, __a, __t, __t_len, __t_tim, __r, __r_len, __r_tim)\ +do { \ + (__p)->flags = 0; \ + (__p)->args = (__a); \ + (__p)->tx = (__t); \ + (__p)->tx_len = (__t_len); \ + (__p)->tx_timeout = (__t_tim); \ + (__p)->rx = (__r); \ + (__p)->rx_len = (__r_len); \ + (__p)->rx_timeout = (__r_tim); \ +} while (0) + +#define mbox_xfer_set_sequence(__p, __seq) \ +do { \ + (__p)->flags |= MBOX_XFER_SEQ; \ + (__p)->seq = (__seq); \ +} while (0) + +/** Representation of a mailbox controller */ +struct mbox_controller { + /** List head */ + struct sbi_dlist node; + /** Next sequence atomic counter */ + atomic_t xfer_next_seq; + /* List of mailbox channels */ + struct sbi_dlist chan_list; + /** Unique ID of the mailbox controller assigned by the driver */ + unsigned int id; + /** Maximum length of transfer supported by the mailbox controller */ + unsigned int max_xfer_len; + /** Pointer to mailbox driver owning this mailbox controller */ + void *driver; + /** Request a mailbox channel from the mailbox controller */ + struct mbox_chan *(*request_chan)(struct mbox_controller *mbox, + u32 *chan_args); + /** Free a mailbox channel from the mailbox controller */ + void *(*free_chan)(struct mbox_controller *mbox, + struct mbox_chan *chan); + /** Transfer data over mailbox channel */ + int (*xfer)(struct mbox_chan *chan, struct mbox_xfer *xfer); +}; + +#define to_mbox_controller(__node) \ + container_of((__node), struct mbox_controller, node) + +/** Find a registered mailbox controller */ +struct mbox_controller *mbox_controller_find(unsigned int id); + +/** Register mailbox controller */ +int mbox_controller_add(struct mbox_controller *mbox); + +/** Un-register mailbox controller */ +void mbox_controller_remove(struct mbox_controller *mbox); + +/** Request a mailbox channel */ +struct mbox_chan *mbox_controller_request_chan(struct mbox_controller *mbox, + u32 *chan_args); + +/** Free a mailbox channel */ +void mbox_controller_free_chan(struct mbox_chan *chan); + +/** Data transfer over mailbox channel */ +int mbox_chan_xfer(struct mbox_chan *chan, struct mbox_xfer *xfer); + +#endif diff --git a/include/sbi_utils/mailbox/rpmi_mailbox.h b/include/sbi_utils/mailbox/rpmi_mailbox.h new file mode 100644 index 00000000000..61af51a8de5 --- /dev/null +++ b/include/sbi_utils/mailbox/rpmi_mailbox.h @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. 
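Putting the xfer helpers together, one synchronous request/response on a channel looks roughly like this; the NULL args pointer and the 20ms timeouts are placeholders (controller drivers such as RPMI expect their own args structure):

#include <sbi_utils/mailbox/mailbox.h>

static int myclient_do_request(struct mbox_chan *chan,
			       void *req, unsigned long req_len,
			       void *resp, unsigned long resp_len)
{
	struct mbox_xfer xfer;

	/* Tx is done before Rx; a fresh sequence number is generated
	   by mbox_chan_xfer() because MBOX_XFER_SEQ is left clear */
	mbox_xfer_init_txrx(&xfer, NULL, req, req_len, 20,
			    resp, resp_len, 20);

	return mbox_chan_xfer(chan, &xfer);
}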
+ * + * Authors: + * Anup Patel + */ + +#ifndef __RPMI_MAILBOX_H__ +#define __RPMI_MAILBOX_H__ + +#include +#include + +#define rpmi_u32_count(__var) (sizeof(__var) / sizeof(u32)) + +/** Convert RPMI error to SBI error */ +int rpmi_xlate_error(enum rpmi_error error); + +/** Typical RPMI normal request with at least status code in response */ +int rpmi_normal_request_with_status( + struct mbox_chan *chan, u32 service_id, + void *req, u32 req_words, u32 req_endian_words, + void *resp, u32 resp_words, u32 resp_endian_words); + +/* RPMI posted request which is without any response*/ +int rpmi_posted_request( + struct mbox_chan *chan, u32 service_id, + void *req, u32 req_words, u32 req_endian_words); + +#endif /* !__RPMI_MAILBOX_H__ */ diff --git a/include/sbi_utils/mailbox/rpmi_msgprot.h b/include/sbi_utils/mailbox/rpmi_msgprot.h new file mode 100644 index 00000000000..81a1c06f1b7 --- /dev/null +++ b/include/sbi_utils/mailbox/rpmi_msgprot.h @@ -0,0 +1,541 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. + * + * Authors: + * Rahul Pathak + * Subrahmanya Lingappa + */ + +#ifndef __RPMI_MSGPROT_H__ +#define __RPMI_MSGPROT_H__ + +#include +#include + +/* + * 31 0 + * +---------------------------------------------+ + * | TOKEN | + * +---------+------------------+----------------+ + * | FLAGS | SERVICEGROUP_ID | SERVICE_ID | + * +---------+------------------+----------------+ + * | DATA LENGTH | + * +---------------------------------------------+ + * | DATA/PAYLOAD | + * +---------------------------------------------+ + */ + +/** Message Header Offset */ +#define RPMI_MSG_HDR_OFFSET (0x0) +#define RPMI_MSG_HDR_SIZE (12) /* bytes */ + +/** Token is unique message identifier in the system */ +#define RPMI_MSG_TOKEN_OFFSET (0x0) +#define RPMI_MSG_TOKEN_SIZE (4) /* bytes */ + +/** Message Identity = Flags + Service Group ID + Service ID */ +#define RPMI_MSG_IDN_OFFSET (0x4) +#define RPMI_MSG_IDN_SIZE (4) /* bytes */ + +#define RPMI_MSG_IDN_SERVICE_ID_POS (0U) +#define RPMI_MSG_IDN_SERVICE_ID_MASK \ + ((0xFF) << RPMI_MSG_IDN_SERVICE_ID_POS) + +#define RPMI_MSG_IDN_SERVICEGROUP_ID_POS (8U) +#define RPMI_MSG_IDN_SERVICEGROUP_ID_MASK \ + ((0xFFFFF) << RPMI_MSG_IDN_SERVICEGROUP_ID_POS) + +#define RPMI_MSG_IDN_TYPE_POS (28U) +#define RPMI_MSG_IDN_TYPE_MASK \ + ((0x3) << RPMI_MSG_IDN_TYPE_POS) + +#define RPMI_MSG_IDN_DOORBELL_POS (30U) +#define RPMI_MSG_IDN_DOORBELL_MASK \ + ((0x1) << RPMI_MSG_IDN_DOORBELL_POS) + +/** Data length field */ +#define RPMI_MSG_DATALEN_OFFSET (0x8) +#define RPMI_MSG_DATALEN_SIZE (4) /* bytes */ + +/** Data field */ +#define RPMI_MSG_DATA_OFFSET (0xc) +#define RPMI_MSG_DATA_SIZE (52) /* bytes */ + +/** Minimum message size Header + Data */ +#define RPMI_MSG_SIZE_MIN (RPMI_MSG_HDR_SIZE + \ + RPMI_MSG_DATA_SIZE) + +/** Name length of 16 characters */ +#define RPMI_NAME_CHARS_MAX (16) + +/** Default timeout values */ +#define RPMI_DEF_TX_TIMEOUT 20 +#define RPMI_DEF_RX_TIMEOUT 20 + +/** RPMI Message Header */ +struct rpmi_message_header { + le32_t token; + le32_t msgidn; + le32_t datalen; +} __packed; + +/** RPMI Message */ +struct rpmi_message { + struct rpmi_message_header header; + u8 data[0]; +} __packed; + +/** RPMI Messages Types */ +enum rpmi_message_type { + /* Normal request backed with ack */ + RPMI_MSG_NORMAL_REQUEST = 0x0, + /* Request without any ack */ + RPMI_MSG_POSTED_REQUEST = 0x1, + /* Acknowledgment for normal request message */ + RPMI_MSG_ACKNOWLDGEMENT = 0x2, + /* Notification message */ + RPMI_MSG_NOTIFICATION 
= 0x3, +}; + +/** RPMI Error Types */ +enum rpmi_error { + RPMI_SUCCESS = 0, + RPMI_ERR_FAILED = -1, + RPMI_ERR_NOTSUPP = -2, + RPMI_ERR_INVAL = -3, + RPMI_ERR_DENIED = -4, + RPMI_ERR_NOTFOUND = -5, + RPMI_ERR_OUTOFRANGE = -6, + RPMI_ERR_OUTOFRES = -7, + RPMI_ERR_HWFAULT = -8, +}; + +/** RPMI Message Arguments */ +struct rpmi_message_args { + u32 flags; +#define RPMI_MSG_FLAGS_NO_TX (1U << 0) +#define RPMI_MSG_FLAGS_NO_RX (1U << 1) +#define RPMI_MSG_FLAGS_NO_RX_TOKEN (1U << 2) + enum rpmi_message_type type; + u32 service_id; + u32 tx_endian_words; + u32 rx_endian_words; + u32 rx_token; + u32 rx_data_len; +}; + +/* + * RPMI SERVICEGROUPS AND SERVICES + */ + +/** RPMI ServiceGroups IDs */ +enum rpmi_servicegroup_id { + RPMI_SRVGRP_ID_MIN = 0, + RPMI_SRVGRP_BASE = 0x00001, + RPMI_SRVGRP_SYSTEM_RESET = 0x00002, + RPMI_SRVGRP_SYSTEM_SUSPEND = 0x00003, + RPMI_SRVGRP_HSM = 0x00004, + RPMI_SRVGRP_CPPC = 0x00005, + RPMI_SRVGRP_CLOCK = 0x00007, + RPMI_SRVGRP_ID_MAX_COUNT, +}; + +/** RPMI enable notification request */ +struct rpmi_enable_notification_req { + u32 eventid; +}; + +/** RPMI enable notification response */ +struct rpmi_enable_notification_resp { + s32 status; +}; + +/** RPMI Base ServiceGroup Service IDs */ +enum rpmi_base_service_id { + RPMI_BASE_SRV_ENABLE_NOTIFICATION = 0x01, + RPMI_BASE_SRV_GET_IMPLEMENTATION_VERSION = 0x02, + RPMI_BASE_SRV_GET_IMPLEMENTATION_IDN = 0x03, + RPMI_BASE_SRV_GET_SPEC_VERSION = 0x04, + RPMI_BASE_SRV_GET_HW_INFO = 0x05, + RPMI_BASE_SRV_PROBE_SERVICE_GROUP = 0x06, + RPMI_BASE_SRV_GET_ATTRIBUTES = 0x07, + RPMI_BASE_SRV_SET_MSI = 0x08, +}; + +struct rpmi_base_get_attributes_resp { + s32 status_code; +#define RPMI_BASE_FLAGS_F0_EV_NOTIFY (1U << 31) +#define RPMI_BASE_FLAGS_F0_MSI_EN (1U << 30) + u32 f0; + u32 f1; + u32 f2; + u32 f3; +}; + +/** RPMI System Reset ServiceGroup Service IDs */ +enum rpmi_system_reset_service_id { + RPMI_SYSRST_SRV_ENABLE_NOTIFICATION = 0x01, + RPMI_SYSRST_SRV_GET_SYSTEM_RESET_ATTRIBUTES = 0x02, + RPMI_SYSRST_SRV_SYSTEM_RESET = 0x03, + RPMI_SYSRST_SRV_ID_MAX_COUNT, +}; + +/** RPMI System Reset types */ +enum rpmi_sysrst_reset_type { + RPMI_SYSRST_SHUTDOWN = 0, + RPMI_SYSRST_COLD_RESET = 1, + RPMI_SYSRST_WARM_RESET = 2, + RPMI_SYSRST_MAX_IDN_COUNT, +}; + +/** Response for system reset attributes */ +struct rpmi_sysrst_get_reset_attributes_resp { + s32 status; +#define RPMI_SYSRST_FLAGS_SUPPORTED_POS (31) +#define RPMI_SYSRST_FLAGS_SUPPORTED_MASK \ + (1U << RPMI_SYSRST_FLAGS_SUPPORTED_POS) + u32 flags; +}; + +/** RPMI System Suspend ServiceGroup Service IDs */ +enum rpmi_system_suspend_service_id { + RPMI_SYSSUSP_SRV_ENABLE_NOTIFICATION = 0x01, + RPMI_SYSSUSP_SRV_GET_SYSTEM_SUSPEND_ATTRIBUTES = 0x02, + RPMI_SYSSUSP_SRV_SYSTEM_SUSPEND = 0x03, + RPMI_SYSSUSP_SRV_ID_MAX_COUNT, +}; + +/** Response for system suspend attributes */ +struct rpmi_syssusp_get_attr_resp { + s32 status; +#define RPMI_SYSSUSP_FLAGS_CUSTOM_RESUME_ADDR_SUPPORTED (1U << 31) +#define RPMI_SYSSUSP_FLAGS_SUPPORTED (1U << 30) + u32 flags; +}; + +struct rpmi_syssusp_suspend_req { + u32 hartid; + u32 suspend_type; + u32 resume_addr_lo; + u32 resume_addr_hi; +}; + +struct rpmi_syssusp_suspend_resp { + s32 status; +}; + +/** RPMI HSM State Management ServiceGroup Service IDs */ +enum rpmi_cpu_hsm_service_id { + RPMI_HSM_SRV_ENABLE_NOTIFICATION = 0x01, + RPMI_HSM_SRV_HART_START = 0x02, + RPMI_HSM_SRV_HART_STOP = 0x03, + RPMI_HSM_SRV_HART_SUSPEND = 0x04, + RPMI_HSM_SRV_GET_HART_STATUS = 0x05, + RPMI_HSM_SRV_GET_HART_LIST = 0x06, + RPMI_HSM_SRV_GET_SUSPEND_TYPES = 0x07, + 
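With the helpers from rpmi_mailbox.h, querying whether a reset type is supported reduces to one normal request against the attribute structures above; the one-word request layout (just the reset type) is an assumption of this sketch:

#include <sbi/sbi_error.h>
#include <sbi_utils/mailbox/rpmi_mailbox.h>
#include <sbi_utils/mailbox/rpmi_msgprot.h>

static int myplat_sysrst_supported(struct mbox_chan *chan)
{
	int rc;
	u32 reset_type = RPMI_SYSRST_COLD_RESET;
	struct rpmi_sysrst_get_reset_attributes_resp resp;

	rc = rpmi_normal_request_with_status(
		chan, RPMI_SYSRST_SRV_GET_SYSTEM_RESET_ATTRIBUTES,
		&reset_type, rpmi_u32_count(reset_type),
		rpmi_u32_count(reset_type),
		&resp, rpmi_u32_count(resp), rpmi_u32_count(resp));
	if (rc)
		return rc;

	return (resp.flags & RPMI_SYSRST_FLAGS_SUPPORTED_MASK) ?
		0 : SBI_ENOTSUPP;
}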
RPMI_HSM_SRV_GET_SUSPEND_INFO = 0x08, + RPMI_HSM_SRV_ID_MAX_COUNT, +}; + +/* HSM service group request and response structs */ +struct rpmi_hsm_hart_start_req { + u32 hartid; + u32 start_addr_lo; + u32 start_addr_hi; +}; + +struct rpmi_hsm_hart_start_resp { + s32 status; +}; + +struct rpmi_hsm_hart_stop_req { + u32 hartid; +}; + +struct rpmi_hsm_hart_stop_resp { + s32 status; +}; + +struct rpmi_hsm_hart_susp_req { + u32 hartid; + u32 suspend_type; + u32 resume_addr_lo; + u32 resume_addr_hi; +}; + +struct rpmi_hsm_hart_susp_resp { + s32 status; +}; + +struct rpmi_hsm_get_hart_status_req { + u32 hartid; +}; + +struct rpmi_hsm_get_hart_status_resp { + s32 status; + u32 hart_status; +}; + +struct rpmi_hsm_get_hart_list_req { + u32 start_index; +}; + +struct rpmi_hsm_get_hart_list_resp { + s32 status; + u32 remaining; + u32 returned; + /* remaining space need to be adjusted for the above 3 u32's */ + u32 hartid[(RPMI_MSG_DATA_SIZE - (sizeof(u32) * 3)) / sizeof(u32)]; +}; + +struct rpmi_hsm_get_susp_types_req { + u32 start_index; +}; + +struct rpmi_hsm_get_susp_types_resp { + s32 status; + u32 remaining; + u32 returned; + /* remaining space need to be adjusted for the above 3 u32's */ + u32 types[(RPMI_MSG_DATA_SIZE - (sizeof(u32) * 3)) / sizeof(u32)]; +}; + +struct rpmi_hsm_get_susp_info_req { + u32 suspend_type; +}; + +struct rpmi_hsm_get_susp_info_resp { + s32 status; + u32 flags; +#define RPMI_HSM_FLAGS_LOCAL_TIME_STOP (1U << 31) + u32 entry_latency_us; + u32 exit_latency_us; + u32 wakeup_latency_us; + u32 min_residency_us; +}; + +/** RPMI CPPC ServiceGroup Service IDs */ +enum rpmi_cppc_service_id { + RPMI_CPPC_SRV_ENABLE_NOTIFICATION = 0x01, + RPMI_CPPC_SRV_PROBE_REG = 0x02, + RPMI_CPPC_SRV_READ_REG = 0x03, + RPMI_CPPC_SRV_WRITE_REG = 0x04, + RPMI_CPPC_SRV_GET_FAST_CHANNEL_ADDR = 0x05, + RPMI_CPPC_SRV_POKE_FAST_CHANNEL = 0x06, + RPMI_CPPC_SRV_GET_HART_LIST = 0x07, + RPMI_CPPC_SRV_MAX_COUNT, +}; + +struct rpmi_cppc_probe_req { + u32 hart_id; + u32 reg_id; +}; + +struct rpmi_cppc_probe_resp { + s32 status; + u32 reg_len; +}; + +struct rpmi_cppc_read_reg_req { + u32 hart_id; + u32 reg_id; +}; + +struct rpmi_cppc_read_reg_resp { + s32 status; + u32 data_lo; + u32 data_hi; +}; + +struct rpmi_cppc_write_reg_req { + u32 hart_id; + u32 reg_id; + u32 data_lo; + u32 data_hi; +}; + +struct rpmi_cppc_write_reg_resp { + s32 status; +}; + +struct rpmi_cppc_get_fast_channel_addr_req { + u32 hart_id; +}; + +struct rpmi_cppc_get_fast_channel_addr_resp { + s32 status; +#define RPMI_CPPC_FAST_CHANNEL_FLAGS_DB_WIDTH_POS 1 +#define RPMI_CPPC_FAST_CHANNEL_FLAGS_DB_WIDTH_MASK \ + (3U << RPMI_CPPC_FAST_CHANNEL_FLAGS_DB_WIDTH_POS) +#define RPMI_CPPC_FAST_CHANNEL_FLAGS_DB_SUPPORTED (1U << 0) + u32 flags; + u32 addr_lo; + u32 addr_hi; + u32 db_addr_lo; + u32 db_addr_hi; + u32 db_id_lo; + u32 db_id_hi; +}; + +enum rpmi_cppc_fast_channel_db_width { + RPMI_CPPC_FAST_CHANNEL_DB_WIDTH_8 = 0x0, + RPMI_CPPC_FAST_CHANNEL_DB_WIDTH_16 = 0x1, + RPMI_CPPC_FAST_CHANNEL_DB_WIDTH_32 = 0x2, + RPMI_CPPC_FAST_CHANNEL_DB_WIDTH_64 = 0x3, +}; + +struct rpmi_cppc_hart_list_req { + u32 start_index; +}; + +struct rpmi_cppc_hart_list_resp { + s32 status; + u32 remaining; + u32 returned; + /* remaining space need to be adjusted for the above 3 u32's */ + u32 hartid[(RPMI_MSG_DATA_SIZE - (sizeof(u32) * 3)) / sizeof(u32)]; +}; + +/** RPMI Clock ServiceGroup Service IDs */ +enum rpmi_clock_service_id { + RPMI_CLOCK_SRV_ENABLE_NOTIFICATION = 0x01, + RPMI_CLOCK_SRV_GET_NUM_CLOCKS = 0x02, + RPMI_CLOCK_SRV_GET_ATTRIBUTES = 0x03, + 
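Several requests above split 64-bit addresses into lo/hi word pairs because RPMI data words are 32-bit. A small hypothetical helper makes the convention explicit for the HSM suspend request:

#include <sbi/sbi_types.h>
#include <sbi_utils/mailbox/rpmi_msgprot.h>

static void myplat_fill_susp_req(struct rpmi_hsm_hart_susp_req *req,
				 u32 hartid, u32 suspend_type,
				 u64 resume_addr)
{
	req->hartid		= hartid;
	req->suspend_type	= suspend_type;
	req->resume_addr_lo	= (u32)resume_addr;
	req->resume_addr_hi	= (u32)(resume_addr >> 32);
}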
RPMI_CLOCK_SRV_GET_SUPPORTED_RATES = 0x04, + RPMI_CLOCK_SRV_SET_CONFIG = 0x05, + RPMI_CLOCK_SRV_GET_CONFIG = 0x06, + RPMI_CLOCK_SRV_SET_RATE = 0x07, + RPMI_CLOCK_SRV_GET_RATE = 0x08, + RPMI_CLOCK_SRV_MAX_COUNT, +}; + +struct rpmi_clock_get_num_clocks_resp { + s32 status; + u32 num_clocks; +}; + +struct rpmi_clock_get_attributes_req { + u32 clock_id; +}; + +struct rpmi_clock_get_attributes_resp { + s32 status; +#define RPMI_CLOCK_FLAGS_FORMAT_POS 30 +#define RPMI_CLOCK_FLAGS_FORMAT_MASK \ + (3U << RPMI_CLOCK_FLAGS_FORMAT_POS) +#define RPMI_CLOCK_FLAGS_FORMAT_DISCRETE 0 +#define RPMI_CLOCK_FLAGS_FORMAT_LINEAR 1 + u32 flags; + u32 num_rates; + u32 transition_latency; + u8 name[16]; +}; + +struct rpmi_clock_get_supported_rates_req { + u32 clock_id; + u32 clock_rate_index; +}; + +struct rpmi_clock_get_supported_rates_resp { + s32 status; + u32 flags; + u32 remaining; + u32 returned; + u32 clock_rate[0]; +}; + +struct rpmi_clock_set_config_req { + u32 clock_id; +#define RPMI_CLOCK_CONFIG_ENABLE (1U << 0) + u32 config; +}; + +struct rpmi_clock_set_config_resp { + s32 status; +}; + +struct rpmi_clock_get_config_req { + u32 clock_id; +}; + +struct rpmi_clock_get_config_resp { + s32 status; + u32 config; +}; + +struct rpmi_clock_set_rate_req { + u32 clock_id; +#define RPMI_CLOCK_SET_RATE_FLAGS_MASK (3U << 0) +#define RPMI_CLOCK_SET_RATE_FLAGS_ROUND_DOWN 0 +#define RPMI_CLOCK_SET_RATE_FLAGS_ROUND_UP 1 +#define RPMI_CLOCK_SET_RATE_FLAGS_ROUND_PLAT 2 + u32 flags; + u32 clock_rate_low; + u32 clock_rate_high; +}; + +struct rpmi_clock_set_rate_resp { + s32 status; +}; + +struct rpmi_clock_get_rate_req { + u32 clock_id; +}; + +struct rpmi_clock_get_rate_resp { + s32 status; + u32 clock_rate_low; + u32 clock_rate_high; +}; + +/** RPMI RAS-Agent ServiceGroup Service IDs */ +enum rpmi_ras_service_id { + RPMI_RAS_SRV_PROBE_REQ = 0x01, + RPMI_RAS_SRV_SYNC_HART_ERR_REQ, + RPMI_RAS_SRV_SYNC_DEV_ERR_REQ, + RPMI_RAS_SRV_GET_PEND_VECS_REQ, + RPMI_RAS_SRV_SYNC_ERR_RESP, + RPMI_RAS_SRV_MAX_COUNT, +}; + +struct rpmi_ras_probe_req { + u32 dummy; +}; + +struct rpmi_ras_probe_resp { + s32 status; + u32 version; +}; + +struct rpmi_ras_sync_hart_err_req { + u32 hart_id; +}; + +struct rpmi_ras_sync_dev_err_req { + u32 dummy; +}; + +struct rpmi_ras_pend_vecs_req { +#define INVALID_LAST_VEC 0xFFFFFFFFUL + u32 last_vec; +}; + +/* + * List of vectors needing attention. There may be more of them + * than can be sent in a single message. + * + * `remaining` will contain the number of vectors still pending. + * The SBI implementation should fetch them with additional + * GET_PEND_VECS requests. + */ +struct rpmi_ras_sync_err_resp { + s32 status; + u32 remaining; + u32 returned; +#define MAX_PEND_VECS ((RPMI_MSG_DATA_SIZE - (sizeof(u32) * 3)) / sizeof(u32)) + u32 pending_vecs[MAX_PEND_VECS]; +}; + +#endif /* !__RPMI_MSGPROT_H__ */ diff --git a/include/sbi_utils/ras/fdt_ras.h b/include/sbi_utils/ras/fdt_ras.h new file mode 100644 index 00000000000..549c81fd3f2 --- /dev/null +++ b/include/sbi_utils/ras/fdt_ras.h @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. 
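Per the comment on rpmi_ras_sync_err_resp, one response may not carry every pending vector. A sketch of the drain loop this implies; using the last vector of the previous batch as the last_vec continuation token is an inference from the structures above, not spelled out in this patch:

#include <sbi_utils/mailbox/rpmi_mailbox.h>
#include <sbi_utils/mailbox/rpmi_msgprot.h>

static int myplat_ras_fetch_remaining(struct mbox_chan *chan,
				      struct rpmi_ras_sync_err_resp *resp)
{
	int rc;
	struct rpmi_ras_pend_vecs_req req;

	while (resp->remaining) {
		/* Continue after the last vector already received */
		req.last_vec = resp->returned ?
			resp->pending_vecs[resp->returned - 1] :
			INVALID_LAST_VEC;

		rc = rpmi_normal_request_with_status(
			chan, RPMI_RAS_SRV_GET_PEND_VECS_REQ,
			&req, rpmi_u32_count(req), rpmi_u32_count(req),
			resp, rpmi_u32_count(*resp), rpmi_u32_count(*resp));
		if (rc)
			return rc;

		/* ...process resp->pending_vecs[0..resp->returned-1]... */
	}

	return 0;
}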
+ * + * Authors: + * Himanshu Chauhan + */ + +#ifndef __FDT_RAS_H__ +#define __FDT_RAS_H__ + +#include + +#ifdef CONFIG_FDT_RAS + +struct fdt_ras { + const struct fdt_match *match_table; + int (*cold_init)(void *fdt, int nodeoff, const struct fdt_match *match); + int (*warm_init)(void); + void (*exit)(void); +}; + +void fdt_ras_exit(void); + +int fdt_ras_init(bool cold_boot); + +#else + +static inline void fdt_ras_exit(void) { } +static inline int fdt_ras_init(bool cold_boot) { return 0; } + +#endif + +#endif diff --git a/include/sbi_utils/rpxy/fdt_rpxy.h b/include/sbi_utils/rpxy/fdt_rpxy.h new file mode 100644 index 00000000000..4a550dc0ac4 --- /dev/null +++ b/include/sbi_utils/rpxy/fdt_rpxy.h @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2020 Western Digital Corporation or its affiliates. + * + * Authors: + * Anup Patel + */ + +#ifndef __FDT_RPXY_H__ +#define __FDT_RPXY_H__ + +#include + +#ifdef CONFIG_FDT_RPXY + +struct fdt_rpxy { + const struct fdt_match *match_table; + int (*init)(void *fdt, int nodeoff, const struct fdt_match *match); + void (*exit)(void); +}; + +int fdt_rpxy_init(void); + +#else + +static inline int fdt_rpxy_init(void) { return 0; } + +#endif + +#endif diff --git a/include/sbi_utils/suspend/fdt_suspend.h b/include/sbi_utils/suspend/fdt_suspend.h new file mode 100644 index 00000000000..f660a924023 --- /dev/null +++ b/include/sbi_utils/suspend/fdt_suspend.h @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. + * + * Authors: + * Anup Patel + */ + +#ifndef __FDT_SUSPEND_H__ +#define __FDT_SUSPEND_H__ + +#include + +struct fdt_suspend { + const struct fdt_match *match_table; + int (*init)(void *fdt, int nodeoff, const struct fdt_match *match); +}; + +#ifdef CONFIG_FDT_SUSPEND + +/** + * fdt_suspend_driver_init() - initialize suspend driver based on the device-tree + */ +int fdt_suspend_driver_init(void *fdt, struct fdt_suspend *drv); + +/** + * fdt_suspend_init() - initialize suspend drivers based on the device-tree + * + * This function shall be invoked in final init. 
+ */ +void fdt_suspend_init(void); + +#else + +static inline int fdt_suspend_driver_init(void *fdt, struct fdt_suspend *drv) +{ + return 0; +} + +static inline void fdt_suspend_init(void) { } + +#endif + +#endif diff --git a/lib/sbi/Kconfig b/lib/sbi/Kconfig index 81dd2db3967..3d151036e3b 100644 --- a/lib/sbi/Kconfig +++ b/lib/sbi/Kconfig @@ -38,6 +38,10 @@ config SBI_ECALL_CPPC bool "CPPC extension" default y +config SBI_ECALL_RPXY + bool "RPXY extension" + default y + config SBI_ECALL_LEGACY bool "SBI v0.1 legacy extensions" default y @@ -50,4 +54,8 @@ config SBI_ECALL_DBTR bool "Debug Trigger Extension" default y +config SBI_ECALL_SSE + bool "SSE extension" + default y + endmenu diff --git a/lib/sbi/objects.mk b/lib/sbi/objects.mk index 0a50e95c30f..d61fdc21bcc 100644 --- a/lib/sbi/objects.mk +++ b/lib/sbi/objects.mk @@ -46,6 +46,9 @@ libsbi-objs-$(CONFIG_SBI_ECALL_DBCN) += sbi_ecall_dbcn.o carray-sbi_ecall_exts-$(CONFIG_SBI_ECALL_CPPC) += ecall_cppc libsbi-objs-$(CONFIG_SBI_ECALL_CPPC) += sbi_ecall_cppc.o +carray-sbi_ecall_exts-$(CONFIG_SBI_ECALL_RPXY) += ecall_rpxy +libsbi-objs-$(CONFIG_SBI_ECALL_RPXY) += sbi_ecall_rpxy.o + carray-sbi_ecall_exts-$(CONFIG_SBI_ECALL_LEGACY) += ecall_legacy libsbi-objs-$(CONFIG_SBI_ECALL_LEGACY) += sbi_ecall_legacy.o @@ -55,9 +58,13 @@ libsbi-objs-$(CONFIG_SBI_ECALL_VENDOR) += sbi_ecall_vendor.o carray-sbi_ecall_exts-$(CONFIG_SBI_ECALL_DBTR) += ecall_dbtr libsbi-objs-$(CONFIG_SBI_ECALL_DBTR) += sbi_ecall_dbtr.o +carray-sbi_ecall_exts-$(CONFIG_SBI_ECALL_SSE) += ecall_sse +libsbi-objs-$(CONFIG_SBI_ECALL_SSE) += sbi_ecall_sse.o + libsbi-objs-y += sbi_bitmap.o libsbi-objs-y += sbi_bitops.o libsbi-objs-y += sbi_console.o +libsbi-objs-y += sbi_domain_context.o libsbi-objs-y += sbi_domain.o libsbi-objs-y += sbi_emulate_csr.o libsbi-objs-y += sbi_fifo.o @@ -74,7 +81,9 @@ libsbi-objs-y += sbi_misaligned_ldst.o libsbi-objs-y += sbi_platform.o libsbi-objs-y += sbi_pmu.o libsbi-objs-y += sbi_dbtr.o +libsbi-objs-y += sbi_rpxy.o libsbi-objs-y += sbi_scratch.o +libsbi-objs-y += sbi_sse.o libsbi-objs-y += sbi_string.o libsbi-objs-y += sbi_system.o libsbi-objs-y += sbi_timer.o @@ -83,3 +92,4 @@ libsbi-objs-y += sbi_trap.o libsbi-objs-y += sbi_unpriv.o libsbi-objs-y += sbi_expected_trap.o libsbi-objs-y += sbi_cppc.o +libsbi-objs-y += sbi_ras.o diff --git a/lib/sbi/sbi_domain.c b/lib/sbi/sbi_domain.c index 4e9f7428a52..50749f15dc9 100644 --- a/lib/sbi/sbi_domain.c +++ b/lib/sbi/sbi_domain.c @@ -51,7 +51,7 @@ struct sbi_domain *sbi_hartindex_to_domain(u32 hartindex) return sbi_scratch_read_type(scratch, void *, domain_hart_ptr_offset); } -static void update_hartindex_to_domain(u32 hartindex, struct sbi_domain *dom) +void sbi_update_hartindex_to_domain(u32 hartindex, struct sbi_domain *dom) { struct sbi_scratch *scratch; @@ -567,7 +567,7 @@ int sbi_domain_register(struct sbi_domain *dom, if (tdom) sbi_hartmask_clear_hartindex(i, &tdom->assigned_harts); - update_hartindex_to_domain(i, dom); + sbi_update_hartindex_to_domain(i, dom); sbi_hartmask_set_hartindex(i, &dom->assigned_harts); /* diff --git a/lib/sbi/sbi_domain_context.c b/lib/sbi/sbi_domain_context.c new file mode 100755 index 00000000000..d6843a6a906 --- /dev/null +++ b/lib/sbi/sbi_domain_context.c @@ -0,0 +1,154 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) IPADS@SJTU 2023. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * Switches the HART context from the current domain to the target domain. 
+ * This includes changing domain assignments and reconfiguring PMP, as well + * as saving and restoring CSRs and trap states. + * + * @param ctx pointer to the current HART context + * @param dom_ctx pointer to the target domain context + */ +static void switch_to_next_domain_context(struct sbi_context *ctx, + struct sbi_context *dom_ctx) +{ + u32 hartindex; + struct sbi_trap_regs *trap_regs; + struct sbi_domain *dom = dom_ctx->dom; + struct sbi_scratch *scratch = sbi_scratch_thishart_ptr(); + unsigned int pmp_count = sbi_hart_pmp_count(scratch); + + /* Assign current hart to target domain */ + hartindex = sbi_hartid_to_hartindex(current_hartid()); + sbi_hartmask_clear_hartindex( + hartindex, &sbi_domain_thishart_ptr()->assigned_harts); + sbi_update_hartindex_to_domain(hartindex, dom); + sbi_hartmask_set_hartindex(hartindex, &dom->assigned_harts); + + /* Reconfigure PMP settings for the new domain */ + for (int i = 0; i < pmp_count; i++) { + pmp_disable(i); + } + sbi_hart_pmp_configure(scratch); + + /* Save current CSR context and restore target domain's CSR context */ + ctx->sstatus = csr_swap(CSR_SSTATUS, dom_ctx->sstatus); + ctx->sie = csr_swap(CSR_SIE, dom_ctx->sie); + ctx->stvec = csr_swap(CSR_STVEC, dom_ctx->stvec); + ctx->sscratch = csr_swap(CSR_SSCRATCH, dom_ctx->sscratch); + ctx->sepc = csr_swap(CSR_SEPC, dom_ctx->sepc); + ctx->scause = csr_swap(CSR_SCAUSE, dom_ctx->scause); + ctx->stval = csr_swap(CSR_STVAL, dom_ctx->stval); + ctx->sip = csr_swap(CSR_SIP, dom_ctx->sip); + ctx->satp = csr_swap(CSR_SATP, dom_ctx->satp); + ctx->scounteren = csr_swap(CSR_SCOUNTEREN, dom_ctx->scounteren); + ctx->senvcfg = csr_swap(CSR_SENVCFG, dom_ctx->senvcfg); + + /* Save current trap state and restore target domain's trap state */ + trap_regs = (struct sbi_trap_regs *)(csr_read(CSR_MSCRATCH) - + SBI_TRAP_REGS_SIZE); + sbi_memcpy(&ctx->regs, trap_regs, sizeof(*trap_regs)); + sbi_memcpy(trap_regs, &dom_ctx->regs, sizeof(*trap_regs)); + + /* Mark current context structure initialized because context saved */ + ctx->initialized = true; + + /* If target domain context is not initialized or runnable */ + if (!dom_ctx->initialized) { + /* Startup boot HART of target domain */ + if (current_hartid() == dom->boot_hartid) + sbi_hart_switch_mode(dom->boot_hartid, dom->next_arg1, + dom->next_addr, dom->next_mode, + false); + else + sbi_hsm_hart_stop(scratch, true); + } +} + +int sbi_domain_context_enter(struct sbi_domain *dom) +{ + struct sbi_context *ctx = sbi_domain_context_thishart_ptr(); + struct sbi_context *dom_ctx = sbi_hartindex_to_domain_context( + sbi_hartid_to_hartindex(current_hartid()), dom); + + /* Validate the domain context existence */ + if (!dom_ctx) + return SBI_EINVAL; + + /* Update target context's previous context to indicate the caller */ + dom_ctx->prev_ctx = ctx; + + switch_to_next_domain_context(ctx, dom_ctx); + + return 0; +} + +int sbi_domain_context_exit(void) +{ + u32 i, hartindex = sbi_hartid_to_hartindex(current_hartid()); + struct sbi_domain *dom; + struct sbi_context *ctx = sbi_domain_context_thishart_ptr(); + struct sbi_context *dom_ctx, *tmp; + + /* + * If it's first time to call `exit` on the current hart, no + * context allocated before. Loop through each domain to allocate + * its context on the current hart if valid. 
+	 */
+	if (!ctx) {
+		sbi_domain_for_each(i, dom) {
+			if (!sbi_hartmask_test_hartindex(hartindex,
+							 dom->possible_harts))
+				continue;
+
+			dom_ctx = sbi_zalloc(sizeof(struct sbi_context));
+			if (!dom_ctx)
+				return SBI_ENOMEM;
+
+			/* Bind context and domain */
+			dom_ctx->dom = dom;
+			dom->hartindex_to_context_table[hartindex] = dom_ctx;
+		}
+
+		ctx = sbi_domain_context_thishart_ptr();
+	}
+
+	dom_ctx = ctx->prev_ctx;
+
+	/* If there is no previous caller context */
+	if (!dom_ctx) {
+		/* Try to find the next uninitialized user-defined domain context */
+		sbi_domain_for_each(i, dom) {
+			if (dom == &root || dom == sbi_domain_thishart_ptr())
+				continue;
+
+			tmp = sbi_hartindex_to_domain_context(hartindex, dom);
+			if (tmp && !tmp->initialized) {
+				dom_ctx = tmp;
+				break;
+			}
+		}
+	}
+
+	/* Fall back to the root domain context if none was found */
+	if (!dom_ctx)
+		dom_ctx = sbi_hartindex_to_domain_context(hartindex, &root);
+
+	switch_to_next_domain_context(ctx, dom_ctx);
+
+	return 0;
+}
diff --git a/lib/sbi/sbi_ecall_rpxy.c b/lib/sbi/sbi_ecall_rpxy.c
new file mode 100644
index 00000000000..329b03b27bc
--- /dev/null
+++ b/lib/sbi/sbi_ecall_rpxy.c
@@ -0,0 +1,64 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Ventana Micro Systems Inc.
+ *
+ * Authors:
+ *   Anup Patel
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+static int sbi_ecall_rpxy_handler(unsigned long extid, unsigned long funcid,
+				  struct sbi_trap_regs *regs,
+				  struct sbi_ecall_return *out)
+{
+	int ret = 0;
+
+	switch (funcid) {
+	case SBI_EXT_RPXY_PROBE:
+		ret = sbi_rpxy_probe(regs->a0, regs->a1, &out->value);
+		break;
+	case SBI_EXT_RPXY_SET_SHMEM:
+		ret = sbi_rpxy_set_shmem(regs->a0,
+					 regs->a1, regs->a2, regs->a3);
+		break;
+	case SBI_EXT_RPXY_SEND_NORMAL_MESSAGE:
+		ret = sbi_rpxy_send_message(regs->a0, regs->a1, regs->a2,
+					    regs->a3, &out->value);
+		break;
+	case SBI_EXT_RPXY_SEND_POSTED_MESSAGE:
+		ret = sbi_rpxy_send_message(regs->a0, regs->a1, regs->a2,
+					    regs->a3, NULL);
+		break;
+	case SBI_EXT_RPXY_GET_NOTIFICATION_EVENTS:
+		ret = sbi_rpxy_get_notification_events(regs->a0, regs->a1,
+						       &out->value);
+		break;
+	default:
+		ret = SBI_ENOTSUPP;
+	}
+
+	return ret;
+}
+
+struct sbi_ecall_extension ecall_rpxy;
+
+static int sbi_ecall_rpxy_register_extensions(void)
+{
+	if (!sbi_rpxy_service_group_available())
+		return 0;
+
+	return sbi_ecall_register_extension(&ecall_rpxy);
+}
+
+struct sbi_ecall_extension ecall_rpxy = {
+	.extid_start = SBI_EXT_RPXY,
+	.extid_end = SBI_EXT_RPXY,
+	.register_extensions = sbi_ecall_rpxy_register_extensions,
+	.handle = sbi_ecall_rpxy_handler,
+};
diff --git a/lib/sbi/sbi_ecall_sse.c b/lib/sbi/sbi_ecall_sse.c
new file mode 100644
index 00000000000..8299d43eb5a
--- /dev/null
+++ b/lib/sbi/sbi_ecall_sse.c
@@ -0,0 +1,68 @@
+#include
+#include
+#include
+#include
+
+static int sbi_ecall_sse_handler(unsigned long extid, unsigned long funcid,
+				 struct sbi_trap_regs *regs,
+				 struct sbi_ecall_return *out)
+{
+	int ret;
+	unsigned long temp;
+
+	switch (funcid) {
+	case SBI_EXT_SSE_GET_ATTR:
+		ret = sbi_sse_get_attr(regs->a0, regs->a1, &temp);
+		if (ret == 0)
+			out->value = temp;
+		break;
+	case SBI_EXT_SSE_SET_ATTR:
+		ret = sbi_sse_set_attr(regs->a0, regs->a1, regs->a2);
+		break;
+	case SBI_EXT_SSE_REGISTER:
+		ret = sbi_sse_register(regs->a0, regs->a1, regs->a2);
+		break;
+	case SBI_EXT_SSE_UNREGISTER:
+		ret = sbi_sse_unregister(regs->a0);
+		break;
+	case SBI_EXT_SSE_ENABLE:
+		ret = sbi_sse_enable(regs->a0);
+		break;
+	case SBI_EXT_SSE_DISABLE:
+		ret = sbi_sse_disable(regs->a0);
+		break;
+
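+	/*
+	 * COMPLETE and INJECT may redirect execution by rewriting the
+	 * trap registers; they signal this with SBI_EJUMP so that the
+	 * generic ecall path skips its usual sepc/a0 update.
+	 */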
case SBI_EXT_SSE_COMPLETE: + ret = sbi_sse_complete(regs->a0, regs->a1, regs->a2, + (struct sbi_trap_regs *) regs); + if (ret == SBI_EJUMP) { + out->skip_regs_update = true; + ret = 0; + } + break; + case SBI_EXT_SSE_INJECT: + ret = sbi_sse_inject_from_ecall(regs->a0, regs->a1, + (struct sbi_trap_regs *) regs); + if (ret == SBI_EJUMP) { + out->skip_regs_update = true; + ret = 0; + } + break; + default: + ret = SBI_ENOTSUPP; + } + return ret; +} + +struct sbi_ecall_extension ecall_sse; + +static int sbi_ecall_sse_register_extensions(void) +{ + return sbi_ecall_register_extension(&ecall_sse); +} + +struct sbi_ecall_extension ecall_sse = { + .extid_start = SBI_EXT_SSE, + .extid_end = SBI_EXT_SSE, + .register_extensions = sbi_ecall_sse_register_extensions, + .handle = sbi_ecall_sse_handler, +}; diff --git a/lib/sbi/sbi_hsm.c b/lib/sbi/sbi_hsm.c index be48d64eb78..b0d0023f65c 100644 --- a/lib/sbi/sbi_hsm.c +++ b/lib/sbi/sbi_hsm.c @@ -223,10 +223,10 @@ static int hsm_device_hart_stop(void) return SBI_ENOTSUPP; } -static int hsm_device_hart_suspend(u32 suspend_type) +static int hsm_device_hart_suspend(u32 suspend_type, ulong mmode_resume_addr) { if (hsm_dev && hsm_dev->hart_suspend) - return hsm_dev->hart_suspend(suspend_type); + return hsm_dev->hart_suspend(suspend_type, mmode_resume_addr); return SBI_ENOTSUPP; } @@ -517,7 +517,7 @@ int sbi_hsm_hart_suspend(struct sbi_scratch *scratch, u32 suspend_type, __sbi_hsm_suspend_non_ret_save(scratch); /* Try platform specific suspend */ - ret = hsm_device_hart_suspend(suspend_type); + ret = hsm_device_hart_suspend(suspend_type, scratch->warmboot_addr); if (ret == SBI_ENOTSUPP) { /* Try generic implementation of default suspend types */ if (suspend_type == SBI_HSM_SUSPEND_RET_DEFAULT || diff --git a/lib/sbi/sbi_init.c b/lib/sbi/sbi_init.c index 804b01cd91c..3e10069e6f5 100644 --- a/lib/sbi/sbi_init.c +++ b/lib/sbi/sbi_init.c @@ -24,6 +24,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -74,6 +77,7 @@ static void sbi_boot_print_general(struct sbi_scratch *scratch) const struct sbi_system_reset_device *srdev; const struct sbi_system_suspend_device *susp_dev; const struct sbi_cppc_device *cppc_dev; + const struct sbi_ras_agent *ras_dev; const struct sbi_platform *plat = sbi_platform_ptr(scratch); if (scratch->options & SBI_SCRATCH_NO_BOOT_PRINTS) @@ -114,7 +118,9 @@ static void sbi_boot_print_general(struct sbi_scratch *scratch) cppc_dev = sbi_cppc_get_device(); sbi_printf("Platform CPPC Device : %s\n", (cppc_dev) ? cppc_dev->name : "---"); - + ras_dev = sbi_ras_get_agent(); + sbi_printf("Platform RAS Device : %s\n", + (ras_dev) ? ras_dev->name : "---"); /* Firmware details */ sbi_printf("Firmware Base : 0x%lx\n", scratch->fw_start); sbi_printf("Firmware Size : %d KB\n", @@ -356,6 +362,18 @@ static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid) sbi_hart_hang(); } + rc = sbi_rpxy_init(scratch); + if (rc) { + sbi_printf("%s: rpxy init failed (error %d)\n", __func__, rc); + sbi_hart_hang(); + } + + rc = sbi_sse_init(scratch, true); + if (rc) { + sbi_printf("%s: sse init failed (error %d)\n", __func__, rc); + sbi_hart_hang(); + } + /* * Note: Finalize domains after HSM initialization so that we * can startup non-root domains. 
@@ -466,6 +484,10 @@ static void __noreturn init_warm_startup(struct sbi_scratch *scratch, if (rc) sbi_hart_hang(); + rc = sbi_sse_init(scratch, false); + if (rc) + sbi_hart_hang(); + rc = sbi_platform_final_init(plat, false); if (rc) sbi_hart_hang(); @@ -650,6 +672,8 @@ void __noreturn sbi_exit(struct sbi_scratch *scratch) sbi_platform_early_exit(plat); + sbi_sse_exit(scratch); + sbi_pmu_exit(scratch); sbi_timer_exit(scratch); diff --git a/lib/sbi/sbi_ipi.c b/lib/sbi/sbi_ipi.c index 048aaa6687e..99670169557 100644 --- a/lib/sbi/sbi_ipi.c +++ b/lib/sbi/sbi_ipi.c @@ -66,7 +66,7 @@ static int sbi_ipi_send(struct sbi_scratch *scratch, u32 remote_hartindex, * SBI_IPI_UPDATE_BREAK for self-IPIs. For other events, check * for self-IPI and execute the callback directly here. */ - ipi_ops->process(scratch); + ipi_ops->process(scratch, NULL); return 0; } @@ -186,7 +186,8 @@ void sbi_ipi_event_destroy(u32 event) ipi_ops_array[event] = NULL; } -static void sbi_ipi_process_smode(struct sbi_scratch *scratch) +static void sbi_ipi_process_smode(struct sbi_scratch *scratch, + struct sbi_trap_regs *regs) { csr_set(CSR_MIP, MIP_SSIP); } @@ -208,7 +209,8 @@ void sbi_ipi_clear_smode(void) csr_clear(CSR_MIP, MIP_SSIP); } -static void sbi_ipi_process_halt(struct sbi_scratch *scratch) +static void sbi_ipi_process_halt(struct sbi_scratch *scratch, + struct sbi_trap_regs *regs) { sbi_hsm_hart_stop(scratch, true); } @@ -225,7 +227,7 @@ int sbi_ipi_send_halt(ulong hmask, ulong hbase) return sbi_ipi_send_many(hmask, hbase, ipi_halt_event, NULL); } -void sbi_ipi_process(void) +void sbi_ipi_process(struct sbi_trap_regs *regs) { unsigned long ipi_type; unsigned int ipi_event; @@ -244,7 +246,7 @@ void sbi_ipi_process(void) if (ipi_type & 1UL) { ipi_ops = ipi_ops_array[ipi_event]; if (ipi_ops) - ipi_ops->process(scratch); + ipi_ops->process(scratch, regs); } ipi_type = ipi_type >> 1; ipi_event++; @@ -349,7 +351,7 @@ void sbi_ipi_exit(struct sbi_scratch *scratch) csr_clear(CSR_MIE, MIP_MSIP); /* Process pending IPIs */ - sbi_ipi_process(); + sbi_ipi_process(NULL); /* Platform exit */ sbi_platform_ipi_exit(sbi_platform_ptr(scratch)); diff --git a/lib/sbi/sbi_irqchip.c b/lib/sbi/sbi_irqchip.c index 24128bece5a..bcd2f15600a 100644 --- a/lib/sbi/sbi_irqchip.c +++ b/lib/sbi/sbi_irqchip.c @@ -7,6 +7,7 @@ * Anup Patel */ +#include #include #include @@ -40,6 +41,14 @@ int sbi_irqchip_init(struct sbi_scratch *scratch, bool cold_boot) if (ext_irqfn != default_irqfn) csr_set(CSR_MIE, MIP_MEIP); + if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMAIA)) { +#if __riscv_xlen == 32 + csr_set(CSR_MIEH, MIPH_RASHP_INTP); +#else + csr_set(CSR_MIE, MIP_RASHP_INTP); +#endif + } + return 0; } diff --git a/lib/sbi/sbi_ras.c b/lib/sbi/sbi_ras.c new file mode 100644 index 00000000000..2015502b241 --- /dev/null +++ b/lib/sbi/sbi_ras.c @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. 
+ * + * Author(s): + * Himanshu Chauhan + */ + +#include +#include +#include + +static const struct sbi_ras_agent *ras_agent = NULL; + +const struct sbi_ras_agent *sbi_ras_get_agent(void) +{ + return ras_agent; +} + +void sbi_ras_set_agent(const struct sbi_ras_agent *agent) +{ + if (!agent || ras_agent) + return; + + ras_agent = agent; +} + +int sbi_ras_probe(void) +{ + if (!ras_agent || !ras_agent->ras_probe) + return SBI_EFAIL; + + return ras_agent->ras_probe(); +} + +int sbi_ras_sync_hart_errs(u32 *pending_vectors, u32 *nr_pending, + u32 *nr_remaining) +{ + if (!ras_agent) + return SBI_EFAIL; + + return ras_agent->ras_sync_hart_errs(pending_vectors, nr_pending, + nr_remaining); +} + +int sbi_ras_sync_dev_errs(u32 *pending_vectors, u32 *nr_pending, + u32 *nr_remaining) +{ + if (!ras_agent) + return SBI_EFAIL; + + return ras_agent->ras_sync_dev_errs(pending_vectors, nr_pending, + nr_remaining); +} diff --git a/lib/sbi/sbi_rpxy.c b/lib/sbi/sbi_rpxy.c new file mode 100644 index 00000000000..2097559687e --- /dev/null +++ b/lib/sbi/sbi_rpxy.c @@ -0,0 +1,219 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. + * + * Authors: + * Anup Patel + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/** List of RPMI proxy service groups */ +static SBI_LIST_HEAD(rpxy_group_list); + +static struct sbi_rpxy_service *rpxy_find_service( + struct sbi_rpxy_service_group *grp, + u8 service_id) +{ + int i; + + for (i = 0; i < grp->num_services; i++) + if (grp->services[i].id == service_id) + return &grp->services[i]; + + return NULL; +} + +static struct sbi_rpxy_service_group *rpxy_find_group(u32 transport_id, + u32 service_group_id) +{ + struct sbi_rpxy_service_group *grp; + + sbi_list_for_each_entry(grp, &rpxy_group_list, head) + if (grp->transport_id == transport_id && + grp->service_group_id == service_group_id) + return grp; + + return NULL; +} + +bool sbi_rpxy_service_group_available(void) +{ + return sbi_list_empty(&rpxy_group_list) ? 
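+		/* any registered service group makes RPXY available */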
false : true; +} + +int sbi_rpxy_probe(u32 transport_id, u32 service_group_id, + unsigned long *out_max_data_len) +{ + int rc = SBI_ENOTSUPP; + struct sbi_rpxy_service_group *grp; + + *out_max_data_len = 0; + grp = rpxy_find_group(transport_id, service_group_id); + if (grp) { + *out_max_data_len = grp->max_message_data_len; + rc = 0; + } + + return rc; +} + +int sbi_rpxy_set_shmem(unsigned long shmem_size, + unsigned long shmem_phys_lo, + unsigned long shmem_phys_hi, + unsigned long flags) +{ + struct rpxy_state *rs = sbi_domain_rs_thishart_ptr(); + if (!rs) { + rs = sbi_zalloc(sizeof(struct rpxy_state)); + if (!rs) + return SBI_ENOMEM; + sbi_domain_rs_thishart_ptr() = rs; + } + if (shmem_phys_lo == -1UL && shmem_phys_hi == -1UL) { + rs->shmem_size = 0; + rs->shmem_addr = 0; + return 0; + } + + if (flags || !shmem_size || + (shmem_size & ~PAGE_MASK) || + (shmem_phys_lo & ~PAGE_MASK)) + return SBI_EINVAL; + if (shmem_phys_hi || + !sbi_domain_check_addr_range(sbi_domain_thishart_ptr(), + shmem_phys_lo, shmem_size, PRV_S, + SBI_DOMAIN_READ|SBI_DOMAIN_WRITE)) + return SBI_EINVALID_ADDR; + + rs->shmem_size = shmem_size; + rs->shmem_addr = shmem_phys_lo; + return 0; +} + +int sbi_rpxy_send_message(u32 transport_id, + u32 service_group_id, + u8 service_id, + unsigned long message_data_len, + unsigned long *ack_data_len) +{ + int rc; + u32 tx_len = 0, rx_len = 0; + void *tx = NULL, *rx = NULL; + struct sbi_rpxy_service *srv = NULL; + struct sbi_rpxy_service_group *grp; + struct rpxy_state *rs = sbi_domain_rs_thishart_ptr(); + + if (!rs || !rs->shmem_size) + return SBI_ENO_SHMEM; + + grp = rpxy_find_group(transport_id, service_group_id); + if (grp) + srv = rpxy_find_service(grp, service_id); + if (!srv) + return SBI_ENOTSUPP; + + tx_len = message_data_len; + if (tx_len > rs->shmem_size || tx_len > grp->max_message_data_len) + return SBI_EINVAL; + if (tx_len < srv->min_tx_len || srv->max_tx_len < tx_len) + return SBI_EFAIL; + + sbi_hart_map_saddr(rs->shmem_addr, rs->shmem_size); + + tx = (void *)rs->shmem_addr; + if (ack_data_len) { + rx = (void *)rs->shmem_addr; + if (srv->min_rx_len == srv->max_rx_len) + rx_len = srv->min_rx_len; + else if (srv->max_rx_len < grp->max_message_data_len) + rx_len = srv->max_rx_len; + else + rx_len = grp->max_message_data_len; + } + + rc = grp->send_message(grp, srv, tx, tx_len, rx, rx_len, ack_data_len); + sbi_hart_unmap_saddr(); + if (rc) + return rc; + + if (ack_data_len && + (*ack_data_len > rs->shmem_size || + *ack_data_len > grp->max_message_data_len)) + return SBI_EFAIL; + + return 0; +} + +int sbi_rpxy_get_notification_events(u32 transport_id, u32 service_group_id, + unsigned long *events_len) +{ + int rc; + struct sbi_rpxy_service_group *grp; + struct rpxy_state *rs = sbi_domain_rs_thishart_ptr(); + + if (!rs || !rs->shmem_size) + return SBI_ENO_SHMEM; + + grp = rpxy_find_group(transport_id, service_group_id); + if (!grp) + return SBI_ENOTSUPP; + + if (!grp->get_notification_events || !events_len) + return SBI_EFAIL; + + sbi_hart_map_saddr(rs->shmem_addr, rs->shmem_size); + rc = grp->get_notification_events(grp, (void *)rs->shmem_addr, + rs->shmem_size, + events_len); + sbi_hart_unmap_saddr(); + if (rc) + return rc; + + if (*events_len > rs->shmem_size) + return SBI_EFAIL; + + return 0; +} + +int sbi_rpxy_register_service_group(struct sbi_rpxy_service_group *grp) +{ + int i; + struct sbi_rpxy_service *srv; + + if (!grp || + !grp->max_message_data_len || + !grp->num_services || !grp->services || + !grp->send_message) + return SBI_EINVAL; + for (i = 0; i < 
grp->num_services; i++) { + srv = &grp->services[i]; + if (!srv->id || + (srv->min_tx_len > srv->max_tx_len) || + (srv->min_tx_len > grp->max_message_data_len) || + (srv->min_rx_len > srv->max_rx_len) || + (srv->min_rx_len > grp->max_message_data_len)) + return SBI_EINVAL; + } + + if (rpxy_find_group(grp->transport_id, grp->service_group_id)) + return SBI_EALREADY; + + SBI_INIT_LIST_HEAD(&grp->head); + sbi_list_add_tail(&grp->head, &rpxy_group_list); + + return 0; +} + +int sbi_rpxy_init(struct sbi_scratch *scratch) +{ + return sbi_platform_rpxy_init(sbi_platform_ptr(scratch)); +} diff --git a/lib/sbi/sbi_sse.c b/lib/sbi/sbi_sse.c new file mode 100644 index 00000000000..51860f19476 --- /dev/null +++ b/lib/sbi/sbi_sse.c @@ -0,0 +1,826 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Rivos Systems Inc. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include /* TODO: REMOVE */ + +enum sse_event_index { + SSE_LOCAL_RAS_0 = 0, + SSE_LOCAL_RAS_1, + SSE_LOCAL_RAS_RSVD, + SSE_LOCAL_PMU, + SSE_LOCAL_ASYNC_PF, + SSE_LOCAL_DEBUG, + SSE_MAX_LOCAL_EVENTS, + SSE_GLOBAL_RAS = SSE_MAX_LOCAL_EVENTS, + SSE_GLOBAL_DEBUG, + SSE_MAX_EVENTS, + SSE_MAX_GLOBAL_EVENTS = SSE_MAX_EVENTS - SSE_MAX_LOCAL_EVENTS, +}; + +struct sse_ipi_inject_data { + uint32_t event_id; +}; + +struct sbi_sse_event { + enum sbi_sse_state state; + bool pending; + uint32_t event_idx; + struct sbi_sse_handler_ctx *ctx; + uint32_t prio; + unsigned int hartid; + set_hartid_cb_t set_hartid_cb; + spinlock_t lock; +}; + +struct sse_hart_state { + struct sbi_sse_event events[SSE_MAX_LOCAL_EVENTS]; +}; + +static struct sbi_sse_event global_events[SSE_MAX_GLOBAL_EVENTS]; + +static unsigned long sse_inject_fifo_off; +static unsigned long sse_inject_fifo_mem_off; +/* Offset of pointer to SSE HART state in scratch space */ +static unsigned long shs_ptr_off; + +static u32 sse_ipi_inject_event = SBI_IPI_EVENT_MAX; + +#define sse_get_hart_state_ptr(__scratch) \ + sbi_scratch_read_type((__scratch), void *, shs_ptr_off) + +#define sse_thishart_state_ptr() \ + sse_get_hart_state_ptr(sbi_scratch_thishart_ptr()) + +#define sse_set_hart_state_ptr(__scratch, __sse_state) \ + sbi_scratch_write_type((__scratch), void *, shs_ptr_off, (__sse_state)) + +static int sse_ipi_inject_send(unsigned long hartid, uint32_t event_id); + +static enum sse_event_index sse_event_idx(uint32_t event_id) +{ + switch (event_id) { + case SBI_SSE_EVENT_LOCAL_RAS_0: + return SSE_LOCAL_RAS_0; + case SBI_SSE_EVENT_LOCAL_RAS_1: + return SSE_LOCAL_RAS_1; + case SBI_SSE_EVENT_LOCAL_PMU: + return SSE_LOCAL_PMU; + case SBI_SSE_EVENT_LOCAL_ASYNC_PF: + return SSE_LOCAL_ASYNC_PF; + case SBI_SSE_EVENT_LOCAL_DEBUG: + return SSE_LOCAL_DEBUG; + case SBI_SSE_EVENT_GLOBAL_RAS: + return SSE_GLOBAL_RAS; + case SBI_SSE_EVENT_GLOBAL_DEBUG: + return SSE_GLOBAL_DEBUG; + default: + return SSE_MAX_EVENTS; + } +} + +static bool sse_event_is_global(struct sbi_sse_event *e) +{ + return e->event_idx >= SSE_MAX_LOCAL_EVENTS; +} + +static void sse_event_lock(struct sbi_sse_event *e) +{ + if (sse_event_is_global(e)) + spin_lock(&e->lock); +} + +static void sse_event_unlock(struct sbi_sse_event *e) +{ + if (sse_event_is_global(e)) + spin_unlock(&e->lock); +} + +static void sse_event_set_state(struct sbi_sse_event *e, + enum sbi_sse_state new_state) +{ + enum sbi_sse_state prev_state = e->state; + + e->state = new_state; + switch (new_state) { + case SSE_STATE_UNUSED: 
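+		/* Only an unregister of a REGISTERED event lands here */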
+ if (prev_state == SSE_STATE_REGISTERED) + return; + break; + case SSE_STATE_REGISTERED: + if (prev_state == SSE_STATE_UNUSED || + prev_state == SSE_STATE_ENABLED) { + return; + } + break; + case SSE_STATE_ENABLED: + if (prev_state == SSE_STATE_REGISTERED || + prev_state == SSE_STATE_RUNNING) + return; + break; + case SSE_STATE_RUNNING: + if (prev_state == SSE_STATE_ENABLED) + return; + break; + } + + sbi_panic("Invalid SSE state transition: %d -> %d\n", prev_state, + new_state); +} + +static struct sbi_sse_event *sse_event_get_by_idx(uint32_t idx) +{ + struct sse_hart_state *shs; + + if (idx < SSE_MAX_LOCAL_EVENTS) { + shs = sse_thishart_state_ptr(); + return shs->events + idx; + } else { + return &global_events[idx - SSE_MAX_LOCAL_EVENTS]; + } +} + +static struct sbi_sse_event *sse_event_get(uint32_t event) +{ + enum sse_event_index idx; + + idx = sse_event_idx(event); + if (idx >= SSE_MAX_EVENTS) + return NULL; + + return sse_event_get_by_idx(idx); +} + +static int sse_event_get_attr(struct sbi_sse_event *e, uint32_t attr_id, + unsigned long *out_val) +{ + int ret; + + switch (attr_id) { + case SBI_SSE_ATTR_STATE: + *out_val = e->state; + ret = 0; + break; + case SBI_SSE_ATTR_PRIO: + *out_val = e->prio; + ret = 0; + break; + case SBI_SSE_ATTR_ALLOW_INJECT: + *out_val = 1; + ret = 0; + break; + case SBI_SSE_ATTR_HART_ID: + *out_val = e->hartid; + ret = 0; + break; + case SBI_SSE_ATTR_PENDING: + *out_val = e->pending; + ret = 0; + break; + default: + ret = SBI_EINVAL; + break; + } + + return ret; +} + +static int sse_event_set_hart_id(struct sbi_sse_event *e, uint32_t event_id, + unsigned long val) +{ + int hstate; + uint32_t hartid = (uint32_t) val; + struct sbi_domain * hd = sbi_domain_thishart_ptr(); + + if (!sse_event_is_global(e)) + return SBI_EDENIED; + + if (!sbi_domain_is_assigned_hart(hd, val)) + return SBI_EINVAL; + + hstate = sbi_hsm_hart_get_state(hd, hartid); + if (hstate != SBI_HSM_STATE_STARTED) + return SBI_EINVAL; + + if (e->state == SSE_STATE_RUNNING) + return SBI_EBUSY; + + if (val == e->hartid) + return SBI_OK; + + e->hartid = hartid; + + if (e->set_hartid_cb) + e->set_hartid_cb(event_id, e->hartid); + + if (e->pending) + sbi_ipi_send_many(BIT(e->hartid), 0, sse_ipi_inject_event, NULL); + + return 0; +} + +static int sse_event_set_attr(struct sbi_sse_event *e, uint32_t event_id, + uint32_t attr_id, unsigned long val) +{ + int ret; + + switch (attr_id) { + case SBI_SSE_ATTR_PENDING: + case SBI_SSE_ATTR_STATE: + case SBI_SSE_ATTR_ALLOW_INJECT: + /* Read-only */ + ret = SBI_EDENIED; + break; + case SBI_SSE_ATTR_PRIO: + e->prio = (uint32_t)val; + ret = 0; + break; + case SBI_SSE_ATTR_HART_ID: + ret = sse_event_set_hart_id(e, event_id, val); + break; + default: + ret = SBI_EINVAL; + break; + } + + return ret; +} + +static int sse_event_register(struct sbi_sse_event *e, + struct sbi_sse_handler_ctx *ctx) +{ + if (sse_event_is_global(e) && e->hartid != current_hartid()) + return SBI_EINVAL; + + if (e->state != SSE_STATE_UNUSED) + return SBI_EINVALID_STATE; + + e->ctx = ctx; + sse_event_set_state(e, SSE_STATE_REGISTERED); + + return 0; +} + +static int sse_event_unregister(struct sbi_sse_event *e) +{ + if (e->state != SSE_STATE_REGISTERED) + return SBI_EINVALID_STATE; + + sse_event_set_state(e, SSE_STATE_UNUSED); + e->ctx = NULL; + + return 0; +} + +static int sse_event_inject(struct sbi_sse_event *e, struct sbi_sse_event *prev_e, + struct sbi_trap_regs *regs) +{ + ulong prev_smode, prev_virt; + struct sse_interrupted_state *i_ctx = &e->ctx->interrupted; + struct 
sse_entry_state *e_ctx = &e->ctx->entry; + + sse_event_set_state(e, SSE_STATE_RUNNING); + e->pending = false; + + if (prev_e) { + /* We are injected right after another event, copy previous + * event context for correct restoration + */ + sbi_memcpy(i_ctx, &prev_e->ctx->interrupted, + sizeof(struct sse_interrupted_state)); + } else { + sbi_memcpy(&i_ctx->ra, ®s->ra, sizeof(unsigned long) * 31); + + prev_smode = (regs->mstatus & MSTATUS_MPP) >> MSTATUS_MPP_SHIFT; + #if __riscv_xlen == 32 + prev_virt = (regs->mstatusH & MSTATUSH_MPV) ? 1 : 0; + #else + prev_virt = (regs->mstatus & MSTATUS_MPV) ? 1 : 0; + #endif + + i_ctx->exc_mode = prev_smode << EXC_MODE_PP_SHIFT; + i_ctx->exc_mode |= prev_virt << EXC_MODE_PV_SHIFT; + if (regs->mstatus & MSTATUS_SPIE) + i_ctx->exc_mode |= EXC_MODE_SSTATUS_SPIE; + i_ctx->pc = regs->mepc; + + /* We only want to set SPIE for the first event injected after + * entering M-Mode. For the event injected right after another + * event (after calling sse_event_complete(), we will keep the + * saved SPIE). + */ + regs->mstatus &= ~MSTATUS_SPIE; + if (regs->mstatus & MSTATUS_SIE) + regs->mstatus |= MSTATUS_SPIE; + } + + sbi_memcpy(®s->ra, &e_ctx->ra, sizeof(unsigned long) * 31); + regs->mepc = e_ctx->pc; + + regs->mstatus &= ~MSTATUS_MPP; + regs->mstatus |= (PRV_S << MSTATUS_MPP_SHIFT); + + #if __riscv_xlen == 32 + regs->mstatusH &= ~MSTATUSH_MPV; + #else + regs->mstatus &= ~MSTATUS_MPV; + #endif + + regs->mstatus &= ~MSTATUS_SIE; + + return SBI_EJUMP; +} + +static int sse_event_resume(struct sbi_sse_event *e, struct sbi_trap_regs *regs) +{ + struct sse_interrupted_state *i_ctx = &e->ctx->interrupted; + + sbi_memcpy(®s->ra, &i_ctx->ra, sizeof(unsigned long) * 31); + + /* Restore previous virtualization state */ +#if __riscv_xlen == 32 + regs->mstatusH &= ~MSTATUSH_MPV; + if (i_ctx->exc_mode & EXC_MODE_PV) + regs->mstatusH |= MSTATUSH_MPV; +#else + regs->mstatus &= ~MSTATUS_MPV; + if (i_ctx->exc_mode & EXC_MODE_PV) + regs->mstatus |= MSTATUS_MPV; +#endif + + regs->mstatus &= ~MSTATUS_MPP; + if (i_ctx->exc_mode & EXC_MODE_PP) + regs->mstatus |= (PRV_S << MSTATUS_MPP_SHIFT); + + regs->mstatus &= ~MSTATUS_SIE; + if (regs->mstatus & MSTATUS_SPIE) + regs->mstatus |= MSTATUS_SIE; + + regs->mstatus &= ~MSTATUS_SPIE; + if (i_ctx->exc_mode & EXC_MODE_SSTATUS_SPIE) + regs->mstatus |= MSTATUS_SPIE; + + regs->mepc = i_ctx->pc; + + return SBI_EJUMP; +} + +static int sse_process_event(struct sbi_sse_event *prev_e, + struct sbi_sse_event *e, + struct sbi_trap_regs *regs) +{ + /* Do not preempt the same event if already running */ + if (!e->pending || e->state == SSE_STATE_RUNNING || + e->hartid != current_hartid()) { + return SBI_OK; + } + + return sse_event_inject(e, prev_e, regs); +} + +static int sse_process_pending_events(struct sbi_sse_event *prev_e, + struct sbi_trap_regs *regs) +{ + int i, ret; + struct sbi_sse_event *e; + + for (i = 0; i < SSE_MAX_EVENTS; i++) { + e = sse_event_get_by_idx(i); + + if (e != prev_e) + sse_event_lock(e); + + ret = sse_process_event(prev_e, e, regs); + if (e != prev_e) + sse_event_unlock(e); + + if (ret) + return ret; + } + + return SBI_OK; +} + +static int sse_event_set_pending(struct sbi_sse_event *e) +{ + if (e->state != SSE_STATE_RUNNING && e->state != SSE_STATE_ENABLED) + return SBI_ERR_INVALID_STATE; + + e->pending = true; + + return SBI_OK; +} + +static void sse_ipi_inject_process(struct sbi_scratch *scratch, + struct sbi_trap_regs *regs) +{ + struct sbi_sse_event *e; + struct sse_ipi_inject_data evt; + struct sbi_fifo *sse_inject_fifo_r = + 
sbi_scratch_offset_ptr(scratch, sse_inject_fifo_off); + + /* This can be the case when sbi_exit() is called */ + if (!regs) + return; + + /* Mark all queued events as pending */ + while(!sbi_fifo_dequeue(sse_inject_fifo_r, &evt)) { + e = sse_event_get(evt.event_id); + if (!e) + continue; + + sse_event_lock(e); + sse_event_set_pending(e); + sse_event_unlock(e); + } + + sse_process_pending_events(NULL, regs); +} + +static struct sbi_ipi_event_ops sse_ipi_inject_ops = { + .name = "IPI_SSE_INJECT", + .process = sse_ipi_inject_process, +}; + +static int sse_ipi_inject_send(unsigned long hartid, uint32_t event_id) +{ + int ret; + struct sbi_scratch *remote_scratch = NULL; + struct sse_ipi_inject_data evt = {event_id}; + struct sbi_fifo *sse_inject_fifo_r; + + remote_scratch = sbi_hartid_to_scratch(hartid); + if (!remote_scratch) + return SBI_EINVAL; + sse_inject_fifo_r = sbi_scratch_offset_ptr(remote_scratch, sse_inject_fifo_off); + + ret = sbi_fifo_enqueue(sse_inject_fifo_r, &evt); + if (ret) + return SBI_EFAIL; + + ret = sbi_ipi_send_many(BIT(hartid), 0, sse_ipi_inject_event, NULL); + if (ret) + return SBI_EFAIL; + + return SBI_OK; +} + +static int sse_inject_event(uint32_t event_id, unsigned long hartid, + struct sbi_trap_regs *regs, bool from_ecall) +{ + int ret; + struct sbi_sse_event *e; + + e = sse_event_get(event_id); + if (!e) + return SBI_EINVAL; + + sse_event_lock(e); + + /* In case of global event, provided hart_id is ignored */ + if (sse_event_is_global(e)) + hartid = e->hartid; + + /* + * If coming from an ecall, always use an IPI to send the event, this + * simplifies handling as we don't have to modify epc/a0 for ecall + * return value. + */ + if (from_ecall || hartid != current_hartid()) { + sse_event_unlock(e); + return sse_ipi_inject_send(hartid, event_id); + } + + /* + * In other cases, directly handle the event on this hart for faster + * handling + */ + ret = sse_event_set_pending(e); + sse_event_unlock(e); + if (ret) + return ret; + + return sse_process_pending_events(NULL, regs); +} + +static int sse_event_disable(struct sbi_sse_event *e) +{ + if (e->state != SSE_STATE_ENABLED) + return SBI_EINVALID_STATE; + + sse_event_set_state(e, SSE_STATE_REGISTERED); + + return SBI_OK; +} + +static int sse_event_enable(struct sbi_sse_event *e) +{ + if (e->state != SSE_STATE_REGISTERED) + return SBI_EINVALID_STATE; + + sse_event_set_state(e, SSE_STATE_ENABLED); + + return SBI_OK; +} + +static int sse_event_complete(struct sbi_sse_event *e, uint32_t status, + uint32_t flags, struct sbi_trap_regs *regs) +{ + if (e->state != SSE_STATE_RUNNING) + return SBI_EINVALID_STATE; + + if (e->hartid != current_hartid()) + return SBI_EDENIED; + + if (flags & SBI_SSE_COMPLETE_FLAG_EVENT_DISABLE) + sse_event_set_state(e, SSE_STATE_REGISTERED); + else + sse_event_set_state(e, SSE_STATE_ENABLED); + + if (sse_process_pending_events(e, regs) == SBI_EJUMP) + return SBI_EJUMP; + + return sse_event_resume(e, regs); +} + +int sbi_sse_complete(uint32_t event_id, uint32_t status, uint32_t flags, + struct sbi_trap_regs *regs) +{ + int ret; + struct sbi_sse_event *e; + + e = sse_event_get(event_id); + if (!e) + return SBI_EINVAL; + + sse_event_lock(e); + ret = sse_event_complete(e, status, flags, regs); + sse_event_unlock(e); + + return ret; +} + +int sbi_sse_enable(uint32_t event_id) +{ + int ret; + struct sbi_sse_event *e; + + e = sse_event_get(event_id); + if (!e) + return SBI_EINVAL; + + sse_event_lock(e); + ret = sse_event_enable(e); + sse_event_unlock(e); + + return ret; +} + +int sbi_sse_disable(uint32_t 
event_id) +{ + int ret; + struct sbi_sse_event *e; + + e = sse_event_get(event_id); + if (!e) + return SBI_EINVAL; + + sse_event_lock(e); + ret = sse_event_disable(e); + sse_event_unlock(e); + + return ret; +} + +int sbi_sse_inject_from_ecall(uint32_t event_id, unsigned long hartid, + struct sbi_trap_regs *regs) +{ + if (!sbi_domain_is_assigned_hart(sbi_domain_thishart_ptr(), hartid)) + return SBI_EINVAL; + + return sse_inject_event(event_id, hartid, regs, true); +} + +int sbi_sse_inject_event(uint32_t event_id, struct sbi_trap_regs *regs) +{ + return sse_inject_event(event_id, current_hartid(), regs, false); +} + +int sbi_sse_event_set_hartid_cb(uint32_t event_id, + set_hartid_cb_t set_hartid_cb) +{ + + struct sbi_sse_event *e; + + e = sse_event_get(event_id); + if (!e) + return SBI_EINVAL; + + if (!sse_event_is_global(e)) + return SBI_EINVAL; + + e->set_hartid_cb = set_hartid_cb; + + return SBI_OK; +} + +int sbi_sse_get_attr(uint32_t event_id, uint32_t attr_id, unsigned long *out_val) +{ + struct sbi_sse_event *e; + + e = sse_event_get(event_id); + if (!e) + return SBI_EINVAL; + + return sse_event_get_attr(e, attr_id, out_val); +} + +int sbi_sse_set_attr(uint32_t event_id, uint32_t attr_id, unsigned long val) +{ + int ret; + struct sbi_sse_event *e; + + e = sse_event_get(event_id); + if (!e) + return SBI_EINVAL; + + sse_event_lock(e); + ret = sse_event_set_attr(e, event_id, attr_id, val); + sse_event_unlock(e); + + return ret; +} + +int sbi_sse_register(uint32_t event_id, + unsigned long phys_lo, + unsigned long phys_hi) +{ + int ret; + struct sbi_sse_event *e; + const unsigned align = __riscv_xlen >> 3; + ulong smode = (csr_read(CSR_MSTATUS) & MSTATUS_MPP) >> + MSTATUS_MPP_SHIFT; + + if (phys_lo & (align - 1)) + return SBI_EINVALID_ADDR; + + /* + * On RV32, the M-mode can only access the first 4GB of + * the physical address space because M-mode does not have + * MMU to access full 34-bit physical address space. + * + * Based on above, we simply fail if the upper 32bits of + * the physical address (i.e. a2 register) is non-zero on + * RV32. 
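+ * For example, a handler context placed at physical address
+ * 0x1_0000_0000 would arrive with a non-zero phys_hi and is
+ * rejected.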
+ */ + if (phys_hi) + return SBI_EINVALID_ADDR; + + if (!sbi_domain_check_addr_range(sbi_domain_thishart_ptr(), + phys_lo, + sizeof(struct sbi_sse_handler_ctx), + smode, + SBI_DOMAIN_READ|SBI_DOMAIN_WRITE)) + return SBI_EINVALID_ADDR; + + e = sse_event_get(event_id); + if (!e) + return SBI_EINVAL; + + sse_event_lock(e); + ret = sse_event_register(e, (struct sbi_sse_handler_ctx *)phys_lo); + sse_event_unlock(e); + + return ret; +} + +int sbi_sse_unregister(uint32_t event_id) +{ + int ret; + struct sbi_sse_event *e; + + e = sse_event_get(event_id); + if (!e) + return SBI_EINVAL; + + sse_event_lock(e); + ret = sse_event_unregister(e); + sse_event_unlock(e); + + return ret; +} + +static void sse_global_init() +{ + unsigned int i, event_idx; + + for (i = 0; i < SSE_MAX_GLOBAL_EVENTS; i++) { + event_idx = SSE_MAX_LOCAL_EVENTS + i; + global_events[i].event_idx = event_idx; + global_events[i].hartid = current_hartid(); + SPIN_LOCK_INIT(global_events[i].lock); + } +} + +static void sse_local_init(struct sse_hart_state *shs) +{ + unsigned int i; + + for (i = 0; i < SSE_MAX_LOCAL_EVENTS; i++) { + shs->events[i].event_idx = i; + shs->events[i].hartid = current_hartid(); + SPIN_LOCK_INIT(shs->events[i].lock); + } +} + +int sbi_sse_init(struct sbi_scratch *scratch, bool cold_boot) +{ + int ret; + void *sse_inject_mem; + struct sse_hart_state *shs; + struct sbi_fifo *sse_inject_q; + + if (cold_boot) { + sse_global_init(); + + shs_ptr_off = sbi_scratch_alloc_offset(sizeof(void *)); + if (!shs_ptr_off) { + return SBI_ENOMEM; + } + + sse_inject_fifo_off = sbi_scratch_alloc_offset(sizeof(*sse_inject_q)); + if (!sse_inject_fifo_off) { + sbi_scratch_free_offset(shs_ptr_off); + return SBI_ENOMEM; + } + sse_inject_fifo_mem_off = sbi_scratch_alloc_offset( + SSE_MAX_EVENTS * sizeof(struct sse_ipi_inject_data)); + if (!sse_inject_fifo_mem_off) { + sbi_scratch_free_offset(sse_inject_fifo_off); + sbi_scratch_free_offset(shs_ptr_off); + return SBI_ENOMEM; + } + + ret = sbi_ipi_event_create(&sse_ipi_inject_ops); + if (ret < 0) { + sbi_scratch_free_offset(shs_ptr_off); + return ret; + } + sse_ipi_inject_event = ret; + } + + shs = sse_get_hart_state_ptr(scratch); + if (!shs) { + shs = sbi_zalloc(sizeof(*shs)); + if (!shs) + return SBI_ENOMEM; + + sse_set_hart_state_ptr(scratch, shs); + } + + sse_local_init(shs); + + sse_inject_q = sbi_scratch_offset_ptr(scratch, sse_inject_fifo_off); + sse_inject_mem = sbi_scratch_offset_ptr(scratch, sse_inject_fifo_mem_off); + + sbi_fifo_init(sse_inject_q, sse_inject_mem, + SSE_MAX_EVENTS, sizeof(struct sse_ipi_inject_data)); + + return 0; +} + +void sbi_sse_exit(struct sbi_scratch *scratch) +{ + int i; + struct sbi_sse_event *e; + + for (i = 0; i < SSE_MAX_EVENTS; i++) { + e = sse_event_get_by_idx(i); + + if (e->hartid != current_hartid()) + continue; + + if (e->state > SSE_STATE_REGISTERED) + sbi_printf("Event %d in invalid state when stopping hart", i); + } +} diff --git a/lib/sbi/sbi_tlb.c b/lib/sbi/sbi_tlb.c index cca319f9cbb..3fff519fb6b 100644 --- a/lib/sbi/sbi_tlb.c +++ b/lib/sbi/sbi_tlb.c @@ -240,7 +240,7 @@ static bool tlb_process_once(struct sbi_scratch *scratch) return false; } -static void tlb_process(struct sbi_scratch *scratch) +static void tlb_process(struct sbi_scratch *scratch, struct sbi_trap_regs *regs) { while (tlb_process_once(scratch)); } diff --git a/lib/sbi/sbi_trap.c b/lib/sbi/sbi_trap.c index dbf307c6894..1f1d1995480 100644 --- a/lib/sbi/sbi_trap.c +++ b/lib/sbi/sbi_trap.c @@ -19,9 +19,13 @@ #include #include #include +#include #include #include #include 
+#include +#include +#include static void __noreturn sbi_trap_error(const char *msg, int rc, ulong mcause, ulong mtval, ulong mtval2, @@ -198,6 +202,31 @@ int sbi_trap_redirect(struct sbi_trap_regs *regs, return 0; } +void sbi_ras_process(struct sbi_trap_regs *regs) +{ + int rc; + u32 pending_vectors[MAX_PEND_VECS] = { 0xfffffffful }; + u32 nr_pending, nr_remaining; + +#if __riscv_xlen == 32 + csr_clear(CSR_MIPH, MIPH_RASHP_INTP); +#else + csr_clear(CSR_MIP, MIP_RASHP_INTP); +#endif + + rc = sbi_ras_sync_hart_errs(pending_vectors, &nr_pending, &nr_remaining); + + if (rc) + return; + + for (rc = 0; rc < nr_pending; rc++) + if (pending_vectors[rc] < SBI_SSE_EVENT_LOCAL_RAS_RSVD + && pending_vectors[rc] >= SBI_SSE_EVENT_LOCAL_RAS_0) + sbi_sse_inject_event(pending_vectors[rc], regs); + + return; +} + static int sbi_trap_nonaia_irq(struct sbi_trap_regs *regs, ulong mcause) { mcause &= ~(1UL << (__riscv_xlen - 1)); @@ -206,7 +235,10 @@ static int sbi_trap_nonaia_irq(struct sbi_trap_regs *regs, ulong mcause) sbi_timer_process(); break; case IRQ_M_SOFT: - sbi_ipi_process(); + sbi_ipi_process(regs); + break; + case IRQ_RASHP_INT: + sbi_ras_process(regs); break; case IRQ_M_EXT: return sbi_irqchip_process(regs); @@ -229,13 +261,16 @@ static int sbi_trap_aia_irq(struct sbi_trap_regs *regs, ulong mcause) sbi_timer_process(); break; case IRQ_M_SOFT: - sbi_ipi_process(); + sbi_ipi_process(regs); break; case IRQ_M_EXT: rc = sbi_irqchip_process(regs); if (rc) return rc; break; + case IRQ_RASHP_INT: + sbi_ras_process(regs); + break; default: return SBI_ENOENT; } diff --git a/lib/utils/Kconfig b/lib/utils/Kconfig index de8b4eb919f..50516366017 100644 --- a/lib/utils/Kconfig +++ b/lib/utils/Kconfig @@ -2,10 +2,14 @@ menu "Utils and Drivers Support" +source "$(OPENSBI_SRC_DIR)/lib/utils/cppc/Kconfig" + source "$(OPENSBI_SRC_DIR)/lib/utils/fdt/Kconfig" source "$(OPENSBI_SRC_DIR)/lib/utils/gpio/Kconfig" +source "$(OPENSBI_SRC_DIR)/lib/utils/hsm/Kconfig" + source "$(OPENSBI_SRC_DIR)/lib/utils/i2c/Kconfig" source "$(OPENSBI_SRC_DIR)/lib/utils/ipi/Kconfig" @@ -14,12 +18,20 @@ source "$(OPENSBI_SRC_DIR)/lib/utils/irqchip/Kconfig" source "$(OPENSBI_SRC_DIR)/lib/utils/libfdt/Kconfig" +source "$(OPENSBI_SRC_DIR)/lib/utils/mailbox/Kconfig" + +source "$(OPENSBI_SRC_DIR)/lib/utils/ras/Kconfig" + source "$(OPENSBI_SRC_DIR)/lib/utils/regmap/Kconfig" source "$(OPENSBI_SRC_DIR)/lib/utils/reset/Kconfig" +source "$(OPENSBI_SRC_DIR)/lib/utils/rpxy/Kconfig" + source "$(OPENSBI_SRC_DIR)/lib/utils/serial/Kconfig" +source "$(OPENSBI_SRC_DIR)/lib/utils/suspend/Kconfig" + source "$(OPENSBI_SRC_DIR)/lib/utils/sys/Kconfig" source "$(OPENSBI_SRC_DIR)/lib/utils/timer/Kconfig" diff --git a/lib/utils/cppc/Kconfig b/lib/utils/cppc/Kconfig new file mode 100644 index 00000000000..494f68940d5 --- /dev/null +++ b/lib/utils/cppc/Kconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: BSD-2-Clause + +menu "CPPC Device Support" + +config FDT_CPPC + bool "FDT based CPPC drivers" + depends on FDT + default n + +if FDT_CPPC + +config FDT_CPPC_RPMI + bool "FDT RPMI CPPC driver" + depends on FDT_MAILBOX && RPMI_MAILBOX + default n + +endif + +endmenu diff --git a/lib/utils/cppc/fdt_cppc.c b/lib/utils/cppc/fdt_cppc.c new file mode 100644 index 00000000000..3d467e1188e --- /dev/null +++ b/lib/utils/cppc/fdt_cppc.c @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. 
+ * + * Authors: + * Anup Patel + */ + +#include +#include +#include + +/* List of FDT CPPC drivers generated at compile time */ +extern struct fdt_cppc *fdt_cppc_drivers[]; +extern unsigned long fdt_cppc_drivers_size; + +static struct fdt_cppc *current_driver = NULL; + +void fdt_cppc_exit(void) +{ + if (current_driver && current_driver->exit) + current_driver->exit(); +} + +static int fdt_cppc_warm_init(void) +{ + if (current_driver && current_driver->warm_init) + return current_driver->warm_init(); + return 0; +} + +static int fdt_cppc_cold_init(void) +{ + int pos, noff, rc; + struct fdt_cppc *drv; + const struct fdt_match *match; + void *fdt = fdt_get_address(); + + for (pos = 0; pos < fdt_cppc_drivers_size; pos++) { + drv = fdt_cppc_drivers[pos]; + + noff = -1; + while ((noff = fdt_find_match(fdt, noff, + drv->match_table, &match)) >= 0) { + /* drv->cold_init must not be NULL */ + if (drv->cold_init == NULL) + return SBI_EFAIL; + + rc = drv->cold_init(fdt, noff, match); + if (rc == SBI_ENODEV) + continue; + if (rc) + return rc; + current_driver = drv; + + /* + * We can have multiple CPPC devices on multi-die or + * multi-socket systems so we cannot break here. + */ + } + } + + /* + * On some single-hart system there is no need for CPPC, + * so we cannot return a failure here + */ + return 0; +} + +int fdt_cppc_init(bool cold_boot) +{ + int rc; + + if (cold_boot) { + rc = fdt_cppc_cold_init(); + if (rc) + return rc; + } + + return fdt_cppc_warm_init(); +} diff --git a/lib/utils/cppc/fdt_cppc_drivers.carray b/lib/utils/cppc/fdt_cppc_drivers.carray new file mode 100644 index 00000000000..c2a9af2cfb9 --- /dev/null +++ b/lib/utils/cppc/fdt_cppc_drivers.carray @@ -0,0 +1,3 @@ +HEADER: sbi_utils/cppc/fdt_cppc.h +TYPE: struct fdt_cppc +NAME: fdt_cppc_drivers diff --git a/lib/utils/cppc/fdt_cppc_rpmi.c b/lib/utils/cppc/fdt_cppc_rpmi.c new file mode 100644 index 00000000000..56457ca1624 --- /dev/null +++ b/lib/utils/cppc/fdt_cppc_rpmi.c @@ -0,0 +1,259 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. 
+ * + * Authors: + * Subrahmanya Lingappa + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct rpmi_cppc { + struct mbox_chan *chan; + bool fast_chan_supported; + ulong fast_chan_addr; + bool fast_chan_db_supported; + enum rpmi_cppc_fast_channel_db_width fast_chan_db_width; + ulong fast_chan_db_addr; + u64 fast_chan_db_id; +}; + +static unsigned long rpmi_cppc_offset; + +static struct rpmi_cppc *rpmi_cppc_get_pointer(u32 hartid) +{ + struct sbi_scratch *scratch; + + scratch = sbi_hartid_to_scratch(hartid); + if (!scratch || !rpmi_cppc_offset) + return NULL; + + return sbi_scratch_offset_ptr(scratch, rpmi_cppc_offset); +} + +static int rpmi_cppc_read(unsigned long reg, u64 *val) +{ + int rc = SBI_SUCCESS; + struct rpmi_cppc_read_reg_req req; + struct rpmi_cppc_read_reg_resp resp; + struct rpmi_cppc *cppc; + + req.hart_id = current_hartid(); + req.reg_id = reg; + cppc = rpmi_cppc_get_pointer(req.hart_id); + + rc = rpmi_normal_request_with_status( + cppc->chan, RPMI_CPPC_SRV_READ_REG, + &req, rpmi_u32_count(req), rpmi_u32_count(req), + &resp, rpmi_u32_count(resp), rpmi_u32_count(resp)); + if (rc) + return rc; + +#if __riscv_xlen == 32 + *val = resp.data_lo; +#else + *val = (u64)resp.data_hi << 32 | resp.data_lo; +#endif + return rc; +} + +static int rpmi_cppc_write(unsigned long reg, u64 val) +{ + int rc = SBI_SUCCESS; + u32 hart_id = current_hartid(); + struct rpmi_cppc_write_reg_req req; + struct rpmi_cppc_write_reg_resp resp; + struct rpmi_cppc *cppc = rpmi_cppc_get_pointer(hart_id); + + if (reg != SBI_CPPC_DESIRED_PERF || !cppc->fast_chan_supported) { + req.hart_id = hart_id; + req.reg_id = reg; + req.data_lo = val & 0xFFFFFFFF; + req.data_hi = val >> 32; + + rc = rpmi_normal_request_with_status( + cppc->chan, RPMI_CPPC_SRV_WRITE_REG, + &req, rpmi_u32_count(req), rpmi_u32_count(req), + &resp, rpmi_u32_count(resp), rpmi_u32_count(resp)); + } else { + /* use fast path writes */ +#if __riscv_xlen != 32 + writeq(val, (void *)cppc->fast_chan_addr); +#else + writel((u32)val, (void *)cppc->fast_chan_addr); + writel((u32)(val >> 32), (void *)(cppc->fast_chan_addr + 4)); +#endif + if (cppc->fast_chan_db_supported) { + switch (cppc->fast_chan_db_width) { + case RPMI_CPPC_FAST_CHANNEL_DB_WIDTH_8: + writeb((u8)cppc->fast_chan_db_id, + (void *)cppc->fast_chan_db_addr); + break; + case RPMI_CPPC_FAST_CHANNEL_DB_WIDTH_16: + writew((u16)cppc->fast_chan_db_id, + (void *)cppc->fast_chan_db_addr); + break; + case RPMI_CPPC_FAST_CHANNEL_DB_WIDTH_32: + writel((u32)cppc->fast_chan_db_id, + (void *)cppc->fast_chan_db_addr); + break; + case RPMI_CPPC_FAST_CHANNEL_DB_WIDTH_64: +#if __riscv_xlen != 32 + writeq(cppc->fast_chan_db_id, + (void *)cppc->fast_chan_db_addr); +#else + writel((u32)cppc->fast_chan_db_id, + (void *)cppc->fast_chan_db_addr); + writel((u32)(cppc->fast_chan_db_id >> 32), + (void *)(cppc->fast_chan_db_addr + 4)); +#endif + break; + default: + break; + } + } + } + + return rc; +} + +static int rpmi_cppc_probe(unsigned long reg) +{ + int rc; + struct rpmi_cppc *cppc; + struct rpmi_cppc_probe_resp resp; + struct rpmi_cppc_probe_req req; + + req.hart_id = current_hartid(); + req.reg_id = reg; + + cppc = rpmi_cppc_get_pointer(req.hart_id); + if (!cppc) + return SBI_ENOSYS; + + rc = rpmi_normal_request_with_status( + cppc->chan, RPMI_CPPC_SRV_PROBE_REG, + &req, rpmi_u32_count(req), rpmi_u32_count(req), + &resp, rpmi_u32_count(resp), rpmi_u32_count(resp)); + if (rc) + return rc; + + return resp.reg_len; +} + +static struct sbi_cppc_device 
sbi_rpmi_cppc = { + .name = "rpmi-cppc", + .cppc_read = rpmi_cppc_read, + .cppc_write = rpmi_cppc_write, + .cppc_probe = rpmi_cppc_probe, +}; + +static int rpmi_cppc_update_hart_scratch(struct mbox_chan *chan) +{ + int rc, i; + struct rpmi_cppc_hart_list_req req; + struct rpmi_cppc_hart_list_resp resp; + struct rpmi_cppc_get_fast_channel_addr_req freq; + struct rpmi_cppc_get_fast_channel_addr_resp fresp; + struct rpmi_cppc *cppc; + + req.start_index = 0; + do { + rc = rpmi_normal_request_with_status( + chan, RPMI_CPPC_SRV_GET_HART_LIST, + &req, rpmi_u32_count(req), rpmi_u32_count(req), + &resp, rpmi_u32_count(resp), rpmi_u32_count(resp)); + if (rc) + return rc; + + for (i = 0; i < resp.returned; i++) { + cppc = rpmi_cppc_get_pointer(resp.hartid[i]); + if (!cppc) + return SBI_ENOSYS; + cppc->chan = chan; + + freq.hart_id = resp.hartid[i]; + rc = rpmi_normal_request_with_status( + chan, RPMI_CPPC_SRV_GET_FAST_CHANNEL_ADDR, + &freq, rpmi_u32_count(freq), rpmi_u32_count(freq), + &fresp, rpmi_u32_count(fresp), rpmi_u32_count(fresp)); + if (rc) + continue; + + cppc->fast_chan_supported = true; +#if __riscv_xlen == 32 + cppc->fast_chan_addr = fresp.addr_lo; +#else + cppc->fast_chan_addr = (ulong)fresp.addr_hi << 32 | + fresp.addr_lo; +#endif + cppc->fast_chan_db_supported = fresp.flags & + RPMI_CPPC_FAST_CHANNEL_FLAGS_DB_SUPPORTED; + cppc->fast_chan_db_width = (fresp.flags & + RPMI_CPPC_FAST_CHANNEL_FLAGS_DB_WIDTH_MASK) >> + RPMI_CPPC_FAST_CHANNEL_FLAGS_DB_WIDTH_POS; +#if __riscv_xlen == 32 + cppc->fast_chan_db_addr = fresp.db_addr_lo; +#else + cppc->fast_chan_db_addr = (ulong)fresp.db_addr_hi << 32 | + fresp.db_addr_lo; +#endif + cppc->fast_chan_db_id = (u64)fresp.db_id_hi << 32 | + fresp.db_id_lo; + } + + req.start_index = i; + } while (resp.remaining); + + return 0; +} + +static int rpmi_cppc_cold_init(void *fdt, int nodeoff, + const struct fdt_match *match) +{ + int rc; + struct mbox_chan *chan; + + if (!rpmi_cppc_offset) { + rpmi_cppc_offset = + sbi_scratch_alloc_type_offset(struct rpmi_cppc); + if (!rpmi_cppc_offset) + return SBI_ENOMEM; + } + + /* + * If channel request failed then other end does not support + * CPPC service group so do nothing. + */ + rc = fdt_mailbox_request_chan(fdt, nodeoff, 0, &chan); + if (rc) + return 0; + + /* Update per-HART scratch space */ + rc = rpmi_cppc_update_hart_scratch(chan); + if (rc) + return rc; + + sbi_cppc_set_device(&sbi_rpmi_cppc); + + return 0; +} + +static const struct fdt_match rpmi_cppc_match[] = { + { .compatible = "riscv,rpmi-cppc" }, + {}, +}; + +struct fdt_cppc fdt_cppc_rpmi = { + .match_table = rpmi_cppc_match, + .cold_init = rpmi_cppc_cold_init, +}; diff --git a/lib/utils/cppc/objects.mk b/lib/utils/cppc/objects.mk new file mode 100644 index 00000000000..f0842810afa --- /dev/null +++ b/lib/utils/cppc/objects.mk @@ -0,0 +1,14 @@ +# +# SPDX-License-Identifier: BSD-2-Clause +# +# Copyright (c) 2023 Ventana Micro Systems Inc. 
+# +# Authors: +# Anup Patel +# + +libsbiutils-objs-$(CONFIG_FDT_CPPC) += cppc/fdt_cppc.o +libsbiutils-objs-$(CONFIG_FDT_CPPC) += cppc/fdt_cppc_drivers.o + +carray-fdt_cppc_drivers-$(CONFIG_FDT_CPPC_RPMI) += fdt_cppc_rpmi +libsbiutils-objs-$(CONFIG_FDT_CPPC_RPMI) += cppc/fdt_cppc_rpmi.o diff --git a/lib/utils/hsm/Kconfig b/lib/utils/hsm/Kconfig new file mode 100644 index 00000000000..1ad7958fb58 --- /dev/null +++ b/lib/utils/hsm/Kconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: BSD-2-Clause + +menu "HSM Device Support" + +config FDT_HSM + bool "FDT based HSM drivers" + depends on FDT + default n + +if FDT_HSM + +config FDT_HSM_RPMI + bool "FDT RPMI HSM driver" + depends on FDT_MAILBOX && RPMI_MAILBOX + default n + +endif + +endmenu diff --git a/lib/utils/hsm/fdt_hsm.c b/lib/utils/hsm/fdt_hsm.c new file mode 100644 index 00000000000..270400d1773 --- /dev/null +++ b/lib/utils/hsm/fdt_hsm.c @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. + * + * Authors: + * Anup Patel + */ + +#include +#include +#include + +/* List of FDT HSM drivers generated at compile time */ +extern struct fdt_hsm *fdt_hsm_drivers[]; +extern unsigned long fdt_hsm_drivers_size; + +static struct fdt_hsm *current_driver = NULL; + +int fdt_hsm_fixup(void *fdt) +{ + if (current_driver && current_driver->fdt_fixup) + return current_driver->fdt_fixup(fdt); + return 0; +} + +void fdt_hsm_exit(void) +{ + if (current_driver && current_driver->exit) + current_driver->exit(); +} + +static int fdt_hsm_warm_init(void) +{ + if (current_driver && current_driver->warm_init) + return current_driver->warm_init(); + return 0; +} + +static int fdt_hsm_cold_init(void) +{ + int pos, noff, rc; + struct fdt_hsm *drv; + const struct fdt_match *match; + void *fdt = fdt_get_address(); + + for (pos = 0; pos < fdt_hsm_drivers_size; pos++) { + drv = fdt_hsm_drivers[pos]; + + noff = -1; + while ((noff = fdt_find_match(fdt, noff, + drv->match_table, &match)) >= 0) { + /* drv->cold_init must not be NULL */ + if (drv->cold_init == NULL) + return SBI_EFAIL; + + rc = drv->cold_init(fdt, noff, match); + if (rc == SBI_ENODEV) + continue; + if (rc) + return rc; + current_driver = drv; + + /* + * We can have multiple HSM devices on multi-die or + * multi-socket systems so we cannot break here. + */ + } + } + + /* + * On some single-hart system there is no need for HSM, + * so we cannot return a failure here + */ + return 0; +} + +int fdt_hsm_init(bool cold_boot) +{ + int rc; + + if (cold_boot) { + rc = fdt_hsm_cold_init(); + if (rc) + return rc; + } + + return fdt_hsm_warm_init(); +} diff --git a/lib/utils/hsm/fdt_hsm_drivers.carray b/lib/utils/hsm/fdt_hsm_drivers.carray new file mode 100644 index 00000000000..21396db2eba --- /dev/null +++ b/lib/utils/hsm/fdt_hsm_drivers.carray @@ -0,0 +1,3 @@ +HEADER: sbi_utils/hsm/fdt_hsm.h +TYPE: struct fdt_hsm +NAME: fdt_hsm_drivers diff --git a/lib/utils/hsm/fdt_hsm_rpmi.c b/lib/utils/hsm/fdt_hsm_rpmi.c new file mode 100644 index 00000000000..8e1a524c9ba --- /dev/null +++ b/lib/utils/hsm/fdt_hsm_rpmi.c @@ -0,0 +1,351 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. 
+ * + * Authors: + * Subrahmanya Lingappa + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_HSM_SUPSEND_STATE_NAMELEN 16 + +struct rpmi_hsm_suspend { + u32 num_states; + struct sbi_cpu_idle_state *states; +}; + +struct rpmi_hsm { + struct mbox_chan *chan; + struct rpmi_hsm_suspend *susp; +}; + +static unsigned long rpmi_hsm_offset; + +static struct rpmi_hsm *rpmi_hsm_get_pointer(u32 hartid) +{ + struct sbi_scratch *scratch; + + scratch = sbi_hartid_to_scratch(hartid); + if (!scratch || !rpmi_hsm_offset) + return NULL; + + return sbi_scratch_offset_ptr(scratch, rpmi_hsm_offset); +} + +static int rpmi_hsm_start(u32 hartid, ulong resume_addr) +{ + struct rpmi_hsm_hart_start_req req; + struct rpmi_hsm_hart_start_resp resp; + struct rpmi_hsm *rpmi = rpmi_hsm_get_pointer(hartid); + + if (!rpmi) + return SBI_ENOSYS; + + req.hartid = hartid; + req.start_addr_lo = resume_addr; + req.start_addr_hi = (u64)resume_addr >> 32; + + return rpmi_normal_request_with_status( + rpmi->chan, RPMI_HSM_SRV_HART_START, + &req, rpmi_u32_count(req), rpmi_u32_count(req), + &resp, rpmi_u32_count(resp), rpmi_u32_count(resp)); +} + +static int rpmi_hsm_stop(void) +{ + int rc; + struct rpmi_hsm_hart_stop_req req; + struct rpmi_hsm_hart_stop_resp resp; + void (*jump_warmboot)(void) = + (void (*)(void))sbi_scratch_thishart_ptr()->warmboot_addr; + struct rpmi_hsm *rpmi = rpmi_hsm_get_pointer(current_hartid()); + + if (!rpmi) + return SBI_ENOSYS; + + req.hartid = current_hartid(); + + rc = rpmi_normal_request_with_status( + rpmi->chan, RPMI_HSM_SRV_HART_STOP, + &req, rpmi_u32_count(req), rpmi_u32_count(req), + &resp, rpmi_u32_count(resp), rpmi_u32_count(resp)); + if (rc) + return rc; + + /* Wait for interrupt */ + wfi(); + + jump_warmboot(); + + return 0; +} + +static bool is_rpmi_hsm_susp_supported(struct rpmi_hsm_suspend *susp, u32 type) +{ + int i; + + for (i = 0; i < susp->num_states; i++) + if (type == susp->states[i].suspend_param) + return true; + + return false; +} + +static int rpmi_hsm_suspend(u32 type, ulong resume_addr) +{ + int rc; + struct rpmi_hsm_hart_susp_req req; + struct rpmi_hsm_hart_susp_resp resp; + struct rpmi_hsm *rpmi = rpmi_hsm_get_pointer(current_hartid()); + + if (!rpmi) + return SBI_ENOSYS; + + /* check if harts support this suspend type */ + if (!is_rpmi_hsm_susp_supported(rpmi->susp, type)) + return SBI_EINVAL; + + req.hartid = current_hartid(); + req.suspend_type = type; + req.resume_addr_lo = resume_addr; + req.resume_addr_hi = (u64)resume_addr >> 32; + + rc = rpmi_normal_request_with_status( + rpmi->chan, RPMI_HSM_SRV_HART_SUSPEND, + &req, rpmi_u32_count(req), rpmi_u32_count(req), + &resp, rpmi_u32_count(resp), rpmi_u32_count(resp)); + if (rc) + return rc; + + /* Wait for interrupt */ + wfi(); + + return 0; +} + +static struct sbi_hsm_device sbi_hsm_rpmi = { + .name = "rpmi-hsm", + .hart_start = rpmi_hsm_start, + .hart_stop = rpmi_hsm_stop, + .hart_suspend = rpmi_hsm_suspend, +}; + +static int rpmi_hsm_fixup(void *fdt) +{ + struct rpmi_hsm *rpmi = rpmi_hsm_get_pointer(current_hartid()); + + if (!rpmi || !rpmi->susp || !rpmi->susp->num_states) + return 0; + + return fdt_add_cpu_idle_states(fdt, rpmi->susp->states); +} + +static int rpmi_hsm_get_num_suspend_states(struct mbox_chan *chan, + struct rpmi_hsm_suspend *susp) +{ + int rc; + struct rpmi_hsm_get_susp_types_req req; + struct rpmi_hsm_get_susp_types_resp resp; + + req.start_index = 0; + rc = rpmi_normal_request_with_status( + chan, 
RPMI_HSM_SRV_GET_SUSPEND_TYPES, + &req, rpmi_u32_count(req), rpmi_u32_count(req), + &resp, rpmi_u32_count(resp), rpmi_u32_count(resp)); + if (rc) + return rc; + + susp->num_states = resp.returned + resp.remaining; + return 0; +} + +static int rpmi_hsm_get_suspend_states(struct mbox_chan *chan, + struct rpmi_hsm_suspend *susp) +{ + int rc, i, cnt = 0; + struct rpmi_hsm_get_susp_types_req req; + struct rpmi_hsm_get_susp_types_resp resp; + struct rpmi_hsm_get_susp_info_req dreq; + struct rpmi_hsm_get_susp_info_resp dresp; + struct sbi_cpu_idle_state *state; + + if (!susp->num_states) + return 0; + + req.start_index = 0; + do { + rc = rpmi_normal_request_with_status( + chan, RPMI_HSM_SRV_GET_SUSPEND_TYPES, + &req, rpmi_u32_count(req), rpmi_u32_count(req), + &resp, rpmi_u32_count(resp), rpmi_u32_count(resp)); + if (rc) + return rc; + + for (i = 0; i < resp.returned && cnt < susp->num_states; i++) + susp->states[cnt++].suspend_param = resp.types[i]; + req.start_index = i; + } while (resp.remaining); + + for (i = 0; i < susp->num_states; i++) { + state = &susp->states[i]; + + dreq.suspend_type = state->suspend_param; + rc = rpmi_normal_request_with_status( + chan, RPMI_HSM_SRV_GET_SUSPEND_INFO, + &dreq, rpmi_u32_count(dreq), rpmi_u32_count(dreq), + &dresp, rpmi_u32_count(dresp), rpmi_u32_count(dresp)); + if (rc) + return rc; + + state->entry_latency_us = dresp.entry_latency_us; + state->exit_latency_us = dresp.exit_latency_us; + state->wakeup_latency_us = dresp.wakeup_latency_us; + state->min_residency_us = dresp.min_residency_us; + } + + return 0; +} + +static int rpmi_hsm_update_hart_scratch(struct mbox_chan *chan, + struct rpmi_hsm_suspend *susp) +{ + int rc, i; + struct rpmi_hsm_get_hart_list_req req; + struct rpmi_hsm_get_hart_list_resp resp; + struct rpmi_hsm *rpmi = rpmi_hsm_get_pointer(current_hartid()); + + req.start_index = 0; + do { + rc = rpmi_normal_request_with_status( + chan, RPMI_HSM_SRV_GET_HART_LIST, + &req, rpmi_u32_count(req), rpmi_u32_count(req), + &resp, rpmi_u32_count(resp), rpmi_u32_count(resp)); + if (rc) + return rc; + + for (i = 0; i < resp.returned; i++) { + rpmi = rpmi_hsm_get_pointer(resp.hartid[i]); + if (!rpmi) + return SBI_ENOSYS; + + rpmi->chan = chan; + rpmi->susp = susp; + } + + req.start_index = i; + } while (resp.remaining); + + return 0; +} + +static int rpmi_hsm_cold_init(void *fdt, int nodeoff, + const struct fdt_match *match) +{ + int rc, i; + struct mbox_chan *chan; + struct rpmi_hsm_suspend *susp; + + if (!rpmi_hsm_offset) { + rpmi_hsm_offset = + sbi_scratch_alloc_type_offset(struct rpmi_hsm); + if (!rpmi_hsm_offset) + return SBI_ENOMEM; + } + + /* + * If channel request failed then other end does not support + * HSM service group so do nothing. 
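+	 * Returning success here keeps the boot flow going on platforms
+	 * whose platform microcontroller does not implement the HSM
+	 * service group.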
+ */ + rc = fdt_mailbox_request_chan(fdt, nodeoff, 0, &chan); + if (rc) + return 0; + + /* Allocate context for HART suspend states */ + susp = sbi_zalloc(sizeof(*susp)); + if (!susp) + return SBI_ENOMEM; + + /* Get number of HART suspend states */ + rc = rpmi_hsm_get_num_suspend_states(chan, susp); + if (rc) + goto fail_free_susp; + + /* Skip HART suspend state discovery for zero HART suspend states */ + if (!susp->num_states) + goto skip_suspend_states; + + /* Allocate array of HART suspend states */ + susp->states = sbi_calloc(susp->num_states + 1, sizeof(*susp->states)); + if (!susp->states) { + rc = SBI_ENOMEM; + goto fail_free_susp; + } + + /* Allocate name of each HART suspend state */ + for (i = 0; i < susp->num_states; i++) { + susp->states[i].name = + sbi_zalloc(MAX_HSM_SUPSEND_STATE_NAMELEN); + if (!susp->states[i].name) { + do { + i--; + sbi_free((void *)susp->states[i].name); + } while (i > 0); + + rc = SBI_ENOMEM; + goto fail_free_susp_states; + } + sbi_snprintf((char *)susp->states[i].name, + MAX_HSM_SUPSEND_STATE_NAMELEN, "cpu-susp%d", i); + } + + /* Get details about each HART suspend state */ + rc = rpmi_hsm_get_suspend_states(chan, susp); + if (rc) + goto fail_free_susp_state_names; + +skip_suspend_states: + /* Update per-HART scratch space */ + rc = rpmi_hsm_update_hart_scratch(chan, susp); + if (rc) + goto fail_free_susp_state_names; + + /* Register HSM device */ + if (!susp->num_states) + sbi_hsm_rpmi.hart_suspend = NULL; + sbi_hsm_set_device(&sbi_hsm_rpmi); + + return 0; + +fail_free_susp_state_names: + for (i = 0; i < susp->num_states; i++) + sbi_free((void *)susp->states[i].name); +fail_free_susp_states: + if (susp->num_states) + sbi_free(susp->states); +fail_free_susp: + sbi_free(susp); + return rc; +} + +static const struct fdt_match rpmi_hsm_match[] = { + { .compatible = "riscv,rpmi-hsm" }, + {}, +}; + +struct fdt_hsm fdt_hsm_rpmi = { + .match_table = rpmi_hsm_match, + .fdt_fixup = rpmi_hsm_fixup, + .cold_init = rpmi_hsm_cold_init, +}; diff --git a/lib/utils/hsm/objects.mk b/lib/utils/hsm/objects.mk new file mode 100644 index 00000000000..0f792ad5e08 --- /dev/null +++ b/lib/utils/hsm/objects.mk @@ -0,0 +1,14 @@ +# +# SPDX-License-Identifier: BSD-2-Clause +# +# Copyright (c) 2023 Ventana Micro Systems Inc. 
+# +# Authors: +# Anup Patel +# + +libsbiutils-objs-$(CONFIG_FDT_HSM) += hsm/fdt_hsm.o +libsbiutils-objs-$(CONFIG_FDT_HSM) += hsm/fdt_hsm_drivers.o + +carray-fdt_hsm_drivers-$(CONFIG_FDT_HSM_RPMI) += fdt_hsm_rpmi +libsbiutils-objs-$(CONFIG_FDT_HSM_RPMI) += hsm/fdt_hsm_rpmi.o diff --git a/lib/utils/irqchip/imsic.c b/lib/utils/irqchip/imsic.c index 36ef66cf1eb..a207dbc8458 100644 --- a/lib/utils/irqchip/imsic.c +++ b/lib/utils/irqchip/imsic.c @@ -149,7 +149,7 @@ static int imsic_external_irqfn(struct sbi_trap_regs *regs) switch (mirq) { case IMSIC_IPI_ID: - sbi_ipi_process(); + sbi_ipi_process(regs); break; default: sbi_printf("%s: unhandled IRQ%d\n", diff --git a/lib/utils/mailbox/Kconfig b/lib/utils/mailbox/Kconfig new file mode 100644 index 00000000000..6e7f2cdd766 --- /dev/null +++ b/lib/utils/mailbox/Kconfig @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: BSD-2-Clause + +menu "Mailbox Support" + +config FDT_MAILBOX + bool "FDT based mailbox drivers" + depends on FDT + select MAILBOX + default n + +config RPMI_MAILBOX + bool "RPMI based mailbox drivers" + select MAILBOX + default n + +config MAILBOX + bool "Mailbox support" + default n + +if FDT_MAILBOX + +config FDT_MAILBOX_RPMI_SHMEM + bool "RPMI Shared Memory Mailbox Controller" + depends on RPMI_MAILBOX + default n + +endif + +endmenu diff --git a/lib/utils/mailbox/fdt_mailbox.c b/lib/utils/mailbox/fdt_mailbox.c new file mode 100644 index 00000000000..a8eddaf7fdf --- /dev/null +++ b/lib/utils/mailbox/fdt_mailbox.c @@ -0,0 +1,142 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2022 Ventana Micro Systems Inc. + * + * Authors: + * Anup Patel + */ + +#include +#include +#include +#include + +/* List of FDT mailbox drivers generated at compile time */ +extern struct fdt_mailbox *fdt_mailbox_drivers[]; +extern unsigned long fdt_mailbox_drivers_size; + +static struct fdt_mailbox *fdt_mailbox_driver(struct mbox_controller *mbox) +{ + int pos; + + if (!mbox) + return NULL; + + for (pos = 0; pos < fdt_mailbox_drivers_size; pos++) { + if (mbox->driver == fdt_mailbox_drivers[pos]) + return fdt_mailbox_drivers[pos]; + } + + return NULL; +} + +static int fdt_mailbox_init(void *fdt, u32 phandle) +{ + int pos, nodeoff, rc; + struct fdt_mailbox *drv; + const struct fdt_match *match; + + /* Find node offset */ + nodeoff = fdt_node_offset_by_phandle(fdt, phandle); + if (nodeoff < 0) + return nodeoff; + + /* Try all mailbox drivers one-by-one */ + for (pos = 0; pos < fdt_mailbox_drivers_size; pos++) { + drv = fdt_mailbox_drivers[pos]; + + match = fdt_match_node(fdt, nodeoff, drv->match_table); + if (match && drv->init) { + rc = drv->init(fdt, nodeoff, phandle, match); + if (rc == SBI_ENODEV) + continue; + if (rc) + return rc; + return 0; + } + } + + return SBI_ENOSYS; +} + +static int fdt_mbox_controller_find(void *fdt, u32 phandle, + struct mbox_controller **out_mbox) +{ + int rc; + struct mbox_controller *mbox = mbox_controller_find(phandle); + + if (!mbox) { + /* mailbox not found so initialize matching driver */ + rc = fdt_mailbox_init(fdt, phandle); + if (rc) + return rc; + + /* Try to find mailbox controller again */ + mbox = mbox_controller_find(phandle); + if (!mbox) + return SBI_ENOSYS; + } + + if (out_mbox) + *out_mbox = mbox; + + return 0; +} + +int fdt_mailbox_request_chan(void *fdt, int nodeoff, int index, + struct mbox_chan **out_chan) +{ + int rc; + struct mbox_chan *chan; + struct fdt_mailbox *drv; + struct fdt_phandle_args pargs; + struct mbox_controller *mbox = NULL; + u32 phandle, chan_args[MBOX_CHAN_MAX_ARGS]; + + 
if (!fdt || (nodeoff < 0) || (index < 0) || !out_chan) + return SBI_EINVAL; + + pargs.node_offset = pargs.args_count = 0; + rc = fdt_parse_phandle_with_args(fdt, nodeoff, + "mboxes", "#mbox-cells", + index, &pargs); + if (rc) + return rc; + + phandle = fdt_get_phandle(fdt, pargs.node_offset); + rc = fdt_mbox_controller_find(fdt, phandle, &mbox); + if (rc) + return rc; + + drv = fdt_mailbox_driver(mbox); + if (!drv || !drv->xlate) + return SBI_ENOSYS; + + rc = drv->xlate(mbox, &pargs, chan_args); + if (rc) + return rc; + + chan = mbox_controller_request_chan(mbox, chan_args); + if (!chan) + return SBI_ENOENT; + + *out_chan = chan; + return 0; +} + +int fdt_mailbox_simple_xlate(struct mbox_controller *mbox, + const struct fdt_phandle_args *pargs, + u32 *out_chan_args) +{ + int i; + + if (pargs->args_count < 1) + return SBI_EINVAL; + + out_chan_args[0] = pargs->args[0]; + for (i = 1; i < MBOX_CHAN_MAX_ARGS; i++) + out_chan_args[i] = 0; + + return 0; +} diff --git a/lib/utils/mailbox/fdt_mailbox_drivers.carray b/lib/utils/mailbox/fdt_mailbox_drivers.carray new file mode 100644 index 00000000000..fd4246df499 --- /dev/null +++ b/lib/utils/mailbox/fdt_mailbox_drivers.carray @@ -0,0 +1,3 @@ +HEADER: sbi_utils/mailbox/fdt_mailbox.h +TYPE: struct fdt_mailbox +NAME: fdt_mailbox_drivers diff --git a/lib/utils/mailbox/fdt_mailbox_rpmi_shmem.c b/lib/utils/mailbox/fdt_mailbox_rpmi_shmem.c new file mode 100644 index 00000000000..9bea0620c7a --- /dev/null +++ b/lib/utils/mailbox/fdt_mailbox_rpmi_shmem.c @@ -0,0 +1,662 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. + * + * Authors: + * Rahul Pathak + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/**************** RPMI Transport Structures and Macros ***********/ + +#define SIZE_WRITEIDX (4) /* bytes */ +#define SIZE_READIDX (4) /* bytes */ +#define RPMI_QUEUE_HEADER_SIZE (SIZE_WRITEIDX + SIZE_READIDX) +#define RPMI_MAILBOX_CHANNELS_MAX (16) + +#define GET_TOKEN(msg) \ +({ \ + struct rpmi_message *mbuf = msg; \ + le32_to_cpu(mbuf->header.token); \ +}) + +#define GET_MESSAGE_ID(msg) \ +({ \ + le32_to_cpu(*(u32 *)((char *)msg + RPMI_MSG_IDN_OFFSET)); \ +}) + +#define GET_ENTITY_ID(msg) \ +({ \ + le32_to_cpu(*(u32 *)((char *)msg + RPMI_MSG_ENTYID_OFFSET)); \ +}) + +#define GET_MESSAGE_TYPE(msg) \ +({ \ + u32 msgidn = *(u32 *)((char *)msg + RPMI_MSG_IDN_OFFSET); \ + le32_to_cpu((u32)((msgidn & RPMI_MSG_IDN_TYPE_MASK) >> \ + RPMI_MSG_IDN_TYPE_POS)); \ +}) + +#define GET_SERVICE_ID(msg) \ +({ \ + u32 msgidn = *(u32 *)((char *)msg + RPMI_MSG_IDN_OFFSET); \ + le32_to_cpu((u32)((msgidn & RPMI_MSG_IDN_SERVICE_ID_MASK) >> \ + RPMI_MSG_IDN_SERVICE_ID_POS)); \ +}) + +#define GET_SERVICEGROUP_ID(msg) \ +({ \ + u32 msgidn = *(u32 *)((char *)msg + RPMI_MSG_IDN_OFFSET); \ + le32_to_cpu((u32)((msgidn & RPMI_MSG_IDN_SERVICEGROUP_ID_MASK) >>\ + RPMI_MSG_IDN_SERVICEGROUP_ID_POS)); \ +}) + +#define GET_DLEN(msg) \ +({ \ + struct rpmi_message *mbuf = msg; \ + le32_to_cpu(mbuf->header.datalen); \ +}) + +enum rpmi_queue_type { + RPMI_QUEUE_TYPE_REQ = 0, + RPMI_QUEUE_TYPE_ACK = 1, +}; + +enum rpmi_queue_idx { + RPMI_QUEUE_IDX_A2P_REQ = 0, + RPMI_QUEUE_IDX_P2A_ACK = 1, + RPMI_QUEUE_IDX_P2A_REQ = 2, + RPMI_QUEUE_IDX_A2P_ACK = 3, + RPMI_QUEUE_IDX_MAX_COUNT, +}; + +enum rpmi_reg_idx { + RPMI_REG_IDX_DB_REG = 0, /* Doorbell register */ + RPMI_REG_IDX_MAX_COUNT, +}; + +/** Single Queue Memory View */ +struct rpmi_queue { + volatile le32_t 
readidx; + volatile le32_t writeidx; + uint8_t buffer[]; +} __packed; + +/** Mailbox registers */ +struct rpmi_mb_regs { + /* doorbell from AP -> PuC*/ + volatile le32_t db_reg; +} __packed; + +/** Single Queue Context Structure */ +struct smq_queue_ctx { + u32 queue_id; + u32 num_slots; + spinlock_t queue_lock; + /* Type of queue - REQ or ACK */ + enum rpmi_queue_type queue_type; + /* Pointer to the queue shared memory */ + struct rpmi_queue *queue; + char name[RPMI_NAME_CHARS_MAX]; +}; + +struct rpmi_shmem_mbox_controller { + /* Driver specific members */ + u32 slot_size; + u32 queue_count; + struct rpmi_mb_regs *mb_regs; + struct smq_queue_ctx queue_ctx_tbl[RPMI_QUEUE_IDX_MAX_COUNT]; + /* Mailbox framework related members */ + struct mbox_controller controller; + struct mbox_chan channels[RPMI_MAILBOX_CHANNELS_MAX]; + struct mbox_chan *base_chan; + u32 impl_version; + u32 impl_id; + u32 spec_version; + struct { + bool f0_ev_notif_en; + bool f0_msi_en; + } base_flags; +}; + +/**************** Shared Memory Queues Helpers **************/ + +static bool __smq_queue_full(struct smq_queue_ctx *qctx) +{ + return + ((le32_to_cpu(qctx->queue->writeidx) + 1) % qctx->num_slots == + le32_to_cpu(qctx->queue->readidx)) ? true : false; +} + +static bool __smq_queue_empty(struct smq_queue_ctx *qctx) +{ + return + (le32_to_cpu(qctx->queue->readidx) == + le32_to_cpu(qctx->queue->writeidx)) ? true : false; +} + +static int __smq_rx(struct smq_queue_ctx *qctx, u32 slot_size, + u32 service_group_id, struct mbox_xfer *xfer) +{ + void *dst, *src; + struct rpmi_message *msg; + u32 i, tmp, pos, dlen, msgidn, readidx, writeidx; + struct rpmi_message_args *args = xfer->args; + bool no_rx_token = (args->flags & RPMI_MSG_FLAGS_NO_RX_TOKEN) ? + true : false; + + /* Rx sanity checks */ + if ((sizeof(u32) * args->rx_endian_words) > + (slot_size - sizeof(struct rpmi_message_header))) + return SBI_EINVAL; + if ((sizeof(u32) * args->rx_endian_words) > xfer->rx_len) + return SBI_EINVAL; + + /* There should be some message in the queue */ + if (__smq_queue_empty(qctx)) + return SBI_ENOENT; + + /* Get the read index and write index */ + readidx = le32_to_cpu(qctx->queue->readidx); + writeidx = le32_to_cpu(qctx->queue->writeidx); + + /* + * Compute msgidn expected in the incoming message + * NOTE: DOORBELL bit is not expected to be set. 
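
The `readidx`/`writeidx` pair above implements a classic single-producer, single-consumer ring: empty is read == write and full is (write + 1) % slots == read, so one slot is always sacrificed to tell the two states apart. A runnable model of that arithmetic (the slot count is illustrative):

```c
#include <stdbool.h>
#include <stdio.h>

#define NUM_SLOTS 8	/* hypothetical slot count */

/* Same full/empty convention as __smq_queue_full()/__smq_queue_empty():
 * the ring holds at most NUM_SLOTS - 1 messages because "full" leaves
 * one slot unused to disambiguate from "empty". */
static bool ring_full(unsigned int rd, unsigned int wr)
{
	return ((wr + 1) % NUM_SLOTS) == rd;
}

static bool ring_empty(unsigned int rd, unsigned int wr)
{
	return rd == wr;
}

int main(void)
{
	unsigned int rd = 0, wr = 0, produced = 0;

	while (!ring_full(rd, wr)) {
		wr = (wr + 1) % NUM_SLOTS;
		produced++;
	}
	printf("capacity = %u (one slot sacrificed)\n", produced);
	printf("empty now? %s\n", ring_empty(rd, wr) ? "yes" : "no");
	return 0;
}
```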
+ */ + msgidn = ((service_group_id << RPMI_MSG_IDN_SERVICEGROUP_ID_POS) & + RPMI_MSG_IDN_SERVICEGROUP_ID_MASK) | + ((args->service_id << RPMI_MSG_IDN_SERVICE_ID_POS) & + RPMI_MSG_IDN_SERVICE_ID_MASK) | + ((args->type << RPMI_MSG_IDN_TYPE_POS) & + RPMI_MSG_IDN_TYPE_MASK); + + /* Find the Rx message with matching token */ + pos = readidx; + while (pos != writeidx) { + src = (void *)qctx->queue->buffer + (pos * slot_size); + if ((no_rx_token && GET_MESSAGE_ID(src) == msgidn) || + (GET_TOKEN(src) == xfer->seq)) + break; + pos = (pos + 1) % qctx->num_slots; + } + if (pos == writeidx) + return SBI_ENOENT; + + /* If Rx message is not first message then make it first message */ + if (pos != readidx) { + src = (void *)qctx->queue->buffer + (pos * slot_size); + dst = (void *)qctx->queue->buffer + (readidx * slot_size); + for (i = 0; i < slot_size / sizeof(u32); i++) { + tmp = ((u32 *)dst)[i]; + ((u32 *)dst)[i] = ((u32 *)src)[i]; + ((u32 *)src)[i] = tmp; + } + } + + /* Update rx_token if not available */ + msg = (void *)qctx->queue->buffer + (readidx * slot_size); + if (no_rx_token) + args->rx_token = GET_TOKEN(msg); + + /* Extract data from the first message */ + if (xfer->rx) { + args->rx_data_len = dlen = GET_DLEN(msg); + if (dlen > xfer->rx_len) + dlen = xfer->rx_len; + src = (void *)msg + sizeof(struct rpmi_message_header); + dst = xfer->rx; + for (i = 0; i < args->rx_endian_words; i++) + ((u32 *)dst)[i] = le32_to_cpu(((u32 *)src)[i]); + dst += sizeof(u32) * args->rx_endian_words; + src += sizeof(u32) * args->rx_endian_words; + sbi_memcpy(dst, src, + xfer->rx_len - (sizeof(u32) * args->rx_endian_words)); + } + + /* Update the read index */ + qctx->queue->readidx = cpu_to_le32(readidx + 1) % qctx->num_slots; + smp_wmb(); + + return SBI_OK; +} + +static int __smq_tx(struct smq_queue_ctx *qctx, struct rpmi_mb_regs *mb_regs, + u32 slot_size, u32 service_group_id, struct mbox_xfer *xfer) +{ + void *dst, *src; + u32 i, msgidn, writeidx; + struct rpmi_message_header header = { 0 }; + struct rpmi_message_args *args = xfer->args; + + /* Tx sanity checks */ + if ((sizeof(u32) * args->tx_endian_words) > + (slot_size - sizeof(struct rpmi_message_header))) + return SBI_EINVAL; + if ((sizeof(u32) * args->tx_endian_words) > xfer->tx_len) + return SBI_EINVAL; + + /* There should be some room in the queue */ + if (__smq_queue_full(qctx)) + return SBI_ENOMEM; + + /* Get the write index */ + writeidx = le32_to_cpu(qctx->queue->writeidx); + + /* + * Compute msgidn for the outgoing message + * NOTE: DOORBELL bit is not set currently because we always poll. 
+ */ + msgidn = ((service_group_id << RPMI_MSG_IDN_SERVICEGROUP_ID_POS) & + RPMI_MSG_IDN_SERVICEGROUP_ID_MASK) | + ((args->service_id << RPMI_MSG_IDN_SERVICE_ID_POS) & + RPMI_MSG_IDN_SERVICE_ID_MASK) | + ((args->type << RPMI_MSG_IDN_TYPE_POS) & + RPMI_MSG_IDN_TYPE_MASK); + + /* Prepare the header to be written into the slot */ + header.token = cpu_to_le32((u32)xfer->seq); + header.msgidn = cpu_to_le32(msgidn); + header.datalen = cpu_to_le32(xfer->tx_len); + + /* Write header into the slot */ + dst = (char *)qctx->queue->buffer + (writeidx * slot_size); + sbi_memcpy(dst, &header, sizeof(header)); + dst += sizeof(header); + + /* Write data into the slot */ + if (xfer->tx) { + src = xfer->tx; + for (i = 0; i < args->tx_endian_words; i++) + ((u32 *)dst)[i] = cpu_to_le32(((u32 *)src)[i]); + dst += sizeof(u32) * args->tx_endian_words; + src += sizeof(u32) * args->tx_endian_words; + sbi_memcpy(dst, src, + xfer->tx_len - (sizeof(u32) * args->tx_endian_words)); + } + + /* Update the write index */ + qctx->queue->writeidx = cpu_to_le32(writeidx + 1) % qctx->num_slots; + smp_wmb(); + + /* Ring the RPMI doorbell if present */ + if (mb_regs) + writel(cpu_to_le32(1), &mb_regs->db_reg); + + return SBI_OK; +} + +static int smq_rx(struct rpmi_shmem_mbox_controller *mctl, + u32 queue_id, u32 service_group_id, struct mbox_xfer *xfer) +{ + int ret, rxretry = 0; + struct smq_queue_ctx *qctx; + + if (mctl->queue_count < queue_id || + RPMI_MAILBOX_CHANNELS_MAX <= service_group_id) { + sbi_printf("%s: invalid queue_id or service_group_id\n", + __func__); + return SBI_EINVAL; + } + qctx = &mctl->queue_ctx_tbl[queue_id]; + + /* + * Once the timeout happens and call this function is returned + * to the client then there is no way to deliver the response + * message after that if it comes later. + * + * REVISIT: In complete timeout duration how much duration + * it should wait(delay) before recv retry. udelay or mdelay + */ + do { + spin_lock(&qctx->queue_lock); + ret = __smq_rx(qctx, mctl->slot_size, service_group_id, xfer); + spin_unlock(&qctx->queue_lock); + if (!ret) + return 0; + + sbi_timer_mdelay(1); + rxretry += 1; + } while (rxretry < xfer->rx_timeout); + + return SBI_ETIMEDOUT; +} + +static int smq_tx(struct rpmi_shmem_mbox_controller *mctl, + u32 queue_id, u32 service_group_id, struct mbox_xfer *xfer) +{ + int ret, txretry = 0; + struct smq_queue_ctx *qctx; + + if (mctl->queue_count < queue_id || + RPMI_MAILBOX_CHANNELS_MAX <= service_group_id) { + sbi_printf("%s: invalid queue_id or service_group_id\n", + __func__); + return SBI_EINVAL; + } + qctx = &mctl->queue_ctx_tbl[queue_id]; + + /* + * Ignoring the tx timeout since in RPMI has no mechanism + * with which other side can let know about the reception of + * message which marks as tx complete. For RPMI tx complete is + * marked as done when message in successfully copied in queue. + * + * REVISIT: In complete timeout duration how much duration + * it should wait(delay) before send retry. udelay or mdelay + */ + do { + spin_lock(&qctx->queue_lock); + ret = __smq_tx(qctx, mctl->mb_regs, mctl->slot_size, + service_group_id, xfer); + spin_unlock(&qctx->queue_lock); + if (!ret) + return 0; + + sbi_timer_mdelay(1); + txretry += 1; + } while (txretry < xfer->tx_timeout); + + return SBI_ETIMEDOUT; +} + +static int smq_base_get_two_u32(struct rpmi_shmem_mbox_controller *mctl, + u32 service_id, u32 *inarg, u32 *outvals) +{ + return rpmi_normal_request_with_status( + mctl->base_chan, service_id, + inarg, (inarg) ? 1 : 0, (inarg) ? 
1 : 0, + outvals, 2, 2); +} + +/**************** Mailbox Controller Functions **************/ + +static int rpmi_shmem_mbox_xfer(struct mbox_chan *chan, struct mbox_xfer *xfer) +{ + int ret; + u32 tx_qid = 0, rx_qid = 0; + struct rpmi_shmem_mbox_controller *mctl = + container_of(chan->mbox, + struct rpmi_shmem_mbox_controller, + controller); + struct rpmi_message_args *args = xfer->args; + bool do_tx = (args->flags & RPMI_MSG_FLAGS_NO_TX) ? false : true; + bool do_rx = (args->flags & RPMI_MSG_FLAGS_NO_RX) ? false : true; + + if (!do_tx && !do_rx) + return SBI_EINVAL; + + switch (args->type) { + case RPMI_MSG_NORMAL_REQUEST: + if (do_tx && do_rx) { + tx_qid = RPMI_QUEUE_IDX_A2P_REQ; + rx_qid = RPMI_QUEUE_IDX_P2A_ACK; + } else if (do_tx) { + tx_qid = RPMI_QUEUE_IDX_A2P_REQ; + } else if (do_rx) { + rx_qid = RPMI_QUEUE_IDX_P2A_REQ; + } + break; + case RPMI_MSG_POSTED_REQUEST: + if (do_tx && do_rx) + return SBI_EINVAL; + if (do_tx) { + tx_qid = RPMI_QUEUE_IDX_A2P_REQ; + } else { + rx_qid = RPMI_QUEUE_IDX_P2A_REQ; + } + break; + case RPMI_MSG_ACKNOWLDGEMENT: + if (do_tx && do_rx) + return SBI_EINVAL; + if (do_tx) { + tx_qid = RPMI_QUEUE_IDX_A2P_ACK; + } else { + rx_qid = RPMI_QUEUE_IDX_P2A_ACK; + } + break; + default: + return SBI_ENOTSUPP; + } + + if (do_tx) { + ret = smq_tx(mctl, tx_qid, chan - mctl->channels, xfer); + if (ret) + return ret; + } + + if (do_rx) { + ret = smq_rx(mctl, rx_qid, chan - mctl->channels, xfer); + if (ret) + return ret; + } + + return 0; +} + +static struct mbox_chan *rpmi_shmem_mbox_request_chan( + struct mbox_controller *mbox, + u32 *chan_args) +{ + int ret; + u32 tval[2] = { 0 }; + struct rpmi_shmem_mbox_controller *mctl = + container_of(mbox, + struct rpmi_shmem_mbox_controller, + controller); + + if (chan_args[0] >= RPMI_MAILBOX_CHANNELS_MAX) + return NULL; + + /* Base serivce group is always present so probe other groups */ + if (chan_args[0] != RPMI_SRVGRP_BASE) { + /* Probe service group */ + ret = smq_base_get_two_u32(mctl, + RPMI_BASE_SRV_PROBE_SERVICE_GROUP, + chan_args, tval); + if (ret || !tval[1]) + return NULL; + } + + return &mctl->channels[chan_args[0]]; +} + +static void *rpmi_shmem_mbox_free_chan(struct mbox_controller *mbox, + struct mbox_chan *chan) +{ + /* Nothing to do here */ + return NULL; +} + +extern struct fdt_mailbox fdt_mailbox_rpmi_shmem; + +static int rpmi_shmem_transport_init(struct rpmi_shmem_mbox_controller *mctl, + void *fdt, int nodeoff) +{ + const char *name; + int count, len, ret, qid; + uint64_t reg_addr, reg_size; + const fdt32_t *prop_slotsz; + struct smq_queue_ctx *qctx; + + ret = fdt_node_check_compatible(fdt, nodeoff, + "riscv,rpmi-shmem-mbox"); + if (ret) + return ret; + + /* get queue slot size in bytes */ + prop_slotsz = fdt_getprop(fdt, nodeoff, "riscv,slot-size", &len); + if (!prop_slotsz) + return SBI_ENOENT; + + mctl->slot_size = fdt32_to_cpu(*prop_slotsz); + if (mctl->slot_size < RPMI_MSG_SIZE_MIN) { + sbi_printf("%s: slot_size < mimnum required message size\n", + __func__); + mctl->slot_size = RPMI_MSG_SIZE_MIN; + } + + /* + * queue names count is taken as the number of queues + * supported which make it mandatory to provide the + * name of the queue. 
+ */ + count = fdt_stringlist_count(fdt, nodeoff, "reg-names"); + if (count < 0 || + count > (RPMI_QUEUE_IDX_MAX_COUNT + RPMI_REG_IDX_MAX_COUNT)) + return SBI_EINVAL; + + mctl->queue_count = count - RPMI_REG_IDX_MAX_COUNT; + + /* parse all queues and populate queues context structure */ + for (qid = 0; qid < mctl->queue_count; qid++) { + qctx = &mctl->queue_ctx_tbl[qid]; + + /* get each queue share-memory base address and size*/ + ret = fdt_get_node_addr_size(fdt, nodeoff, qid, + ®_addr, ®_size); + if (ret < 0 || !reg_addr || !reg_size) + return SBI_ENOENT; + + qctx->queue = (void *)(unsigned long)reg_addr; + + /* calculate number of slots in each queue */ + qctx->num_slots = + (reg_size - RPMI_QUEUE_HEADER_SIZE) / mctl->slot_size; + + /* get the queue name */ + name = fdt_stringlist_get(fdt, nodeoff, "reg-names", + qid, &len); + if (!name || (name && len < 0)) + return len; + + sbi_memcpy(qctx->name, name, len); + + /* store the index as queue_id */ + qctx->queue_id = qid; + + SPIN_LOCK_INIT(qctx->queue_lock); + } + + /* get the db-reg property name */ + name = fdt_stringlist_get(fdt, nodeoff, "reg-names", qid, &len); + if (!name || (name && len < 0)) + return len; + + /* fetch doorbell register address*/ + ret = fdt_get_node_addr_size(fdt, nodeoff, qid, ®_addr, + ®_size); + if (!ret && !(strncmp(name, "db-reg", strlen("db-reg")))) + mctl->mb_regs = (void *)(unsigned long)reg_addr; + + return SBI_SUCCESS; +} + +static int rpmi_shmem_mbox_init(void *fdt, int nodeoff, u32 phandle, + const struct fdt_match *match) +{ + int ret = 0; + u32 tval[2]; + struct rpmi_base_get_attributes_resp resp; + struct rpmi_shmem_mbox_controller *mctl; + + mctl = sbi_zalloc(sizeof(*mctl)); + if (!mctl) + return SBI_ENOMEM; + + /* Initialization transport from device tree */ + ret = rpmi_shmem_transport_init(mctl, fdt, nodeoff); + if (ret) + goto fail_free_controller; + + /* Register mailbox controller */ + mctl->controller.id = phandle; + mctl->controller.max_xfer_len = + mctl->slot_size - sizeof(struct rpmi_message_header); + mctl->controller.driver = &fdt_mailbox_rpmi_shmem; + mctl->controller.request_chan = rpmi_shmem_mbox_request_chan; + mctl->controller.free_chan = rpmi_shmem_mbox_free_chan; + mctl->controller.xfer = rpmi_shmem_mbox_xfer; + ret = mbox_controller_add(&mctl->controller); + if (ret) + goto fail_free_controller; + + /* Request base service group channel */ + tval[0] = RPMI_SRVGRP_BASE; + mctl->base_chan = mbox_controller_request_chan(&mctl->controller, + tval); + if (!mctl->base_chan) { + ret = SBI_ENOENT; + goto fail_remove_controller; + } + + /* Get implementation id */ + ret = smq_base_get_two_u32(mctl, + RPMI_BASE_SRV_GET_IMPLEMENTATION_VERSION, + NULL, tval); + if (ret) + goto fail_free_chan; + mctl->impl_version = tval[1]; + + /* Get implementation version */ + ret = smq_base_get_two_u32(mctl, RPMI_BASE_SRV_GET_IMPLEMENTATION_IDN, + NULL, tval); + if (ret) + goto fail_free_chan; + mctl->impl_id = tval[1]; + + /* Get specification version */ + ret = smq_base_get_two_u32(mctl, RPMI_BASE_SRV_GET_SPEC_VERSION, + NULL, tval); + if (ret) + goto fail_free_chan; + mctl->spec_version = tval[1]; + + /* Get optional features implementation flags */ + ret = rpmi_normal_request_with_status( + mctl->base_chan, RPMI_BASE_SRV_GET_ATTRIBUTES, + NULL, 0, 0, + &resp, rpmi_u32_count(resp), rpmi_u32_count(resp)); + if (ret) + goto fail_free_chan; + + mctl->base_flags.f0_ev_notif_en = + resp.f0 & RPMI_BASE_FLAGS_F0_EV_NOTIFY ? 1 : 0; + mctl->base_flags.f0_msi_en = + resp.f0 & RPMI_BASE_FLAGS_F0_MSI_EN ? 
1 : 0; + + return 0; + +fail_free_chan: + mbox_controller_free_chan(mctl->base_chan); +fail_remove_controller: + mbox_controller_remove(&mctl->controller); +fail_free_controller: + sbi_free(mctl); + return ret; +} + +static const struct fdt_match rpmi_shmem_mbox_match[] = { + { .compatible = "riscv,rpmi-shmem-mbox" }, + { }, +}; + +struct fdt_mailbox fdt_mailbox_rpmi_shmem = { + .match_table = rpmi_shmem_mbox_match, + .init = rpmi_shmem_mbox_init, + .xlate = fdt_mailbox_simple_xlate, +}; diff --git a/lib/utils/mailbox/mailbox.c b/lib/utils/mailbox/mailbox.c new file mode 100644 index 00000000000..c35aa6ec91e --- /dev/null +++ b/lib/utils/mailbox/mailbox.c @@ -0,0 +1,116 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2022 Ventana Micro Systems Inc. + * + * Authors: + * Anup Patel + */ + +#include +#include +#include + +static SBI_LIST_HEAD(mbox_list); + +struct mbox_controller *mbox_controller_find(unsigned int id) +{ + struct sbi_dlist *pos; + + sbi_list_for_each(pos, &mbox_list) { + struct mbox_controller *mbox = to_mbox_controller(pos); + + if (mbox->id == id) + return mbox; + } + + return NULL; +} + +int mbox_controller_add(struct mbox_controller *mbox) +{ + if (!mbox || !mbox->max_xfer_len) + return SBI_EINVAL; + if (mbox_controller_find(mbox->id)) + return SBI_EALREADY; + + SBI_INIT_LIST_HEAD(&mbox->node); + ATOMIC_INIT(&mbox->xfer_next_seq, 0); + SBI_INIT_LIST_HEAD(&mbox->chan_list); + sbi_list_add(&mbox->node, &mbox_list); + + return 0; +} + +void mbox_controller_remove(struct mbox_controller *mbox) +{ + struct mbox_chan *chan; + + if (!mbox) + return; + + while (!sbi_list_empty(&mbox->chan_list)) { + chan = sbi_list_first_entry(&mbox->chan_list, + struct mbox_chan, node); + if (mbox->free_chan) + mbox->free_chan(mbox, chan); + sbi_list_del(&chan->node); + } + + sbi_list_del(&mbox->node); +} + +struct mbox_chan *mbox_controller_request_chan(struct mbox_controller *mbox, + u32 *chan_args) +{ + struct mbox_chan *ret; + struct sbi_dlist *pos; + + if (!chan_args || !mbox || !mbox->request_chan) + return NULL; + + sbi_list_for_each(pos, &mbox->chan_list) { + ret = to_mbox_chan(pos); + if (!sbi_memcmp(ret->chan_args, chan_args, + sizeof(ret->chan_args))) + return ret; + } + + ret = mbox->request_chan(mbox, chan_args); + if (!ret) + return NULL; + + SBI_INIT_LIST_HEAD(&ret->node); + ret->mbox = mbox; + sbi_memcpy(ret->chan_args, chan_args, sizeof(ret->chan_args)); + sbi_list_add(&ret->node, &mbox->chan_list); + return ret; +} + +void mbox_controller_free_chan(struct mbox_chan *chan) +{ + if (!chan || !chan->mbox) + return; + + if (chan->mbox->free_chan) + chan->mbox->free_chan(chan->mbox, chan); + sbi_list_del(&chan->node); +} + +int mbox_chan_xfer(struct mbox_chan *chan, struct mbox_xfer *xfer) +{ + if (!xfer || !chan || !chan->mbox || !chan->mbox->xfer) + return SBI_EINVAL; + + if (xfer->tx && (xfer->tx_len > chan->mbox->max_xfer_len)) + return SBI_EINVAL; + + if (xfer->rx && (xfer->rx_len > chan->mbox->max_xfer_len)) + return SBI_EINVAL; + + if (!(xfer->flags & MBOX_XFER_SEQ)) + mbox_xfer_set_sequence(xfer, + atomic_add_return(&chan->mbox->xfer_next_seq, 1)); + + return chan->mbox->xfer(chan, xfer); +} diff --git a/lib/utils/mailbox/objects.mk b/lib/utils/mailbox/objects.mk new file mode 100644 index 00000000000..23cd18ce629 --- /dev/null +++ b/lib/utils/mailbox/objects.mk @@ -0,0 +1,18 @@ +# +# SPDX-License-Identifier: BSD-2-Clause +# +# Copyright (c) 2022 Ventana Micro Systems Inc. 
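
The shared-memory driver composes and decomposes the `msgidn` word with shift-and-mask pairs (`RPMI_MSG_IDN_*_POS`/`_MASK`). A standalone model of that packing; the field positions below are illustrative, the authoritative values live in the RPMI protocol header:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout in the msgidn word; the authoritative
 * positions and masks are the RPMI_MSG_IDN_* definitions. */
#define SERVICE_ID_POS		0
#define SERVICE_ID_MASK		0x000000ffU
#define SERVICEGROUP_ID_POS	8
#define SERVICEGROUP_ID_MASK	0x000fff00U
#define TYPE_POS		20
#define TYPE_MASK		0x00700000U

static uint32_t make_msgidn(uint32_t grp, uint32_t srv, uint32_t type)
{
	return ((grp << SERVICEGROUP_ID_POS) & SERVICEGROUP_ID_MASK) |
	       ((srv << SERVICE_ID_POS) & SERVICE_ID_MASK) |
	       ((type << TYPE_POS) & TYPE_MASK);
}

int main(void)
{
	uint32_t idn = make_msgidn(0x2, 0x4, 0x1);

	printf("msgidn = 0x%08x\n", idn);
	printf("grp=%u srv=%u type=%u\n",
	       (idn & SERVICEGROUP_ID_MASK) >> SERVICEGROUP_ID_POS,
	       (idn & SERVICE_ID_MASK) >> SERVICE_ID_POS,
	       (idn & TYPE_MASK) >> TYPE_POS);
	return 0;
}
```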
+# +# Authors: +# Anup Patel +# + +libsbiutils-objs-$(CONFIG_FDT_MAILBOX) += mailbox/fdt_mailbox.o +libsbiutils-objs-$(CONFIG_FDT_MAILBOX) += mailbox/fdt_mailbox_drivers.o + +libsbiutils-objs-$(CONFIG_MAILBOX) += mailbox/mailbox.o + +libsbiutils-objs-$(CONFIG_RPMI_MAILBOX) += mailbox/rpmi_mailbox.o + +carray-fdt_mailbox_drivers-$(CONFIG_FDT_MAILBOX_RPMI_SHMEM) += fdt_mailbox_rpmi_shmem +libsbiutils-objs-$(CONFIG_FDT_MAILBOX_RPMI_SHMEM) += mailbox/fdt_mailbox_rpmi_shmem.o diff --git a/lib/utils/mailbox/rpmi_mailbox.c b/lib/utils/mailbox/rpmi_mailbox.c new file mode 100644 index 00000000000..58c64e566e8 --- /dev/null +++ b/lib/utils/mailbox/rpmi_mailbox.c @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. + * + * Authors: + * Anup Patel + */ + +#include +#include +#include + +int rpmi_xlate_error(enum rpmi_error error) +{ + switch (error) { + case RPMI_SUCCESS: + return SBI_OK; + case RPMI_ERR_FAILED: + return SBI_EFAIL; + case RPMI_ERR_NOTSUPP: + return SBI_ENOTSUPP; + case RPMI_ERR_INVAL: + return SBI_EINVAL; + case RPMI_ERR_DENIED: + return SBI_EDENIED; + case RPMI_ERR_NOTFOUND: + return SBI_ENOENT; + case RPMI_ERR_OUTOFRANGE: + return SBI_EINVAL; + case RPMI_ERR_OUTOFRES: + return SBI_ENOSPC; + case RPMI_ERR_HWFAULT: + return SBI_EIO; + default: + return SBI_EUNKNOWN; + } +} + +int rpmi_normal_request_with_status( + struct mbox_chan *chan, u32 service_id, + void *req, u32 req_words, u32 req_endian_words, + void *resp, u32 resp_words, u32 resp_endian_words) +{ + int ret; + struct mbox_xfer xfer; + struct rpmi_message_args args = { 0 }; + + args.type = RPMI_MSG_NORMAL_REQUEST; + args.service_id = service_id; + args.tx_endian_words = req_endian_words; + args.rx_endian_words = resp_endian_words; + mbox_xfer_init_txrx(&xfer, &args, + req, sizeof(u32) * req_words, RPMI_DEF_TX_TIMEOUT, + resp, sizeof(u32) * resp_words, RPMI_DEF_RX_TIMEOUT); + + ret = mbox_chan_xfer(chan, &xfer); + if (ret) + return ret; + + return rpmi_xlate_error(((u32 *)resp)[0]); +} + +int rpmi_posted_request( + struct mbox_chan *chan, u32 service_id, + void *req, u32 req_words, u32 req_endian_words) +{ + struct mbox_xfer xfer; + struct rpmi_message_args args = { 0 }; + + args.type = RPMI_MSG_POSTED_REQUEST; + args.flags = RPMI_MSG_FLAGS_NO_RX; + args.service_id = service_id; + args.tx_endian_words = req_endian_words; + mbox_xfer_init_tx(&xfer, &args, + req, sizeof(u32) * req_words, RPMI_DEF_TX_TIMEOUT); + + return mbox_chan_xfer(chan, &xfer); +} diff --git a/lib/utils/ras/Kconfig b/lib/utils/ras/Kconfig new file mode 100644 index 00000000000..b016992409d --- /dev/null +++ b/lib/utils/ras/Kconfig @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: BSD-2-Clause + +menu "RAS Agent Driver Support" + +config FDT_RAS + bool "FDT based RAS drivers" + depends on FDT + default y + +if FDT_RAS + +config FDT_RAS_RPMI + bool "FDT RPMI RAS driver" + default y + +endif + +endmenu diff --git a/lib/utils/ras/fdt_ras.c b/lib/utils/ras/fdt_ras.c new file mode 100644 index 00000000000..5ebe50d9b5c --- /dev/null +++ b/lib/utils/ras/fdt_ras.c @@ -0,0 +1,77 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems, Inc. 
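
`rpmi_normal_request_with_status()` above byte-swaps only the first `*_endian_words` 32-bit words of each buffer and copies the rest verbatim, then translates the leading RPMI status word into an SBI error. A hedged usage sketch against the signatures in this patch; the request/response structs and the service id are hypothetical stand-ins:

```c
/* Sketch only: assumes the mailbox/rpmi_mailbox APIs introduced by this
 * patch. my_get_info_req/resp model a hypothetical all-u32 message. */
struct my_get_info_req {
	u32 index;
};

struct my_get_info_resp {
	u32 status;	/* leading RPMI status, translated by the helper */
	u32 value;
};

static int my_get_info(struct mbox_chan *chan, u32 index, u32 *out)
{
	struct my_get_info_req req = { .index = index };
	struct my_get_info_resp resp;
	int rc;

	/* Every field is a u32, so the word count equals the endian
	 * word count; rpmi_u32_count() computes the same thing. */
	rc = rpmi_normal_request_with_status(chan, 0x42 /* hypothetical id */,
					     &req, 1, 1, &resp, 2, 2);
	if (rc)
		return rc;	/* already an SBI error code */

	*out = resp.value;
	return 0;
}
```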
+ * + * Author(s): + * Himanshu Chauhan + */ + +#include +#include +#include +#include + +/* List of FDT RAS drivers generated at compile time */ +extern struct fdt_ras *fdt_ras_drivers[]; +extern unsigned long fdt_ras_drivers_size; + +static struct fdt_ras *current_driver = NULL; + +void fdt_ras_exit(void) +{ + if (current_driver && current_driver->exit) + current_driver->exit(); +} + +static int fdt_ras_warm_init(void) +{ + if (current_driver && current_driver->warm_init) + return current_driver->warm_init(); + return 0; +} + +static int fdt_ras_cold_init(void) +{ + int pos, noff, rc; + struct fdt_ras *drv; + const struct fdt_match *match; + void *fdt = fdt_get_address(); + + for (pos = 0; pos < fdt_ras_drivers_size; pos++) { + drv = fdt_ras_drivers[pos]; + + noff = -1; + while ((noff = fdt_find_match(fdt, noff, + drv->match_table, &match)) >= 0) { + /* drv->cold_init must not be NULL */ + if (drv->cold_init == NULL) { + sbi_printf("%s: But no cold init function\n", __func__); + return SBI_EFAIL; + } + + rc = drv->cold_init(fdt, noff, match); + if (rc == SBI_ENODEV) + continue; + if (rc) + return rc; + current_driver = drv; + break; + } + } + + return 0; +} + +int fdt_ras_init(bool cold_boot) +{ + int rc; + + if (cold_boot) { + rc = fdt_ras_cold_init(); + if (rc) + return rc; + } + + return fdt_ras_warm_init(); +} diff --git a/lib/utils/ras/fdt_ras_drivers.carray b/lib/utils/ras/fdt_ras_drivers.carray new file mode 100644 index 00000000000..289ca1a37e1 --- /dev/null +++ b/lib/utils/ras/fdt_ras_drivers.carray @@ -0,0 +1,3 @@ +HEADER: sbi_utils/ras/fdt_ras.h +TYPE: struct fdt_ras +NAME: fdt_ras_drivers diff --git a/lib/utils/ras/fdt_ras_rpmi.c b/lib/utils/ras/fdt_ras_rpmi.c new file mode 100644 index 00000000000..019d501b9f7 --- /dev/null +++ b/lib/utils/ras/fdt_ras_rpmi.c @@ -0,0 +1,142 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems, Inc. 
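
`fdt_ras_cold_init()` above walks a compile-time generated driver table and treats `SBI_ENODEV` as "try the next driver" rather than a failure. A standalone model of that probe loop:

```c
#include <stdio.h>

#define ENODEV	19

/* Standalone model of the carray probe loop used by the fdt_*
 * subsystems: walk a terminated driver table, skip drivers whose init
 * reports "no such device", and stop on hard errors. */
struct drv {
	const char *name;
	int (*init)(void);
};

static int no_dev(void) { return -ENODEV; }
static int ok_dev(void) { return 0; }

static const struct drv drivers[] = {
	{ "ras-a", no_dev },
	{ "ras-b", ok_dev },
	{ NULL, NULL },
};

int main(void)
{
	int i, rc;

	for (i = 0; drivers[i].name; i++) {
		rc = drivers[i].init();
		if (rc == -ENODEV)
			continue;	/* not present, try the next one */
		if (rc)
			return rc;	/* hard failure */
		printf("bound driver: %s\n", drivers[i].name);
		break;
	}
	return 0;
}
```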
+ * + * Author(s): + * Himanshu Chauhan + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct rpmi_ras { + struct mbox_chan *chan; +}; + +static struct rpmi_ras ras; + +static int rpmi_ras_sync_hart_errs(u32 *pending_vectors, u32 *nr_pending, + u32 *nr_remaining) +{ + int rc = SBI_SUCCESS; + struct rpmi_ras_sync_hart_err_req req; + struct rpmi_ras_sync_err_resp resp; + + if (!pending_vectors || !nr_pending || !nr_remaining) + return SBI_ERR_INVALID_PARAM; + + *nr_pending = *nr_remaining = 0; + + if (!ras.chan) + return SBI_ERR_INVALID_STATE; + + req.hart_id = current_hartid(); + + rc = rpmi_normal_request_with_status(ras.chan, + RPMI_RAS_SRV_SYNC_HART_ERR_REQ, + &req, rpmi_u32_count(req), + rpmi_u32_count(req), + &resp, rpmi_u32_count(resp), + rpmi_u32_count(resp)); + + if (rc) { + sbi_printf("%s: sync failed, rc: 0x%x\n", __func__, rc); + return rc; + } + + if (!resp.status && resp.returned > 0 && resp.returned < MAX_PEND_VECS) { + memcpy(pending_vectors, resp.pending_vecs, + resp.returned * sizeof(u32)); + *nr_pending = resp.returned; + *nr_remaining = resp.remaining; + } else { + if (resp.status) { + sbi_printf("%s: sync returned status %d\n", + __func__, resp.status); + } + + if (resp.returned < 0 || resp.returned > MAX_PEND_VECS) + sbi_printf("%s: invalid vector range returned %u\n", + __func__, resp.returned); + + return SBI_ERR_FAILED; + } + + return SBI_SUCCESS; +} + +static int rpmi_ras_sync_dev_errs(u32 *pending_vectors, u32 *nr_pending, + u32 *nr_remaining) +{ + int rc = SBI_SUCCESS; + + return rc; +} + +static int rpmi_ras_probe(void) +{ + int rc; + struct rpmi_ras_probe_resp resp; + struct rpmi_ras_probe_req req; + + if (!ras.chan) + return SBI_ERR_INVALID_STATE; + + rc = rpmi_normal_request_with_status( + ras.chan, RPMI_RAS_SRV_PROBE_REQ, + &req, rpmi_u32_count(req), rpmi_u32_count(req), + &resp, rpmi_u32_count(resp), rpmi_u32_count(resp)); + if (rc) + return rc; + + return 0; +} + +static struct sbi_ras_agent sbi_rpmi_ras_agent = { + .name = "rpmi-ras-agent", + .ras_sync_hart_errs = rpmi_ras_sync_hart_errs, + .ras_sync_dev_errs = rpmi_ras_sync_dev_errs, + .ras_probe = rpmi_ras_probe, +}; + +static int rpmi_ras_cold_init(void *fdt, int nodeoff, + const struct fdt_match *match) +{ + int rc; + + if (ras.chan) + return 0; + + /* + * If channel request failed then other end does not support + * RAS service group so do nothing. + */ + rc = fdt_mailbox_request_chan(fdt, nodeoff, 0, &ras.chan); + if (rc) + return rc; + + sbi_ras_set_agent(&sbi_rpmi_ras_agent); + + sbi_ras_probe(); + + return 0; +} + +static const struct fdt_match rpmi_ras_match[] = { + { .compatible = "riscv,rpmi-ras" }, + {}, +}; + +struct fdt_ras fdt_ras_rpmi = { + .match_table = rpmi_ras_match, + .cold_init = rpmi_ras_cold_init, +}; diff --git a/lib/utils/ras/objects.mk b/lib/utils/ras/objects.mk new file mode 100644 index 00000000000..fd6dcc9f5ae --- /dev/null +++ b/lib/utils/ras/objects.mk @@ -0,0 +1,14 @@ +# +# SPDX-License-Identifier: BSD-2-Clause +# +# Copyright (c) 2023 Ventana Micro Systems Inc. 
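
The sync handler above range-checks `resp.returned` before copying device-reported vectors, since counts coming from the other end are untrusted. A minimal standalone model of that clamp-before-copy idea (the `MAX_PEND_VECS` bound is assumed here):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_PEND_VECS 32	/* assumed bound, mirrors the driver's limit */

/* Clamp a device-reported element count to the caller's buffer bound
 * before copying. */
static uint32_t copy_vectors(uint32_t *dst, const uint32_t *src,
			     uint32_t reported, uint32_t max)
{
	uint32_t n = (reported > max) ? max : reported;

	memcpy(dst, src, n * sizeof(*src));
	return n;
}

int main(void)
{
	uint32_t src[MAX_PEND_VECS] = { 1, 2, 3, 4 };
	uint32_t dst[4];
	uint32_t n = copy_vectors(dst, src, 100 /* bogus count */, 4);

	printf("copied %u vectors, first %u\n", n, dst[0]);
	return 0;
}
```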
+# +# Authors: +# Himanshu Chauhan +# + +libsbiutils-objs-$(CONFIG_FDT_RAS) += ras/fdt_ras.o +libsbiutils-objs-$(CONFIG_FDT_RAS) += ras/fdt_ras_drivers.o + +carray-fdt_ras_drivers-$(CONFIG_FDT_RAS_RPMI) += fdt_ras_rpmi +libsbiutils-objs-$(CONFIG_FDT_RAS_RPMI) += ras/fdt_ras_rpmi.o diff --git a/lib/utils/reset/Kconfig b/lib/utils/reset/Kconfig index b26260db167..9e654cb1b3f 100644 --- a/lib/utils/reset/Kconfig +++ b/lib/utils/reset/Kconfig @@ -24,6 +24,11 @@ config FDT_RESET_HTIF select SYS_HTIF default n +config FDT_RESET_RPMI + bool "RPMI FDT reset driver" + depends on FDT_MAILBOX && RPMI_MAILBOX + default n + config FDT_RESET_SUNXI_WDT bool "Sunxi WDT FDT reset driver" default n diff --git a/lib/utils/reset/fdt_reset_rpmi.c b/lib/utils/reset/fdt_reset_rpmi.c new file mode 100644 index 00000000000..6bf00ce7ea3 --- /dev/null +++ b/lib/utils/reset/fdt_reset_rpmi.c @@ -0,0 +1,141 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. + * + * Authors: + * Rahul Pathak + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* struct rpmi_sysreset: RPMI System Reset Context */ +struct rpmi_sysreset { + int warm_reset_support; + struct mbox_chan *chan; +}; + +static struct rpmi_sysreset sysreset_ctx; + +static int rpmi_system_reset_type_check(u32 reset_type) +{ + int ret; + struct rpmi_sysrst_get_reset_attributes_resp resp; + + ret = rpmi_normal_request_with_status(sysreset_ctx.chan, + RPMI_SYSRST_SRV_GET_SYSTEM_RESET_ATTRIBUTES, &reset_type, + rpmi_u32_count(reset_type), rpmi_u32_count(reset_type), + &resp, rpmi_u32_count(resp), rpmi_u32_count(resp)); + if (ret) { + return 0; + } + + return (resp.flags & RPMI_SYSRST_FLAGS_SUPPORTED_MASK) ? 1 : 0; +} + +/** + * rpmi_do_system_reset: Do system reset + * + * @reset_type: RPMI System Reset Type + */ +static void rpmi_do_system_reset(u32 reset_type) +{ + int ret; + + ret = rpmi_posted_request(sysreset_ctx.chan, + RPMI_SYSRST_SRV_SYSTEM_RESET, + &reset_type, rpmi_u32_count(reset_type), + rpmi_u32_count(reset_type)); + if (ret) + sbi_printf("system reset failed [type: %d]: ret: %d\n", + reset_type, ret); +} + +/** + * rpmi_system_reset_check: Check the support for + * various reset types + * + * @type: SBI System Reset Type + * @reason: Reason for system reset + */ +static int rpmi_system_reset_check(u32 type, u32 reason) +{ + switch (type) { + case SBI_SRST_RESET_TYPE_SHUTDOWN: + case SBI_SRST_RESET_TYPE_COLD_REBOOT: + return 1; + case SBI_SRST_RESET_TYPE_WARM_REBOOT: + return sysreset_ctx.warm_reset_support; + default: + return 0; + } +} + +static void rpmi_system_reset(u32 type, u32 reason) +{ + u32 reset_type; + + switch (type) { + case SBI_SRST_RESET_TYPE_SHUTDOWN: + reset_type = RPMI_SYSRST_SHUTDOWN; + break; + case SBI_SRST_RESET_TYPE_COLD_REBOOT: + reset_type = RPMI_SYSRST_COLD_RESET; + break; + case SBI_SRST_RESET_TYPE_WARM_REBOOT: + reset_type = RPMI_SYSRST_WARM_RESET; + break; + default: + return; + } + + rpmi_do_system_reset(reset_type); +} + +static struct sbi_system_reset_device rpmi_reset_dev = { + .name = "rpmi-system-reset", + .system_reset_check = rpmi_system_reset_check, + .system_reset = rpmi_system_reset, +}; + +static int rpmi_reset_init(void *fdt, int nodeoff, + const struct fdt_match *match) +{ + int ret; + + /* If channel already available then do nothing. */ + if (sysreset_ctx.chan) + return 0; + + /* + * If channel request failed then other end does not support + * system reset group so do nothing. 
+ */ + ret = fdt_mailbox_request_chan(fdt, nodeoff, 0, &sysreset_ctx.chan); + if (ret) + return ret; + + sysreset_ctx.warm_reset_support = + rpmi_system_reset_type_check(RPMI_SYSRST_WARM_RESET); + + sbi_system_reset_add_device(&rpmi_reset_dev); + + return SBI_OK; +} + +static const struct fdt_match rpmi_reset_match[] = { + { .compatible = "riscv,rpmi-system-reset" }, + {}, +}; + +struct fdt_reset fdt_reset_rpmi = { + .match_table = rpmi_reset_match, + .init = rpmi_reset_init, +}; diff --git a/lib/utils/reset/objects.mk b/lib/utils/reset/objects.mk index b5222347b21..70e91adf69f 100644 --- a/lib/utils/reset/objects.mk +++ b/lib/utils/reset/objects.mk @@ -26,3 +26,6 @@ libsbiutils-objs-$(CONFIG_FDT_RESET_SUNXI_WDT) += reset/fdt_reset_sunxi_wdt.o carray-fdt_reset_drivers-$(CONFIG_FDT_RESET_SYSCON) += fdt_syscon_poweroff carray-fdt_reset_drivers-$(CONFIG_FDT_RESET_SYSCON) += fdt_syscon_reboot libsbiutils-objs-$(CONFIG_FDT_RESET_SYSCON) += reset/fdt_reset_syscon.o + +carray-fdt_reset_drivers-$(CONFIG_FDT_RESET_RPMI) += fdt_reset_rpmi +libsbiutils-objs-$(CONFIG_FDT_RESET_RPMI) += reset/fdt_reset_rpmi.o diff --git a/lib/utils/rpxy/Kconfig b/lib/utils/rpxy/Kconfig new file mode 100644 index 00000000000..b6c454bd68d --- /dev/null +++ b/lib/utils/rpxy/Kconfig @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: BSD-2-Clause + +menu "RPXY Device Support" + +config FDT_RPXY + bool "FDT based RPXY drivers" + depends on FDT + default n + +if FDT_RPXY + +config FDT_RPXY_MBOX + bool "FDT RPXY mailbox client driver" + depends on FDT_MAILBOX + default n + +config FDT_RPXY_MM + bool "FDT RPXY MM client driver" + default n + +endif + +endmenu diff --git a/lib/utils/rpxy/fdt_rpxy.c b/lib/utils/rpxy/fdt_rpxy.c new file mode 100644 index 00000000000..05e4f157f67 --- /dev/null +++ b/lib/utils/rpxy/fdt_rpxy.c @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. + * + * Authors: + * Anup Patel + */ + +#include +#include +#include + +/* List of FDT RPXY drivers generated at compile time */ +extern struct fdt_rpxy *fdt_rpxy_drivers[]; +extern unsigned long fdt_rpxy_drivers_size; + +int fdt_rpxy_init(void) +{ + int pos, noff, rc; + struct fdt_rpxy *drv; + const struct fdt_match *match; + void *fdt = fdt_get_address(); + + for (pos = 0; pos < fdt_rpxy_drivers_size; pos++) { + drv = fdt_rpxy_drivers[pos]; + + noff = -1; + while ((noff = fdt_find_match(fdt, noff, + drv->match_table, &match)) >= 0) { + /* drv->init must not be NULL */ + if (drv->init == NULL) + return SBI_EFAIL; + + rc = drv->init(fdt, noff, match); + if (rc == SBI_ENODEV) + continue; + if (rc) + return rc; + + /* + * We will have multiple RPXY devices so we + * cannot break here. + */ + } + } + + /* Platforms might not have any RPXY devices so don't fail */ + return 0; +} diff --git a/lib/utils/rpxy/fdt_rpxy_drivers.carray b/lib/utils/rpxy/fdt_rpxy_drivers.carray new file mode 100644 index 00000000000..a749cd6d619 --- /dev/null +++ b/lib/utils/rpxy/fdt_rpxy_drivers.carray @@ -0,0 +1,3 @@ +HEADER: sbi_utils/rpxy/fdt_rpxy.h +TYPE: struct fdt_rpxy +NAME: fdt_rpxy_drivers diff --git a/lib/utils/rpxy/fdt_rpxy_mbox.c b/lib/utils/rpxy/fdt_rpxy_mbox.c new file mode 100644 index 00000000000..903b4029fcf --- /dev/null +++ b/lib/utils/rpxy/fdt_rpxy_mbox.c @@ -0,0 +1,180 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. 
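
The reset driver above maps SBI SRST reset types onto RPMI system reset types with a simple switch, rejecting anything unmapped. A standalone model of that mapping; the enum values are illustrative, not the real constants:

```c
#include <stdio.h>

/* Model of the SBI SRST -> RPMI reset-type mapping performed by
 * rpmi_system_reset() above. */
enum sbi_srst_type { SRST_SHUTDOWN, SRST_COLD_REBOOT, SRST_WARM_REBOOT };
enum rpmi_sysrst_type { RPMI_SHUTDOWN, RPMI_COLD_RESET, RPMI_WARM_RESET };

static int map_reset_type(enum sbi_srst_type t, enum rpmi_sysrst_type *out)
{
	switch (t) {
	case SRST_SHUTDOWN:
		*out = RPMI_SHUTDOWN;
		return 0;
	case SRST_COLD_REBOOT:
		*out = RPMI_COLD_RESET;
		return 0;
	case SRST_WARM_REBOOT:
		/* Only valid when the platform advertised support */
		*out = RPMI_WARM_RESET;
		return 0;
	default:
		return -1;	/* unmapped type: ignore the request */
	}
}

int main(void)
{
	enum rpmi_sysrst_type out;

	if (!map_reset_type(SRST_COLD_REBOOT, &out))
		printf("mapped to RPMI type %d\n", out);
	return 0;
}
```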
+ * + * Authors: + * Anup Patel + */ + +#include +#include +#include +#include +#include +#include +#include + +struct rpxy_mbox_data { + u32 service_group_id; + int num_services; + struct sbi_rpxy_service *services; +}; + +struct rpxy_mbox { + struct sbi_rpxy_service_group group; + struct mbox_chan *chan; +}; + +static int rpxy_mbox_send_message(struct sbi_rpxy_service_group *grp, + struct sbi_rpxy_service *srv, + void *tx, u32 tx_len, + void *rx, u32 rx_len, + unsigned long *ack_len) +{ + int ret; + struct mbox_xfer xfer; + struct rpmi_message_args args = { 0 }; + struct rpxy_mbox *rmb = container_of(grp, struct rpxy_mbox, group); + + if (ack_len) { + args.type = RPMI_MSG_NORMAL_REQUEST; + args.flags = (rx) ? 0 : RPMI_MSG_FLAGS_NO_RX; + args.service_id = srv->id; + mbox_xfer_init_txrx(&xfer, &args, + tx, tx_len, RPMI_DEF_TX_TIMEOUT, + rx, rx_len, RPMI_DEF_RX_TIMEOUT); + } else { + args.type = RPMI_MSG_POSTED_REQUEST; + args.flags = RPMI_MSG_FLAGS_NO_RX; + args.service_id = srv->id; + mbox_xfer_init_tx(&xfer, &args, + tx, tx_len, RPMI_DEF_TX_TIMEOUT); + } + + ret = mbox_chan_xfer(rmb->chan, &xfer); + if (ret) + return (ret == SBI_ETIMEDOUT) ? SBI_ETIMEDOUT : SBI_EFAIL; + if (ack_len) + *ack_len = args.rx_data_len; + + return 0; +} + +static int rpxy_mbox_init(void *fdt, int nodeoff, + const struct fdt_match *match) +{ + int rc; + struct rpxy_mbox *rmb; + struct mbox_chan *chan; + const struct rpxy_mbox_data *data = match->data; + + /* Allocate context for RPXY mbox client */ + rmb = sbi_zalloc(sizeof(*rmb)); + if (!rmb) + return SBI_ENOMEM; + + /* + * If channel request failed then other end does not support + * service group so do nothing. + */ + rc = fdt_mailbox_request_chan(fdt, nodeoff, 0, &chan); + if (rc) { + sbi_free(rmb); + return 0; + } + + /* Match channel service group id */ + if (data->service_group_id != chan->chan_args[0]) { + mbox_controller_free_chan(chan); + sbi_free(rmb); + return SBI_EINVAL; + } + + /* Setup RPXY mbox client */ + rmb->group.transport_id = chan->mbox->id; + rmb->group.service_group_id = data->service_group_id; + rmb->group.max_message_data_len = chan->mbox->max_xfer_len; + rmb->group.num_services = data->num_services; + rmb->group.services = data->services; + rmb->group.send_message = rpxy_mbox_send_message; + rmb->chan = chan; + + /* Register RPXY service group */ + rc = sbi_rpxy_register_service_group(&rmb->group); + if (rc) { + mbox_controller_free_chan(chan); + sbi_free(rmb); + return rc; + } + + return 0; +} + +static struct sbi_rpxy_service clock_services[] = { +{ + .id = RPMI_CLOCK_SRV_GET_NUM_CLOCKS, + .min_tx_len = 0, + .max_tx_len = 0, + .min_rx_len = sizeof(struct rpmi_clock_get_num_clocks_resp), + .max_rx_len = sizeof(struct rpmi_clock_get_num_clocks_resp), +}, +{ + .id = RPMI_CLOCK_SRV_GET_ATTRIBUTES, + .min_tx_len = sizeof(struct rpmi_clock_get_attributes_req), + .max_tx_len = sizeof(struct rpmi_clock_get_attributes_req), + .min_rx_len = sizeof(struct rpmi_clock_get_attributes_resp), + .max_rx_len = sizeof(struct rpmi_clock_get_attributes_resp), +}, +{ + .id = RPMI_CLOCK_SRV_GET_SUPPORTED_RATES, + .min_tx_len = sizeof(struct rpmi_clock_get_supported_rates_req), + .max_tx_len = sizeof(struct rpmi_clock_get_supported_rates_req), + .min_rx_len = sizeof(struct rpmi_clock_get_supported_rates_resp), + .max_rx_len = -1U, +}, +{ + .id = RPMI_CLOCK_SRV_SET_CONFIG, + .min_tx_len = sizeof(struct rpmi_clock_set_config_req), + .max_tx_len = sizeof(struct rpmi_clock_set_config_req), + .min_rx_len = sizeof(struct rpmi_clock_set_config_resp), + .max_rx_len 
= sizeof(struct rpmi_clock_set_config_resp), +}, +{ + .id = RPMI_CLOCK_SRV_GET_CONFIG, + .min_tx_len = sizeof(struct rpmi_clock_get_config_req), + .max_tx_len = sizeof(struct rpmi_clock_get_config_req), + .min_rx_len = sizeof(struct rpmi_clock_get_config_resp), + .max_rx_len = sizeof(struct rpmi_clock_get_config_resp), +}, +{ + .id = RPMI_CLOCK_SRV_SET_RATE, + .min_tx_len = sizeof(struct rpmi_clock_set_rate_req), + .max_tx_len = sizeof(struct rpmi_clock_set_rate_req), + .min_rx_len = sizeof(struct rpmi_clock_set_rate_resp), + .max_rx_len = sizeof(struct rpmi_clock_set_rate_resp), +}, +{ + .id = RPMI_CLOCK_SRV_GET_RATE, + .min_tx_len = sizeof(struct rpmi_clock_get_rate_req), + .max_tx_len = sizeof(struct rpmi_clock_get_rate_req), + .min_rx_len = sizeof(struct rpmi_clock_get_rate_resp), + .max_rx_len = sizeof(struct rpmi_clock_get_rate_resp), +}, +}; + +static struct rpxy_mbox_data clock_data = { + .service_group_id = RPMI_SRVGRP_CLOCK, + .num_services = array_size(clock_services), + .services = clock_services, +}; + +static const struct fdt_match rpxy_mbox_match[] = { + { .compatible = "riscv,rpmi-clock", .data = &clock_data }, + {}, +}; + +struct fdt_rpxy fdt_rpxy_mbox = { + .match_table = rpxy_mbox_match, + .init = rpxy_mbox_init, +}; diff --git a/lib/utils/rpxy/fdt_rpxy_mm.c b/lib/utils/rpxy/fdt_rpxy_mm.c new file mode 100755 index 00000000000..548955ea8a0 --- /dev/null +++ b/lib/utils/rpxy/fdt_rpxy_mm.c @@ -0,0 +1,267 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2024 Intel Corporation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define MM_VERSION_MAJOR 1 +#define MM_VERSION_MAJOR_SHIFT 16 +#define MM_VERSION_MAJOR_MASK 0x7FFF +#define MM_VERSION_MINOR 0 +#define MM_VERSION_MINOR_SHIFT 0 +#define MM_VERSION_MINOR_MASK 0xFFFF +#define MM_VERSION_FORM(major, minor) ((major << MM_VERSION_MAJOR_SHIFT) | \ + (minor)) +#define MM_VERSION_COMPILED MM_VERSION_FORM(MM_VERSION_MAJOR, \ + MM_VERSION_MINOR) + +/** SMM ServiceGroups IDs */ +enum rpmi_mm_servicegroup_id { + RPMI_SRVGRP_ID_MIN = 0, + RPMI_SRVGRP_MM = 0x000A, + RPMI_SRVGRP_ID_MAX_COUNT, +}; + +/** SMM ServiceGroup Service IDs */ +enum rpmi_mm_service_id { + RPMI_MM_SRV_VERSION = 0x01, + RPMI_MM_SRV_COMMUNICATE = 0x02, + RPMI_MM_SRV_COMPLETE = 0x03, +}; + +struct mm_boot_info { + uint64_t mm_mem_base; + uint64_t mm_mem_limit; + uint64_t mm_image_base; + uint64_t mm_stack_base; + uint64_t mm_heap_base; + uint64_t mm_ns_comm_buf_base; + uint64_t mm_shared_buf_base; + uint64_t mm_image_size; + uint64_t mm_pcpu_stack_size; + uint64_t mm_heap_size; + uint64_t mm_ns_comm_buf_size; + uint64_t mm_shared_buf_size; + uint32_t num_mem_region; +}; + +struct rpxy_mm_data { + u32 service_group_id; + int num_services; + struct sbi_rpxy_service *services; +}; + +static struct sbi_domain *tdomain; +static struct sbi_domain *udomain; + +static int mm_boot_info_setup(void *fdt, int nodeoff, const struct fdt_match *match) +{ + const u32 *prop_value; + u64 base64, size64; + int len; + + struct mm_boot_info *boot_info = NULL; + prop_value = fdt_getprop(fdt, nodeoff, "boot-info-address", &len); + if (!prop_value || len < 8) + return SBI_EINVAL; + base64 = fdt32_to_cpu(prop_value[0]); + base64 = (base64 << 32) | fdt32_to_cpu(prop_value[1]); + boot_info = (struct mm_boot_info *)base64; + + prop_value = fdt_getprop(fdt, nodeoff, "num-regions", &len); + if (!prop_value || len < 4) + return SBI_EINVAL; + boot_info->num_mem_region = (unsigned long)fdt32_to_cpu(*prop_value); 
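
`mm_boot_info_setup()` above repeatedly reassembles 64-bit addresses and sizes from pairs of big-endian 32-bit FDT cells, high cell first. A runnable model of that conversion; `my_fdt32_to_cpu()` stands in for the libfdt helper and assumes a little-endian host:

```c
#include <stdint.h>
#include <stdio.h>

/* Byte swap standing in for fdt32_to_cpu() on a little-endian host. */
static uint32_t my_fdt32_to_cpu(uint32_t x)
{
	return ((x & 0x000000ffU) << 24) | ((x & 0x0000ff00U) << 8) |
	       ((x & 0x00ff0000U) >> 8)  | ((x & 0xff000000U) >> 24);
}

/* Rebuild a 64-bit value from two big-endian cells, high cell first,
 * the same way the boot-info parser above does. */
static uint64_t cells_to_u64(const uint32_t *cells)
{
	uint64_t v = my_fdt32_to_cpu(cells[0]);

	return (v << 32) | my_fdt32_to_cpu(cells[1]);
}

int main(void)
{
	/* 0x1_80000000 stored as two big-endian cells */
	uint32_t cells[2] = { my_fdt32_to_cpu(0x1),
			      my_fdt32_to_cpu(0x80000000U) };

	printf("addr = 0x%llx\n", (unsigned long long)cells_to_u64(cells));
	return 0;
}
```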
+ + prop_value = fdt_getprop(fdt, nodeoff, "memory-region", &len); + if (!prop_value || len < 16) + return SBI_EINVAL; + base64 = fdt32_to_cpu(prop_value[0]); + base64 = (base64 << 32) | fdt32_to_cpu(prop_value[1]); + size64 = fdt32_to_cpu(prop_value[2]); + size64 = (size64 << 32) | fdt32_to_cpu(prop_value[3]); + boot_info->mm_mem_base = base64; + boot_info->mm_mem_limit = base64 + size64; + + prop_value = fdt_getprop(fdt, nodeoff, "image-region", &len); + if (!prop_value || len < 16) + return SBI_EINVAL; + base64 = fdt32_to_cpu(prop_value[0]); + base64 = (base64 << 32) | fdt32_to_cpu(prop_value[1]); + size64 = fdt32_to_cpu(prop_value[2]); + size64 = (size64 << 32) | fdt32_to_cpu(prop_value[3]); + boot_info->mm_image_base = base64; + boot_info->mm_image_size = size64; + + prop_value = fdt_getprop(fdt, nodeoff, "heap-region", &len); + if (!prop_value || len < 16) + return SBI_EINVAL; + base64 = fdt32_to_cpu(prop_value[0]); + base64 = (base64 << 32) | fdt32_to_cpu(prop_value[1]); + size64 = fdt32_to_cpu(prop_value[2]); + size64 = (size64 << 32) | fdt32_to_cpu(prop_value[3]); + boot_info->mm_heap_base = base64; + boot_info->mm_heap_size = size64; + + prop_value = fdt_getprop(fdt, nodeoff, "stack-region", &len); + if (!prop_value || len < 16) + return SBI_EINVAL; + base64 = fdt32_to_cpu(prop_value[0]); + base64 = (base64 << 32) | fdt32_to_cpu(prop_value[1]); + size64 = fdt32_to_cpu(prop_value[2]); + size64 = (size64 << 32) | fdt32_to_cpu(prop_value[3]); + boot_info->mm_stack_base = base64 + size64 -1; + + prop_value = fdt_getprop(fdt, nodeoff, "pcpu-stack-size", &len); + if (!prop_value || len < 4) + return SBI_EINVAL; + boot_info->mm_pcpu_stack_size = (unsigned long)fdt32_to_cpu(*prop_value); + + prop_value = fdt_getprop(fdt, nodeoff, "shared-buffer", &len); + if (!prop_value || len < 16) + return SBI_EINVAL; + base64 = fdt32_to_cpu(prop_value[0]); + base64 = (base64 << 32) | fdt32_to_cpu(prop_value[1]); + size64 = fdt32_to_cpu(prop_value[2]); + size64 = (size64 << 32) | fdt32_to_cpu(prop_value[3]); + boot_info->mm_shared_buf_base = base64; + boot_info->mm_shared_buf_size = size64; + + prop_value = fdt_getprop(fdt, nodeoff, "ns-comm-buffer", &len); + if (!prop_value || len < 16) + return SBI_EINVAL; + base64 = fdt32_to_cpu(prop_value[0]); + base64 = (base64 << 32) | fdt32_to_cpu(prop_value[1]); + size64 = fdt32_to_cpu(prop_value[2]); + size64 = (size64 << 32) | fdt32_to_cpu(prop_value[3]); + boot_info->mm_ns_comm_buf_base = base64; + boot_info->mm_ns_comm_buf_size = size64; + + return 0; +} + +static int rpxy_mm_handler(struct sbi_rpxy_service_group *grp, + struct sbi_rpxy_service *srv, + void *tx, u32 tx_len, + void *rx, u32 rx_len, + unsigned long *ack_len) +{ + int srv_id = srv->id; + struct rpxy_state *rs; + + if (RPMI_MM_SRV_VERSION == srv_id) { + *((uint32_t *)rx) = MM_VERSION_COMPILED; + } else if (RPMI_MM_SRV_COMMUNICATE == srv_id) { + if(!udomain) + udomain = sbi_domain_thishart_ptr(); + + if(tdomain) { + /* Get per-hart RPXY share memory with tdomain */ + rs = sbi_hartindex_to_domain_rs( + sbi_hartid_to_hartindex(current_hartid()), tdomain); + if (rs && rs->shmem_addr && tx && ((void *)rs->shmem_addr != tx)) { + sbi_memcpy((void *)rs->shmem_addr, tx, tx_len); + } + + sbi_domain_context_enter(tdomain); + } + } else if (RPMI_MM_SRV_COMPLETE == srv_id) { + if(!tdomain) + tdomain = sbi_domain_thishart_ptr(); + + if(udomain) { + /* Get per-hart RPXY share memory with udomain */ + rs = sbi_hartindex_to_domain_rs( + sbi_hartid_to_hartindex(current_hartid()), udomain); + if (rs && 
rs->shmem_addr && tx && ((void *)rs->shmem_addr != tx)) { + sbi_memcpy((void *)rs->shmem_addr, tx, tx_len); + } + } + + sbi_domain_context_exit(); + } + + return 0; +} + +static int rpxy_mm_init(void *fdt, int nodeoff, + const struct fdt_match *match) +{ + int rc; + struct sbi_rpxy_service_group *group; + const struct rpxy_mm_data *data = match->data; + group = sbi_zalloc(sizeof(*group)); + if (!group) + return SBI_ENOMEM; + + rc = mm_boot_info_setup(fdt, nodeoff, match); + if (rc) { + sbi_free(group); + return 0; + } + + /* Setup RPXY service group */ + group->transport_id = 0; + group->service_group_id = data->service_group_id; + group->max_message_data_len = -1; + group->num_services = data->num_services; + group->services = data->services; + group->send_message = rpxy_mm_handler; + /* Register RPXY service group */ + rc = sbi_rpxy_register_service_group(group); + if (rc) { + sbi_free(group); + return rc; + } + + return 0; +} + +static struct sbi_rpxy_service mm_services[] = { +{ + .id = RPMI_MM_SRV_VERSION, + .min_tx_len = 0, + .max_tx_len = 0, + .min_rx_len = sizeof(u32), + .max_rx_len = sizeof(u32), +}, +{ + .id = RPMI_MM_SRV_COMMUNICATE, + .min_tx_len = 0, + .max_tx_len = 0x8000, + .min_rx_len = 0, + .max_rx_len = 0x8000, +}, +{ + .id = RPMI_MM_SRV_COMPLETE, + .min_tx_len = 0, + .max_tx_len = 0x8000, + .min_rx_len = 0, + .max_rx_len = 0x8000, +}, +}; + +static struct rpxy_mm_data mm_data = { + .service_group_id = RPMI_SRVGRP_MM, + .num_services = array_size(mm_services), + .services = mm_services, +}; + +static const struct fdt_match rpxy_mm_match[] = { + { .compatible = "riscv,sbi-rpxy-mm", .data = &mm_data }, + {}, +}; + +struct fdt_rpxy fdt_rpxy_mm = { + .match_table = rpxy_mm_match, + .init = rpxy_mm_init, +}; diff --git a/lib/utils/rpxy/objects.mk b/lib/utils/rpxy/objects.mk new file mode 100644 index 00000000000..6905a14c9ae --- /dev/null +++ b/lib/utils/rpxy/objects.mk @@ -0,0 +1,17 @@ +# +# SPDX-License-Identifier: BSD-2-Clause +# +# Copyright (c) 2023 Ventana Micro Systems Inc. +# +# Authors: +# Anup Patel +# + +libsbiutils-objs-$(CONFIG_FDT_RPXY) += rpxy/fdt_rpxy.o +libsbiutils-objs-$(CONFIG_FDT_RPXY) += rpxy/fdt_rpxy_drivers.o + +carray-fdt_rpxy_drivers-$(CONFIG_FDT_RPXY_MBOX) += fdt_rpxy_mbox +libsbiutils-objs-$(CONFIG_FDT_RPXY_MBOX) += rpxy/fdt_rpxy_mbox.o + +carray-fdt_rpxy_drivers-$(CONFIG_FDT_RPXY_MM) += fdt_rpxy_mm +libsbiutils-objs-$(CONFIG_FDT_RPXY_MM) += rpxy/fdt_rpxy_mm.o \ No newline at end of file diff --git a/lib/utils/suspend/Kconfig b/lib/utils/suspend/Kconfig new file mode 100644 index 00000000000..2cbea75c265 --- /dev/null +++ b/lib/utils/suspend/Kconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: BSD-2-Clause + +menu "System Suspend Support" + +config FDT_SUSPEND + bool "FDT based suspend drivers" + depends on FDT + default n + +if FDT_SUSPEND + +config FDT_SUSPEND_RPMI + bool "FDT RPMI suspend driver" + depends on FDT_MAILBOX && RPMI_MAILBOX + default n + +endif + +endmenu diff --git a/lib/utils/suspend/fdt_suspend.c b/lib/utils/suspend/fdt_suspend.c new file mode 100644 index 00000000000..222b9b296c0 --- /dev/null +++ b/lib/utils/suspend/fdt_suspend.c @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. 
+ * + * Authors: + * Anup Patel + */ + +#include +#include +#include +#include + +/* List of FDT suspend drivers generated at compile time */ +extern struct fdt_suspend *fdt_suspend_drivers[]; +extern unsigned long fdt_suspend_drivers_size; + +int fdt_suspend_driver_init(void *fdt, struct fdt_suspend *drv) +{ + int noff, rc = SBI_ENODEV; + const struct fdt_match *match; + + noff = fdt_find_match(fdt, -1, drv->match_table, &match); + if (noff < 0) + return SBI_ENODEV; + + if (drv->init) { + rc = drv->init(fdt, noff, match); + if (rc && rc != SBI_ENODEV) { + sbi_printf("%s: %s init failed, %d\n", + __func__, match->compatible, rc); + } + } + + return rc; +} + +void fdt_suspend_init(void) +{ + int pos; + void *fdt = fdt_get_address(); + + for (pos = 0; pos < fdt_suspend_drivers_size; pos++) + fdt_suspend_driver_init(fdt, fdt_suspend_drivers[pos]); +} diff --git a/lib/utils/suspend/fdt_suspend_drivers.carray b/lib/utils/suspend/fdt_suspend_drivers.carray new file mode 100644 index 00000000000..8793e4cf3ac --- /dev/null +++ b/lib/utils/suspend/fdt_suspend_drivers.carray @@ -0,0 +1,3 @@ +HEADER: sbi_utils/suspend/fdt_suspend.h +TYPE: struct fdt_suspend +NAME: fdt_suspend_drivers diff --git a/lib/utils/suspend/fdt_suspend_rpmi.c b/lib/utils/suspend/fdt_suspend_rpmi.c new file mode 100644 index 00000000000..a9410e277b2 --- /dev/null +++ b/lib/utils/suspend/fdt_suspend_rpmi.c @@ -0,0 +1,134 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Ventana Micro Systems Inc. + * + * Authors: + * Subrahmanya Lingappa + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct rpmi_syssusp { + struct mbox_chan *chan; + bool cust_res_addr_supported; + bool suspend_supported; +}; + +static struct rpmi_syssusp syssusp_ctx; + +static int rpmi_syssusp_attrs(uint32_t *attrs) +{ + int rc; + struct rpmi_syssusp_get_attr_resp resp; + + rc = rpmi_normal_request_with_status( + syssusp_ctx.chan, RPMI_SYSSUSP_SRV_GET_SYSTEM_SUSPEND_ATTRIBUTES, + NULL, 0, 0, + &resp, rpmi_u32_count(resp), rpmi_u32_count(resp)); + if (rc) + return rc; + + *attrs = resp.flags; + + return 0; +} + +static int rpmi_syssusp(uint32_t suspend_type, ulong resume_addr) +{ + int rc; + struct rpmi_syssusp_suspend_req req; + struct rpmi_syssusp_suspend_resp resp; + + req.hartid = current_hartid(); + req.suspend_type = suspend_type; + req.resume_addr_lo = resume_addr; + req.resume_addr_hi = (u64)resume_addr >> 32; + + rc = rpmi_normal_request_with_status( + syssusp_ctx.chan, RPMI_SYSSUSP_SRV_SYSTEM_SUSPEND, + &req, rpmi_u32_count(req), rpmi_u32_count(req), + &resp, rpmi_u32_count(resp), rpmi_u32_count(resp)); + if (rc) + return rc; + + /* Wait for interrupt */ + wfi(); + + return 0; +} + +static int rpmi_system_suspend_check(u32 sleep_type) +{ + return ((sleep_type == SBI_SUSP_SLEEP_TYPE_SUSPEND) && + syssusp_ctx.suspend_supported); +} + +static int rpmi_system_suspend(u32 sleep_type, ulong resume_addr) +{ + int rc; + + if (sleep_type != SBI_SUSP_SLEEP_TYPE_SUSPEND) + return SBI_ENOTSUPP; + + rc = rpmi_syssusp(sleep_type, resume_addr); + if (rc) + return rc; + + return 0; +} + +static struct sbi_system_suspend_device rpmi_suspend_dev = { + .name = "rpmi-system-suspend", + .system_suspend_check = rpmi_system_suspend_check, + .system_suspend = rpmi_system_suspend, +}; + +static int rpmi_suspend_init(void *fdt, int nodeoff, + const struct fdt_match *match) +{ + int rc; + uint32_t attrs = 0; + + /* If channel already available then do nothing. 
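
`rpmi_syssusp()` above splits the 64-bit resume address across two u32 message fields, low word first. A runnable model of the split and rejoin:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* An address with bits above 32 set, to exercise the split */
	uint64_t resume_addr = 0x80200000ULL | ((uint64_t)0x1 << 32);
	uint32_t lo = (uint32_t)resume_addr;		/* resume_addr_lo */
	uint32_t hi = (uint32_t)(resume_addr >> 32);	/* resume_addr_hi */
	uint64_t rejoined = ((uint64_t)hi << 32) | lo;

	printf("lo=0x%08x hi=0x%08x rejoined=0x%llx\n",
	       lo, hi, (unsigned long long)rejoined);
	return 0;
}
```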
diff --git a/lib/utils/suspend/fdt_suspend_rpmi.c b/lib/utils/suspend/fdt_suspend_rpmi.c
new file mode 100644
index 00000000000..a9410e277b2
--- /dev/null
+++ b/lib/utils/suspend/fdt_suspend_rpmi.c
@@ -0,0 +1,134 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Ventana Micro Systems Inc.
+ *
+ * Authors:
+ *   Subrahmanya Lingappa
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/sbi_ecall_interface.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_system.h>
+#include <sbi_utils/fdt/fdt_helper.h>
+#include <sbi_utils/mailbox/fdt_mailbox.h>
+#include <sbi_utils/mailbox/rpmi_mailbox.h>
+#include <sbi_utils/mailbox/rpmi_msgprot.h>
+
+struct rpmi_syssusp {
+	struct mbox_chan *chan;
+	bool cust_res_addr_supported;
+	bool suspend_supported;
+};
+
+static struct rpmi_syssusp syssusp_ctx;
+
+static int rpmi_syssusp_attrs(uint32_t *attrs)
+{
+	int rc;
+	struct rpmi_syssusp_get_attr_resp resp;
+
+	rc = rpmi_normal_request_with_status(
+		syssusp_ctx.chan, RPMI_SYSSUSP_SRV_GET_SYSTEM_SUSPEND_ATTRIBUTES,
+		NULL, 0, 0,
+		&resp, rpmi_u32_count(resp), rpmi_u32_count(resp));
+	if (rc)
+		return rc;
+
+	*attrs = resp.flags;
+
+	return 0;
+}
+
+static int rpmi_syssusp(uint32_t suspend_type, ulong resume_addr)
+{
+	int rc;
+	struct rpmi_syssusp_suspend_req req;
+	struct rpmi_syssusp_suspend_resp resp;
+
+	req.hartid = current_hartid();
+	req.suspend_type = suspend_type;
+	req.resume_addr_lo = resume_addr;
+	req.resume_addr_hi = (u64)resume_addr >> 32;
+
+	rc = rpmi_normal_request_with_status(
+		syssusp_ctx.chan, RPMI_SYSSUSP_SRV_SYSTEM_SUSPEND,
+		&req, rpmi_u32_count(req), rpmi_u32_count(req),
+		&resp, rpmi_u32_count(resp), rpmi_u32_count(resp));
+	if (rc)
+		return rc;
+
+	/* Wait for interrupt */
+	wfi();
+
+	return 0;
+}
+
+static int rpmi_system_suspend_check(u32 sleep_type)
+{
+	return ((sleep_type == SBI_SUSP_SLEEP_TYPE_SUSPEND) &&
+		syssusp_ctx.suspend_supported);
+}
+
+static int rpmi_system_suspend(u32 sleep_type, ulong resume_addr)
+{
+	int rc;
+
+	if (sleep_type != SBI_SUSP_SLEEP_TYPE_SUSPEND)
+		return SBI_ENOTSUPP;
+
+	rc = rpmi_syssusp(sleep_type, resume_addr);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static struct sbi_system_suspend_device rpmi_suspend_dev = {
+	.name = "rpmi-system-suspend",
+	.system_suspend_check = rpmi_system_suspend_check,
+	.system_suspend = rpmi_system_suspend,
+};
+
+static int rpmi_suspend_init(void *fdt, int nodeoff,
+			     const struct fdt_match *match)
+{
+	int rc;
+	uint32_t attrs = 0;
+
+	/* If the channel is already available then do nothing. */
+	if (syssusp_ctx.chan)
+		return 0;
+
+	/*
+	 * If the channel request fails then the other end does not
+	 * support the suspend service group, so do nothing.
+	 */
+	rc = fdt_mailbox_request_chan(fdt, nodeoff, 0, &syssusp_ctx.chan);
+	if (rc)
+		return 0;
+
+	/* Get suspend attributes */
+	rc = rpmi_syssusp_attrs(&attrs);
+	if (rc)
+		return rc;
+
+	syssusp_ctx.suspend_supported = attrs & RPMI_SYSSUSP_FLAGS_SUPPORTED;
+	syssusp_ctx.cust_res_addr_supported =
+		attrs & RPMI_SYSSUSP_FLAGS_CUSTOM_RESUME_ADDR_SUPPORTED;
+
+	sbi_system_suspend_set_device(&rpmi_suspend_dev);
+
+	return 0;
+}
+
+static const struct fdt_match rpmi_suspend_match[] = {
+	{ .compatible = "riscv,rpmi-system-suspend" },
+	{},
+};
+
+struct fdt_suspend fdt_suspend_rpmi = {
+	.match_table = rpmi_suspend_match,
+	.init = rpmi_suspend_init,
+};
diff --git a/lib/utils/suspend/objects.mk b/lib/utils/suspend/objects.mk
new file mode 100644
index 00000000000..47d55f064d9
--- /dev/null
+++ b/lib/utils/suspend/objects.mk
@@ -0,0 +1,14 @@
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (c) 2023 Ventana Micro Systems Inc.
+#
+# Authors:
+#   Anup Patel
+#
+
+libsbiutils-objs-$(CONFIG_FDT_SUSPEND) += suspend/fdt_suspend.o
+libsbiutils-objs-$(CONFIG_FDT_SUSPEND) += suspend/fdt_suspend_drivers.o
+
+carray-fdt_suspend_drivers-$(CONFIG_FDT_SUSPEND_RPMI) += fdt_suspend_rpmi
+libsbiutils-objs-$(CONFIG_FDT_SUSPEND_RPMI) += suspend/fdt_suspend_rpmi.o
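The RPMI system-suspend message layouts referenced above live in a header that is not part of this hunk. A hypothetical fragment of sbi_utils/mailbox/rpmi_msgprot.h, inferred from the field accesses in fdt_suspend_rpmi.c; the status fields and exact field widths are assumptions:

/* Hypothetical fragment, inferred from fdt_suspend_rpmi.c above */
struct rpmi_syssusp_get_attr_resp {
	s32 status;	/* assumed: consumed by rpmi_normal_request_with_status() */
	u32 flags;	/* RPMI_SYSSUSP_FLAGS_* bits */
};

struct rpmi_syssusp_suspend_req {
	u32 hartid;		/* requesting HART */
	u32 suspend_type;	/* forwarded SBI SUSP sleep type */
	u32 resume_addr_lo;	/* low 32 bits of resume address */
	u32 resume_addr_hi;	/* high 32 bits of resume address */
};

struct rpmi_syssusp_suspend_resp {
	s32 status;	/* assumed: consumed by rpmi_normal_request_with_status() */
};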
diff --git a/platform/generic/allwinner/sun20i-d1.c b/platform/generic/allwinner/sun20i-d1.c
index e9388dba76b..2a7f99b35fa 100644
--- a/platform/generic/allwinner/sun20i-d1.c
+++ b/platform/generic/allwinner/sun20i-d1.c
@@ -161,7 +161,7 @@ static void sun20i_d1_riscv_cfg_init(void)
 	writel_relaxed(entry >> 32, SUN20I_D1_RISCV_CFG_BASE + RESET_ENTRY_HI_REG);
 }
 
-static int sun20i_d1_hart_suspend(u32 suspend_type)
+static int sun20i_d1_hart_suspend(u32 suspend_type, ulong mmode_resume_addr)
 {
 	/* Use the generic code for retentive suspend. */
 	if (!(suspend_type & SBI_HSM_SUSP_NON_RET_BIT))
diff --git a/platform/generic/configs/defconfig b/platform/generic/configs/defconfig
index 1ce6a1204cf..3f7005d1749 100644
--- a/platform/generic/configs/defconfig
+++ b/platform/generic/configs/defconfig
@@ -6,10 +6,14 @@ CONFIG_PLATFORM_SIFIVE_FU740=y
 CONFIG_PLATFORM_SOPHGO_SG2042=y
 CONFIG_PLATFORM_STARFIVE_JH7110=y
 CONFIG_PLATFORM_THEAD=y
+CONFIG_FDT_CPPC=y
+CONFIG_FDT_CPPC_RPMI=y
 CONFIG_FDT_GPIO=y
 CONFIG_FDT_GPIO_DESIGNWARE=y
 CONFIG_FDT_GPIO_SIFIVE=y
 CONFIG_FDT_GPIO_STARFIVE=y
+CONFIG_FDT_HSM=y
+CONFIG_FDT_HSM_RPMI=y
 CONFIG_FDT_I2C=y
 CONFIG_FDT_I2C_SIFIVE=y
 CONFIG_FDT_I2C_DW=y
@@ -20,14 +24,21 @@ CONFIG_FDT_IRQCHIP=y
 CONFIG_FDT_IRQCHIP_APLIC=y
 CONFIG_FDT_IRQCHIP_IMSIC=y
 CONFIG_FDT_IRQCHIP_PLIC=y
+CONFIG_FDT_MAILBOX=y
+CONFIG_RPMI_MAILBOX=y
+CONFIG_FDT_MAILBOX_RPMI_SHMEM=y
 CONFIG_FDT_REGMAP=y
 CONFIG_FDT_REGMAP_SYSCON=y
 CONFIG_FDT_RESET=y
 CONFIG_FDT_RESET_ATCWDT200=y
 CONFIG_FDT_RESET_GPIO=y
 CONFIG_FDT_RESET_HTIF=y
+CONFIG_FDT_RESET_RPMI=y
 CONFIG_FDT_RESET_SUNXI_WDT=y
 CONFIG_FDT_RESET_SYSCON=y
+CONFIG_FDT_RPXY=y
+CONFIG_FDT_RPXY_MBOX=y
+CONFIG_FDT_RPXY_MM=y
 CONFIG_FDT_SERIAL=y
 CONFIG_FDT_SERIAL_CADENCE=y
 CONFIG_FDT_SERIAL_GAISLER=y
@@ -39,6 +50,8 @@ CONFIG_FDT_SERIAL_LITEX=y
 CONFIG_FDT_SERIAL_UART8250=y
 CONFIG_FDT_SERIAL_XILINX_UARTLITE=y
 CONFIG_SERIAL_SEMIHOSTING=y
+CONFIG_FDT_SUSPEND=y
+CONFIG_FDT_SUSPEND_RPMI=y
 CONFIG_FDT_TIMER=y
 CONFIG_FDT_TIMER_MTIMER=y
 CONFIG_FDT_TIMER_PLMT=y
diff --git a/platform/generic/platform.c b/platform/generic/platform.c
index 1f46b76c4d7..07c7a5782ab 100644
--- a/platform/generic/platform.c
+++ b/platform/generic/platform.c
@@ -17,16 +17,21 @@ #include
 #include
 #include
 #include
+#include <sbi_utils/cppc/fdt_cppc.h>
+#include <sbi_utils/hsm/fdt_hsm.h>
 #include
 #include
 #include
 #include
+#include <sbi_utils/ras/fdt_ras.h>
 #include
 #include
 #include
+#include <sbi_utils/rpxy/fdt_rpxy.h>
 #include
 #include
 #include
+#include <sbi_utils/suspend/fdt_suspend.h>
 #include
 
 /* List of platform override modules generated at compile time */
@@ -221,8 +226,14 @@ static int generic_nascent_init(void)
 
 static int generic_early_init(bool cold_boot)
 {
-	if (cold_boot)
+	if (cold_boot) {
 		fdt_reset_init();
+		fdt_suspend_init();
+	}
+
+	fdt_hsm_init(cold_boot);
+	fdt_cppc_init(cold_boot);
+	fdt_ras_init(cold_boot);
 
 	if (!generic_plat || !generic_plat->early_init)
 		return 0;
@@ -249,6 +260,7 @@ static int generic_final_init(bool cold_boot)
 	fdt_cpu_fixup(fdt);
 	fdt_fixups(fdt);
 	fdt_domain_fixup(fdt);
+	fdt_hsm_fixup(fdt);
 
 	if (generic_plat && generic_plat->fdt_fixup) {
 		rc = generic_plat->fdt_fixup(fdt, generic_plat_match);
@@ -275,6 +287,10 @@ static int generic_vendor_ext_provider(long funcid,
 
 static void generic_early_exit(void)
 {
+	fdt_cppc_exit();
+	fdt_hsm_exit();
+	fdt_ras_exit();
+
 	if (generic_plat && generic_plat->early_exit)
 		generic_plat->early_exit(generic_plat_match);
 }
@@ -406,6 +422,7 @@ const struct sbi_platform_operations platform_ops = {
 	.get_tlb_num_entries = generic_tlb_num_entries,
 	.timer_init = fdt_timer_init,
 	.timer_exit = fdt_timer_exit,
+	.rpxy_init = fdt_rpxy_init,
 	.vendor_ext_check = generic_vendor_ext_check,
 	.vendor_ext_provider = generic_vendor_ext_provider,
 };
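End to end, the suspend path added here is driven from S-mode through the SBI SUSP extension (EID 0x53555350, FID 0): the ecall reaches OpenSBI's system-suspend core, which invokes the rpmi_suspend_dev callbacks registered by fdt_suspend_rpmi.c. A minimal sketch of such a caller; the wrapper and its name are illustrative, not part of the patch:

#define SBI_EXT_SUSP			0x53555350
#define SBI_EXT_SUSP_SUSPEND		0x0
#define SBI_SUSP_SLEEP_TYPE_SUSPEND	0x0	/* suspend-to-RAM */

/* Minimal S-mode ecall wrapper; returns the SBI error code in a0 */
static long sbi_system_suspend_call(unsigned long sleep_type,
				    unsigned long resume_addr,
				    unsigned long opaque)
{
	register unsigned long a0 asm("a0") = sleep_type;
	register unsigned long a1 asm("a1") = resume_addr;
	register unsigned long a2 asm("a2") = opaque;
	register unsigned long a6 asm("a6") = SBI_EXT_SUSP_SUSPEND;
	register unsigned long a7 asm("a7") = SBI_EXT_SUSP;

	asm volatile("ecall"
		     : "+r"(a0), "+r"(a1)
		     : "r"(a2), "r"(a6), "r"(a7)
		     : "memory");
	return a0;
}

Note that rpmi_system_suspend_check() above only accepts SBI_SUSP_SLEEP_TYPE_SUSPEND, and only when the platform microcontroller reported RPMI_SYSSUSP_FLAGS_SUPPORTED, so any other sleep_type fails the check before rpmi_system_suspend() is reached.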