From e0b20c9673cd4d529bef5f6d45330bb9f3f588ee Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Fri, 31 May 2024 15:17:46 -0700 Subject: [PATCH 01/19] avoid reloc-type collisions on elf.h constants Do we need more robust architecture protection (Issue #1356) The elf.h reloc-type constants are not unique across archs #define R_PPC64_REL24 10 /* PC relative 26 bit */ #define R_X86_64_32 10 /* Direct 32 bit zero extended */ so to avoid any unexpected aliasing, guard all R_arch_type refs with a check on kelf->arch, or a global default arch set from the first elf encountered. --- kpatch-build/create-diff-object.c | 77 ++++++++++++++++++----------- kpatch-build/create-kpatch-module.c | 3 ++ kpatch-build/kpatch-elf.c | 24 ++++++++- kpatch-build/kpatch-elf.h | 3 ++ 4 files changed, 77 insertions(+), 30 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 25710e921..1097e2daf 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -215,6 +215,8 @@ static bool is_gcc6_localentry_bundled_sym(struct kpatch_elf *kelf, */ static struct rela *toc_rela(const struct rela *rela) { + if (!is_arch(PPC64)) + return (struct rela *)rela; if (rela->type != R_PPC64_TOC16_HA && rela->type != R_PPC64_TOC16_LO_DS) return (struct rela *)rela; @@ -1618,7 +1620,7 @@ static void kpatch_replace_sections_syms(struct kpatch_elf *kelf) if (is_text_section(relasec->base) && !is_text_section(sym->sec) && - rela->type == R_X86_64_32S && + is_arch(X86_64) && rela->type == R_X86_64_32S && rela->addend == (long)sym->sec->sh.sh_size && end == (long)sym->sec->sh.sh_size) { @@ -3236,12 +3238,23 @@ static int function_ptr_rela(const struct rela *rela) { const struct rela *rela_toc = toc_rela(rela); + switch (def_arch()) { + case PPC64: + if (rela->type != R_PPC64_TOC16_HA && + rela->type != R_PPC64_TOC16_LO_DS) + return false; + break; + case X86_64: + if (rela->type != R_X86_64_32S) + return false; + break; + default: + break; + 
} + return (rela_toc && rela_toc->sym->type == STT_FUNC && !rela_toc->sym->parent && - rela_toc->addend == (int)rela_toc->sym->sym.st_value && - (rela->type == R_X86_64_32S || - rela->type == R_PPC64_TOC16_HA || - rela->type == R_PPC64_TOC16_LO_DS)); + rela_toc->addend == (int)rela_toc->sym->sym.st_value); } static bool need_klp_reloc(struct kpatch_elf *kelf, struct lookup_table *table, @@ -3256,32 +3269,38 @@ static bool need_klp_reloc(struct kpatch_elf *kelf, struct lookup_table *table, * These references are treated specially by the module loader and * should never be converted to klp relocations. */ - if (rela->type == R_PPC64_REL16_HA || rela->type == R_PPC64_REL16_LO || - rela->type == R_PPC64_ENTRY) - return false; + switch (kelf->arch) { + case PPC64: + if (rela->type == R_PPC64_REL16_HA || rela->type == R_PPC64_REL16_LO || + rela->type == R_PPC64_ENTRY) + return false; - /* v5.13+ kernels use relative jump labels */ - if (rela->type == R_PPC64_REL64 && strcmp(relasec->name, ".rela__jump_table")) - return false; + /* v5.13+ kernels use relative jump labels */ + if (rela->type == R_PPC64_REL64 && strcmp(relasec->name, ".rela__jump_table")) + return false; - /* - * On powerpc, the function prologue generated by GCC 6 has the - * sequence: - * - * .globl my_func - * .type my_func, @function - * .quad .TOC.-my_func - * my_func: - * .reloc ., R_PPC64_ENTRY ; optional - * ld r2,-8(r12) - * add r2,r2,r12 - * .localentry my_func, .-my_func - * - * The R_PPC64_ENTRY is optional and its symbol might have an empty - * name. Leave it as a normal rela. - */ - if (rela->type == R_PPC64_ENTRY) - return false; + /* + * On powerpc, the function prologue generated by GCC 6 has the + * sequence: + * + * .globl my_func + * .type my_func, @function + * .quad .TOC.-my_func + * my_func: + * .reloc ., R_PPC64_ENTRY ; optional + * ld r2,-8(r12) + * add r2,r2,r12 + * .localentry my_func, .-my_func + * + * The R_PPC64_ENTRY is optional and its symbol might have an empty + * name. 
Leave it as a normal rela. + */ + if (rela->type == R_PPC64_ENTRY) + return false; + break; + default: + break; + } /* * Allow references to core module symbols to remain as normal diff --git a/kpatch-build/create-kpatch-module.c b/kpatch-build/create-kpatch-module.c index 2884f93d9..b758bd4d9 100644 --- a/kpatch-build/create-kpatch-module.c +++ b/kpatch-build/create-kpatch-module.c @@ -58,6 +58,9 @@ static void create_dynamic_rela_sections(struct kpatch_elf *kelf, struct section dynsec = create_section_pair(kelf, ".kpatch.dynrelas", sizeof(*dynrelas), nr); dynrelas = dynsec->data->d_buf; + if (kelf->arch != X86_64) + return; + for (index = 0; index < nr; index++) { offset = index * (unsigned int)sizeof(*krelas); diff --git a/kpatch-build/kpatch-elf.c b/kpatch-build/kpatch-elf.c index 374d424cc..885ab913a 100644 --- a/kpatch-build/kpatch-elf.c +++ b/kpatch-build/kpatch-elf.c @@ -38,6 +38,26 @@ * Helper functions ******************/ +static enum architecture current_arch; + +enum architecture def_arch(void) +{ + return current_arch; +} + +bool is_arch(enum architecture arch) +{ + return current_arch == arch; +} + +void set_arch(enum architecture arch) +{ + if (!arch || (current_arch && arch != current_arch)) + ERROR("inconsistent ELF arch: setting %d but already %d", + arch, current_arch); + current_arch = arch; +} + char *status_str(enum status status) { switch(status) { @@ -594,8 +614,10 @@ struct kpatch_elf *kpatch_elf_open(const char *name) kelf->arch = S390; break; default: - ERROR("Unsupported target architecture"); + ERROR("Unsupported target architecture: e_machine %x", + ehdr.e_machine); } + set_arch(kelf->arch); kpatch_create_section_list(kelf); kpatch_create_symbol_list(kelf); diff --git a/kpatch-build/kpatch-elf.h b/kpatch-build/kpatch-elf.h index e32209b72..d47b6c7e0 100644 --- a/kpatch-build/kpatch-elf.h +++ b/kpatch-build/kpatch-elf.h @@ -157,6 +157,9 @@ int offset_of_string(struct list_head *list, char *name); long rela_target_offset(struct 
kpatch_elf *kelf, struct section *relasec, struct rela *rela); unsigned int insn_length(struct kpatch_elf *kelf, void *addr); +enum architecture def_arch(void); +void set_arch(enum architecture); +bool is_arch(enum architecture); #ifndef R_PPC64_ENTRY #define R_PPC64_ENTRY 118 From fcae79f54bcc6d4a7f190f1c60ff6acff964d82f Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Wed, 6 Oct 2021 12:41:16 -0700 Subject: [PATCH 02/19] kpatch-build: Add sym->has_func_profiling support for aarch64 The "has_function_profiling" support field in the symbol struct is used to show that a function symbol is able to be patched. This is necessary to check that functions which need to be patched are able to be. On arm64 this means the presence of 2 NOP instructions at function entry which are patched by ftrace to call the ftrace handling code. These 2 NOPs are inserted by the compiler and the location of them is recorded in a section called "__patchable_function_entries". Check whether a symbol has a corresponding entry in the "__patchable_function_entries" section and if so mark it as "has_func_profiling". 
Signed-off-by: Suraj Jitindar Singh --- V1->V2: - Make error message standard across architectures when no patchable entry - Don't store __patchable_function_entries section in kpatch_find_func_profiling_calls(), instead find it each time --- kpatch-build/create-diff-object.c | 20 +++++++++++++++++++- kpatch-build/kpatch-elf.c | 3 +++ kpatch-build/kpatch-elf.h | 1 + 3 files changed, 23 insertions(+), 1 deletion(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 1097e2daf..03794da82 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -1696,7 +1696,7 @@ static void kpatch_check_func_profiling_calls(struct kpatch_elf *kelf) (sym->parent && sym->parent->status == CHANGED)) continue; if (!sym->twin->has_func_profiling) { - log_normal("function %s has no fentry/mcount call, unable to patch\n", + log_normal("function %s doesn't have patchable function entry, unable to patch\n", sym->name); errs++; } @@ -3976,6 +3976,24 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) continue; switch(kelf->arch) { + case AARCH64: { + struct section *sec = find_section_by_name(&kelf->sections, + "__patchable_function_entries"); + /* + * If we can't find the __patchable_function_entries section or + * there are no relocations in it then not patchable. 
+ */ + if (!sec || !sec->rela) + return; + list_for_each_entry(rela, &sec->rela->relas, list) { + if (rela->sym->sec && sym->sec == rela->sym->sec) { + sym->has_func_profiling = 1; + break; + } + } + + break; + } case PPC64: list_for_each_entry(rela, &sym->sec->rela->relas, list) { if (!strcmp(rela->sym->name, "_mcount")) { diff --git a/kpatch-build/kpatch-elf.c b/kpatch-build/kpatch-elf.c index 885ab913a..c9a0188fc 100644 --- a/kpatch-build/kpatch-elf.c +++ b/kpatch-build/kpatch-elf.c @@ -604,6 +604,9 @@ struct kpatch_elf *kpatch_elf_open(const char *name) if (!gelf_getehdr(kelf->elf, &ehdr)) ERROR("gelf_getehdr"); switch (ehdr.e_machine) { + case EM_AARCH64: + kelf->arch = AARCH64; + break; case EM_PPC64: kelf->arch = PPC64; break; diff --git a/kpatch-build/kpatch-elf.h b/kpatch-build/kpatch-elf.h index d47b6c7e0..7f787e526 100644 --- a/kpatch-build/kpatch-elf.h +++ b/kpatch-build/kpatch-elf.h @@ -115,6 +115,7 @@ enum architecture { PPC64 = 0x1 << 0, X86_64 = 0x1 << 1, S390 = 0x1 << 2, + AARCH64 = 0x1 << 3, }; struct kpatch_elf { From cba63fa9fdc62f6a49d986a3cdb8f568c0568ff2 Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Fri, 28 Jul 2023 16:33:23 -0700 Subject: [PATCH 03/19] create-diff-object: Split kpatch_create_mcount_sections into alloc and populate The function kpatch_create_mcount_sections() allocates the __mcount_loc section and then populates it with functions which have a patchable entry. The following patch will add aarch64 support to this function where the allocation will have to be done before the kelf_patched is torn down. Thus split this function so that the allocation can be performed earlier and the populating as before. No intended functional change. 
Signed-off-by: Suraj Jitindar Singh --- V1->V2: - Add patch to series --- kpatch-build/create-diff-object.c | 31 ++++++++++++++++++++++--------- test/unit/objs | 2 +- 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 03794da82..0d0efc046 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -3695,6 +3695,21 @@ static void kpatch_create_callbacks_objname_rela(struct kpatch_elf *kelf, char * } } +static void kpatch_alloc_mcount_sections(struct kpatch_elf *kelf, struct kpatch_elf *kelfout) +{ + int nr; + struct symbol *sym; + + nr = 0; + list_for_each_entry(sym, &kelfout->symbols, list) + if (sym->type == STT_FUNC && sym->status != SAME && + sym->has_func_profiling) + nr++; + + /* create text/rela section pair */ + create_section_pair(kelfout, "__mcount_loc", sizeof(void *), nr); +} + /* * This function basically reimplements the functionality of the Linux * recordmcount script, so that patched functions can be recognized by ftrace. @@ -3702,7 +3717,7 @@ static void kpatch_create_callbacks_objname_rela(struct kpatch_elf *kelf, char * * TODO: Eventually we can modify recordmount so that it recognizes our bundled * sections as valid and does this work for us. 
*/ -static void kpatch_create_mcount_sections(struct kpatch_elf *kelf) +static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) { int nr, index; struct section *sec, *relasec; @@ -3711,15 +3726,10 @@ static void kpatch_create_mcount_sections(struct kpatch_elf *kelf) void **funcs; unsigned long insn_offset = 0; - nr = 0; - list_for_each_entry(sym, &kelf->symbols, list) - if (sym->type == STT_FUNC && sym->status != SAME && - sym->has_func_profiling) - nr++; - /* create text/rela section pair */ - sec = create_section_pair(kelf, "__mcount_loc", sizeof(void*), nr); + sec = find_section_by_name(&kelf->sections, "__mcount_loc"); relasec = sec->rela; + nr = (int) (sec->data->d_size / sizeof(void *)); /* populate sections */ index = 0; @@ -4165,6 +4175,9 @@ int main(int argc, char *argv[]) /* this is destructive to kelf_patched */ kpatch_migrate_included_elements(kelf_patched, &kelf_out); + /* this must be done before kelf_patched is torn down */ + kpatch_alloc_mcount_sections(kelf_patched, kelf_out); + /* * Teardown kelf_patched since we shouldn't access sections or symbols * through it anymore. 
Don't free however, since our section and symbol @@ -4183,7 +4196,7 @@ int main(int argc, char *argv[]) kpatch_create_callbacks_objname_rela(kelf_out, parent_name); kpatch_build_strings_section_data(kelf_out); - kpatch_create_mcount_sections(kelf_out); + kpatch_populate_mcount_sections(kelf_out); /* * At this point, the set of output sections and symbols is diff --git a/test/unit/objs b/test/unit/objs index a51c80a60..31f16a29c 160000 --- a/test/unit/objs +++ b/test/unit/objs @@ -1 +1 @@ -Subproject commit a51c80a60fc8ade7e7ec8ad875b2963f3a15a494 +Subproject commit 31f16a29c6c3dc9ac101d8ca780723a6667c219e From d4b00e16aaa144644320ebbd94c96034ea37c047 Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Mon, 9 Jan 2023 02:15:58 -0800 Subject: [PATCH 04/19] create-diff-object: Create __patchable_function_entries section for aarch64 The __mcount_loc section contains the addresses of patchable ftrace sites which is used by the ftrace infrastructure in the kernel to create a list of tracable functions and to know where to patch to enable tracing of them. On aarch64 this section is called __patchable_function_entries and is generated by the compiler. Either of __mcount_loc or __patchable_function_entries is recognised by the kernel but for aarch64 use __patchable_function_entries as it is what is expected. Add aarch64 support to kpatch_alloc_mcount_sections(). The SHF_LINK_ORDER section flag must be copied to ensure that it matches to avoid the following: ld: __patchable_function_entries has both ordered [...] and unordered [...] sections Add aarch64 support to kpatch_populate_mcount_sections(). Check for the 2 required NOP instructions on function entry, which may be preceded by a BTI C instruction depending on whether the function is a leaf function. This determines the offset of the patch site. 
Signed-off-by: Suraj Jitindar Singh --- V1->V2: - Don't preserve the __patchable_function_entries section from the patched elf as this is already verified by kpatch_check_func_profiling_calls() - Instead get the patch entry offset by checking for a preceding BTI C instr - Copy the section flags for __patchable_function_entries --- rebased, added sh_link fix from Suraj's later commit "kpatch-build: Enable ARM64 support" Signed-off-by: Pete Swain --- kpatch-build/create-diff-object.c | 75 +++++++++++++++++++++++++++++-- 1 file changed, 72 insertions(+), 3 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 0d0efc046..63e4254f1 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -3695,6 +3695,11 @@ static void kpatch_create_callbacks_objname_rela(struct kpatch_elf *kelf, char * } } +/* + * Allocate the mcount/patchable_function_entry sections which must be done + * before the patched object is torn down so that the section flags can be + * copied. + */ static void kpatch_alloc_mcount_sections(struct kpatch_elf *kelf, struct kpatch_elf *kelfout) { int nr; @@ -3707,10 +3712,36 @@ static void kpatch_alloc_mcount_sections(struct kpatch_elf *kelf, struct kpatch_ nr++; /* create text/rela section pair */ - create_section_pair(kelfout, "__mcount_loc", sizeof(void *), nr); + switch(kelf->arch) { + case AARCH64: { + struct section *sec, *tmp; + + sec = create_section_pair(kelfout, "__patchable_function_entries", sizeof(void *), nr); + + /* + * Depending on the compiler the __patchable_function_entries section + * can be ordered or not, copy this flag to the section we created to + * avoid: + * ld: __patchable_function_entries has both ordered [...] and unordered [...] 
sections + */ + tmp = find_section_by_name(&kelf->sections, "__patchable_function_entries"); + sec->sh.sh_flags |= (tmp->sh.sh_flags & SHF_LINK_ORDER); + sec->sh.sh_link = 1; + break; + } + case PPC64: + case X86_64: + case S390: + create_section_pair(kelfout, "__mcount_loc", sizeof(void *), nr); + break; + default: + ERROR("unsupported arch\n"); + } } /* + * Populate the mcount sections allocated by kpatch_alloc_mcount_sections() + * previously. * This function basically reimplements the functionality of the Linux * recordmcount script, so that patched functions can be recognized by ftrace. * @@ -3726,8 +3757,18 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) void **funcs; unsigned long insn_offset = 0; - - sec = find_section_by_name(&kelf->sections, "__mcount_loc"); + switch(kelf->arch) { + case AARCH64: + sec = find_section_by_name(&kelf->sections, "__patchable_function_entries"); + break; + case PPC64: + case X86_64: + case S390: + sec = find_section_by_name(&kelf->sections, "__mcount_loc"); + break; + default: + ERROR("unsupported arch\n"); + } relasec = sec->rela; nr = (int) (sec->data->d_size / sizeof(void *)); @@ -3744,6 +3785,34 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) } switch(kelf->arch) { + case AARCH64: { + unsigned char *insn; + int i; + + insn = sym->sec->data->d_buf; + insn_offset = 0; + + /* + * If BTI (Branch Target Identification) is enabled then there + * might be an additional 'BTI C' instruction before the two + * patchable function entry 'NOP's. + * i.e. 0xd503245f (little endian) + */ + if (insn[0] == 0x5f) { + if (insn[1] != 0x24 || insn[2] != 0x03 || insn[3] != 0xd5) + ERROR("%s: unexpected instruction in patch section of function\n", sym->name); + insn_offset += 4; + insn += 4; + } + for (i = 0; i < 8; i += 4) { + /* We expect a NOP i.e. 
0xd503201f (little endian) */ + if (insn[i] != 0x1f || insn[i + 1] != 0x20 || + insn[i + 2] != 0x03 || insn [i + 3] != 0xd5) + ERROR("%s: unexpected instruction in patch section of function\n", sym->name); + } + + break; + } case PPC64: { bool found = false; From c9e1ae71688f63c8586c41c28634553783d606df Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Fri, 31 May 2024 15:24:07 -0700 Subject: [PATCH 05/19] kpatch-build: Enable ARM64 support Add the final support required for aarch64 and enable building on that arch. Signed-off-by: Suraj Jitindar Singh --- V1->V2: - Add # shellcheck disable=SC2086 - Add comment to kpatch_is_mapping_symbol() --- README.md | 2 +- kpatch-build/Makefile | 2 +- kpatch-build/create-diff-object.c | 74 +++++++++++++++++++++++++------ kpatch-build/kpatch-build | 3 ++ kpatch-build/kpatch-elf.c | 3 ++ 5 files changed, 69 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 75995fe8c..78fd14bc4 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ Supported Architectures - [x] x86-64 - [x] ppc64le -- [ ] arm64 +- [x] arm64 - [x] s390 [upstream prerequisites](doc/s390-upstream-prerequisites.md) Installation diff --git a/kpatch-build/Makefile b/kpatch-build/Makefile index bebf3cd96..7fb223138 100644 --- a/kpatch-build/Makefile +++ b/kpatch-build/Makefile @@ -22,7 +22,7 @@ PLUGIN_CFLAGS := $(filter-out -Wconversion, $(CFLAGS)) PLUGIN_CFLAGS += -shared -I$(GCC_PLUGINS_DIR)/include \ -Igcc-plugins -fPIC -fno-rtti -O2 -Wall endif -ifeq ($(filter $(ARCH),s390x x86_64 ppc64le),) +ifeq ($(filter $(ARCH),aarch64 s390x x86_64 ppc64le),) $(error Unsupported architecture ${ARCH}, check https://github.com/dynup/kpatch/#supported-architectures) endif diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 63e4254f1..9290440bd 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -173,6 +173,8 @@ static bool is_gcc6_localentry_bundled_sym(struct kpatch_elf *kelf, struct 
symbol *sym) { switch(kelf->arch) { + case AARCH64: + return false; case PPC64: return ((PPC64_LOCAL_ENTRY_OFFSET(sym->sym.st_other) != 0) && sym->sym.st_value == 8); @@ -230,6 +232,24 @@ static struct rela *toc_rela(const struct rela *rela) (unsigned int)rela->addend); } +/* + * Mapping symbols are used to mark and label the transitions between code and + * data in elf files. They begin with a "$" dollar symbol. Don't correlate them + * as they often all have the same name either "$x" to mark the start of code + * or "$d" to mark the start of data. + */ +static bool kpatch_is_mapping_symbol(struct kpatch_elf *kelf, struct symbol *sym) +{ + if (kelf->arch != AARCH64) + return false; + + if (sym->name && sym->name[0] == '$' && + sym->type == STT_NOTYPE && + sym->bind == STB_LOCAL) + return true; + return false; +} + /* * When compiling with -ffunction-sections and -fdata-sections, almost every * symbol gets its own dedicated section. We call such symbols "bundled" @@ -624,6 +644,13 @@ static void kpatch_compare_correlated_section(struct section *sec) goto out; } + /* As above but for __p_f_e users like aarch64 */ + if (!strcmp(sec->name, ".rela__patchable_function_entries") || + !strcmp(sec->name, "__patchable_function_entries")) { + sec->status = SAME; + goto out; + } + if (sec1->sh.sh_size != sec2->sh.sh_size || sec1->data->d_size != sec2->data->d_size || (sec1->rela && !sec2->rela) || @@ -735,7 +762,7 @@ static bool insn_is_load_immediate(struct kpatch_elf *kelf, void *addr) * 51b: e8 00 00 00 00 callq 520 * 51c: R_X86_64_PC32 ___might_sleep-0x4 */ -static bool kpatch_line_macro_change_only(struct kpatch_elf *kelf, +static bool _kpatch_line_macro_change_only(struct kpatch_elf *kelf, struct section *sec) { unsigned long offset, insn1_len, insn2_len; @@ -834,6 +861,23 @@ static bool kpatch_line_macro_change_only(struct kpatch_elf *kelf, return true; } +static bool kpatch_line_macro_change_only(struct kpatch_elf *kelf, + struct section *sec) +{ + switch(kelf->arch) 
{ + case AARCH64: + /* TODO */ + return false; + case PPC64: + case S390: + case X86_64: + return _kpatch_line_macro_change_only(kelf, sec); + default: + ERROR("unsupported arch"); + } + return false; +} + /* * Child functions with "*.cold" names don't have _fentry_ calls, but "*.part", * often do. In the later case, it is not necessary to include the parent @@ -1071,15 +1115,15 @@ static void kpatch_correlate_sections(struct list_head *seclist_orig, } } -static void kpatch_correlate_symbols(struct list_head *symlist_orig, - struct list_head *symlist_patched) +static void kpatch_correlate_symbols(struct kpatch_elf *kelf_orig, + struct kpatch_elf *kelf_patched) { struct symbol *sym_orig, *sym_patched; - list_for_each_entry(sym_orig, symlist_orig, list) { + list_for_each_entry(sym_orig, &kelf_orig->symbols, list) { if (sym_orig->twin) continue; - list_for_each_entry(sym_patched, symlist_patched, list) { + list_for_each_entry(sym_patched, &kelf_patched->symbols, list) { if (kpatch_mangled_strcmp(sym_orig->name, sym_patched->name) || sym_orig->type != sym_patched->type || sym_patched->twin) continue; @@ -1099,6 +1143,9 @@ static void kpatch_correlate_symbols(struct list_head *symlist_orig, !strncmp(sym_orig->name, ".LC", 3)) continue; + if (kpatch_is_mapping_symbol(kelf_orig, sym_orig)) + continue; + /* group section symbols must have correlated sections */ if (sym_orig->sec && sym_orig->sec->sh.sh_type == SHT_GROUP && @@ -1504,7 +1551,7 @@ static void kpatch_correlate_elfs(struct kpatch_elf *kelf_orig, struct kpatch_elf *kelf_patched) { kpatch_correlate_sections(&kelf_orig->sections, &kelf_patched->sections); - kpatch_correlate_symbols(&kelf_orig->symbols, &kelf_patched->symbols); + kpatch_correlate_symbols(kelf_orig, kelf_patched); } static void kpatch_compare_correlated_elements(struct kpatch_elf *kelf) @@ -1620,7 +1667,8 @@ static void kpatch_replace_sections_syms(struct kpatch_elf *kelf) if (is_text_section(relasec->base) && !is_text_section(sym->sec) && - 
is_arch(X86_64) && rela->type == R_X86_64_32S && + ((is_arch(X86_64) && rela->type == R_X86_64_32S) || + (is_arch(AARCH64) && rela->type == R_AARCH64_ABS64)) && rela->addend == (long)sym->sec->sh.sh_size && end == (long)sym->sec->sh.sh_size) { @@ -2417,28 +2465,28 @@ static bool static_call_sites_group_filter(struct lookup_table *lookup, static struct special_section special_sections[] = { { .name = "__bug_table", - .arch = X86_64 | PPC64 | S390, + .arch = AARCH64 | X86_64 | PPC64 | S390, .group_size = bug_table_group_size, }, { .name = ".fixup", - .arch = X86_64 | PPC64 | S390, + .arch = AARCH64 | X86_64 | PPC64 | S390, .group_size = fixup_group_size, }, { .name = "__ex_table", /* must come after .fixup */ - .arch = X86_64 | PPC64 | S390, + .arch = AARCH64 | X86_64 | PPC64 | S390, .group_size = ex_table_group_size, }, { .name = "__jump_table", - .arch = X86_64 | PPC64 | S390, + .arch = AARCH64 | X86_64 | PPC64 | S390, .group_size = jump_table_group_size, .group_filter = jump_table_group_filter, }, { .name = ".printk_index", - .arch = X86_64 | PPC64 | S390, + .arch = AARCH64 | X86_64 | PPC64 | S390, .group_size = printk_index_group_size, }, { @@ -2453,7 +2501,7 @@ static struct special_section special_sections[] = { }, { .name = ".altinstructions", - .arch = X86_64 | S390, + .arch = AARCH64 | X86_64 | S390, .group_size = altinstructions_group_size, }, { diff --git a/kpatch-build/kpatch-build b/kpatch-build/kpatch-build index 673f804df..b302d62e6 100755 --- a/kpatch-build/kpatch-build +++ b/kpatch-build/kpatch-build @@ -381,6 +381,9 @@ find_special_section_data() { # Arch-specific features case "$ARCH" in + "aarch64") + check[a]=true # alt_instr + ;; "x86_64") check[a]=true # alt_instr kernel_version_gte 5.10.0 && check[s]=true # static_call_site diff --git a/kpatch-build/kpatch-elf.c b/kpatch-build/kpatch-elf.c index c9a0188fc..751b76e76 100644 --- a/kpatch-build/kpatch-elf.c +++ b/kpatch-build/kpatch-elf.c @@ -156,6 +156,8 @@ struct rela 
*find_rela_by_offset(struct section *relasec, unsigned int offset) unsigned int absolute_rela_type(struct kpatch_elf *kelf) { switch(kelf->arch) { + case AARCH64: + return R_AARCH64_ABS64; case PPC64: return R_PPC64_ADDR64; case X86_64: @@ -225,6 +227,7 @@ long rela_target_offset(struct kpatch_elf *kelf, struct section *relasec, struct section *sec = relasec->base; switch(kelf->arch) { + case AARCH64: case PPC64: add_off = 0; break; From 17cada33ad86b92860e7efa9d4ea873ce7d85c13 Mon Sep 17 00:00:00 2001 From: Joe Lawrence Date: Mon, 14 Feb 2022 21:37:50 -0500 Subject: [PATCH 06/19] create-diff-object: add aarch64 ASSERT_RTNL macro detection On aarch64, only the ASSERT_RTNL macro is affected by source line number changes (WARN, BUG, etc. no longer embed line numbers in the instruction stream.) A small test function that invokes the macro for a line change from 42 to 43: 0000000000000000 : 0: d503245f bti c 4: d503201f nop 8: d503201f nop c: d503233f paciasp 10: a9bf7bfd stp x29, x30, [sp, #-16]! 
14: 910003fd mov x29, sp 18: 94000000 bl 0 18: R_AARCH64_CALL26 rtnl_is_locked 1c: 34000080 cbz w0, 2c 20: a8c17bfd ldp x29, x30, [sp], #16 24: d50323bf autiasp 28: d65f03c0 ret 2c: 90000000 adrp x0, 0 2c: R_AARCH64_ADR_PREL_PG_HI21 .data.once 30: 39400001 ldrb w1, [x0] 30: R_AARCH64_LDST8_ABS_LO12_NC .data.once 34: 35ffff61 cbnz w1, 20 38: 52800022 mov w2, #0x1 // #1 3c: 90000001 adrp x1, 0 3c: R_AARCH64_ADR_PREL_PG_HI21 .rodata.str1.8+0x8 40: 39000002 strb w2, [x0] 40: R_AARCH64_LDST8_ABS_LO12_NC .data.once 44: 91000021 add x1, x1, #0x0 44: R_AARCH64_ADD_ABS_LO12_NC .rodata.str1.8+0x8 - 48: 52800542 mov w2, #0x2a // #42 + 48: 52800562 mov w2, #0x2b // #43 4c: 90000000 adrp x0, 0 4c: R_AARCH64_ADR_PREL_PG_HI21 .rodata.str1.8+0x20 50: 91000000 add x0, x0, #0x0 50: R_AARCH64_ADD_ABS_LO12_NC .rodata.str1.8+0x20 54: 94000000 bl 0 <__warn_printk> 54: R_AARCH64_CALL26 __warn_printk 58: d4210000 brk #0x800 5c: 17fffff1 b 20 Create an implementation of kpatch_line_macro_change_only() for aarch64 modeled after the other architectures. Only look for relocations to __warn_printk that ASSERT_RTNL invokes. Based-on-s390x-code-by: C. 
Erastus Toe Signed-off-by: Joe Lawrence --- kpatch-build/create-diff-object.c | 65 ++++++++++++++++++++++++++++++- kpatch-build/kpatch-elf.c | 2 + 2 files changed, 65 insertions(+), 2 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 9290440bd..6f94ee87a 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -861,13 +861,74 @@ static bool _kpatch_line_macro_change_only(struct kpatch_elf *kelf, return true; } +static bool _kpatch_line_macro_change_only_aarch64(struct kpatch_elf *kelf, + struct section *sec) +{ + unsigned char *start1, *start2; + unsigned long size, offset, insn_len; + struct rela *rela; + int lineonly = 0, found; + + insn_len = insn_length(kelf, NULL); + + if (sec->status != CHANGED || + is_rela_section(sec) || + !is_text_section(sec) || + sec->sh.sh_size != sec->twin->sh.sh_size || + !sec->rela || + sec->rela->status != SAME) + return false; + + start1 = sec->twin->data->d_buf; + start2 = sec->data->d_buf; + size = sec->sh.sh_size; + for (offset = 0; offset < size; offset += insn_len) { + if (!memcmp(start1 + offset, start2 + offset, insn_len)) + continue; + + /* Verify mov w2 */ + if (((start1[offset] & 0b11111) != 0x2) || (start1[offset+3] != 0x52) || + ((start1[offset] & 0b11111) != 0x2) || (start2[offset+3] != 0x52)) + return false; + + /* + * Verify zero or more string relas followed by a + * warn_slowpath_* or another similar rela. 
+ */ + found = 0; + list_for_each_entry(rela, &sec->rela->relas, list) { + if (rela->offset < offset + insn_len) + continue; + if (rela->string) + continue; + if (!strncmp(rela->sym->name, "__warned.", 9) || + !strncmp(rela->sym->name, "__already_done.", 15)) + continue; + if (!strcmp(rela->sym->name, "__warn_printk")) { + found = 1; + break; + } + return false; + } + if (!found) + return false; + + lineonly = 1; + } + + if (!lineonly) + ERROR("no instruction changes detected for changed section %s", + sec->name); + + return true; +} + static bool kpatch_line_macro_change_only(struct kpatch_elf *kelf, struct section *sec) { switch(kelf->arch) { case AARCH64: - /* TODO */ - return false; + return _kpatch_line_macro_change_only_aarch64(kelf, sec); case PPC64: case S390: case X86_64: diff --git a/kpatch-build/kpatch-elf.c b/kpatch-build/kpatch-elf.c index 751b76e76..d03731283 100644 --- a/kpatch-build/kpatch-elf.c +++ b/kpatch-build/kpatch-elf.c @@ -277,6 +277,8 @@ unsigned int insn_length(struct kpatch_elf *kelf, void *addr) char *insn = addr; switch(kelf->arch) { + case AARCH64: + return 4; case X86_64: insn_init(&decoded_insn, addr, 1); From 906903084f49d168a4326b77c1dd64455f3dbd7f Mon Sep 17 00:00:00 2001 From: Joe Lawrence Date: Tue, 4 Oct 2022 22:39:58 -0700 Subject: [PATCH 07/19] testing: add aarch unit tests Update the kpatch-unit-test-objs submodule reference to add aarch64 unit tests. 
Signed-off-by: Joe Lawrence --- .gitmodules | 3 ++- test/unit/Makefile | 2 +- test/unit/objs | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.gitmodules b/.gitmodules index 49b10248a..c0e5d93a6 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,4 @@ [submodule "test/unit/objs"] path = test/unit/objs - url = https://github.com/dynup/kpatch-unit-test-objs.git + url = https://github.com/mihails-strasuns/kpatch-unit-test-objs.git + branch = arm64 diff --git a/test/unit/Makefile b/test/unit/Makefile index fde1717dd..e3ed7d718 100644 --- a/test/unit/Makefile +++ b/test/unit/Makefile @@ -1,4 +1,4 @@ -ARCHES ?= ppc64le x86_64 +ARCHES ?= aarch64 ppc64le x86_64 .PHONY: all clean submodule-check diff --git a/test/unit/objs b/test/unit/objs index 31f16a29c..f7b9c85b1 160000 --- a/test/unit/objs +++ b/test/unit/objs @@ -1 +1 @@ -Subproject commit 31f16a29c6c3dc9ac101d8ca780723a6667c219e +Subproject commit f7b9c85b11fd6cd087829b5754e590b8ef6f17a5 From 33e15b8416f2ba3f6d9f266aae83995f736403c1 Mon Sep 17 00:00:00 2001 From: Misono Tomohiro Date: Wed, 7 Sep 2022 10:38:01 +0900 Subject: [PATCH 08/19] create-diff-object: Fix mapping symbol handling on aarch64 It seems mapping symbols in aarch64 elf has section size of 0. So, exclude it in section symbol replacing code just like kpatch_correlate_symbols(). This fixes the data-read-mostly unit test on aarch64. Signed-off-by: Misono Tomohiro --- kpatch-build/create-diff-object.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 6f94ee87a..5c11de007 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -1766,6 +1766,9 @@ static void kpatch_replace_sections_syms(struct kpatch_elf *kelf) */ } else if (target_off == start && target_off == end) { + if(kpatch_is_mapping_symbol(kelf, sym)) + continue; + /* * Allow replacement for references to * empty symbols. 
From ab3c592682c26ef7f06ded673c4c7d64d414ed03 Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Fri, 28 Jul 2023 16:34:10 -0700 Subject: [PATCH 09/19] kpatch-syscall.h: add aarch64 helper Copy from kernel source tree. Signed-off-by: Misono Tomohiro --- kmod/patch/kpatch-syscall.h | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/kmod/patch/kpatch-syscall.h b/kmod/patch/kpatch-syscall.h index ec8d24d53..84b90157c 100644 --- a/kmod/patch/kpatch-syscall.h +++ b/kmod/patch/kpatch-syscall.h @@ -186,7 +186,34 @@ # endif /* LINUX_VERSION_CODE */ -#endif /* CONFIG_X86_64 */ +#elif defined(CONFIG_ARM64) + +/* arm64/include/asm/syscall_wrapper.h versions */ + +#define SC_ARM64_REGS_TO_ARGS(x, ...) \ + __MAP(x,__SC_ARGS \ + ,,regs->regs[0],,regs->regs[1],,regs->regs[2] \ + ,,regs->regs[3],,regs->regs[4],,regs->regs[5]) + +#define __KPATCH_SYSCALL_DEFINEx(x, name, ...) \ + asmlinkage long __arm64_sys##name(const struct pt_regs *regs); \ + ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO); \ + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ + static inline long __kpatch_do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + asmlinkage long __arm64_sys##name(const struct pt_regs *regs) \ + { \ + return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \ + } \ + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ + { \ + long ret = __kpatch_do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \ + __MAP(x,__SC_TEST,__VA_ARGS__); \ + __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \ + return ret; \ + } \ + static inline long __kpatch_do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) + +#endif /* which arch */ #ifndef __KPATCH_SYSCALL_DEFINEx From 9146fe387e049a251e9fde7296e1ef71cd6ba781 Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Mon, 9 Jan 2023 02:16:57 -0800 Subject: [PATCH 10/19] arm64: per-func __patchable_function_entries sections new clang toolchain on arm64 produces individual __patchable_function_entries sections for 
each patchable func, in -ffunction-sections mode, rather than traditional single __mcount_loc section. Bend the existing logic to detect this multiplicity in the incoming kelf objects, and allocate N identical one-entry sections. These are retrieved as needed by a new function: find_nth_section_by_name() and attached to the .text sections they describe. These __pfe section are not actually arm64-specific, but a generic enhancement across gcc & clang, to allow better garbage collection of unreferenced object sections, and mcount/pfe objects which refer to them. The __pfe sections are combined in kernel-or-module final link, from 5.19.9's 9440155ccb948f8e3ce5308907a2e7378799be60. From clang-11, __pfe is supported for x86, though not yet used by kernel The split between allocate/populate phases here is necessary to enumerate/populate the outgoing section-headers before beginning to produce output sections Also adds some missing \n to log_debug()s Signed-off-by: Pete Swain --- kpatch-build/create-diff-object.c | 120 ++++++++++++++++++++++-------- kpatch-build/kpatch-elf.c | 32 ++++++-- kpatch-build/kpatch-elf.h | 3 + 3 files changed, 119 insertions(+), 36 deletions(-) mode change 100644 => 100755 kpatch-build/kpatch-elf.c diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 5c11de007..c1bc87dc6 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -70,6 +70,7 @@ enum subsection { enum loglevel loglevel = NORMAL; bool KLP_ARCH; +bool multi_pfe; int jump_label_errors, static_call_errors; @@ -3273,7 +3274,7 @@ static void kpatch_create_patches_sections(struct kpatch_elf *kelf, if (sym->bind == STB_LOCAL && symbol.global) ERROR("can't find local symbol '%s' in symbol table", sym->name); - log_debug("lookup for %s: obj=%s sympos=%lu size=%lu", + log_debug("lookup for %s: obj=%s sympos=%lu size=%lu\n", sym->name, symbol.objname, symbol.sympos, symbol.size); @@ -3662,7 +3663,7 @@ static void 
kpatch_create_intermediate_sections(struct kpatch_elf *kelf, ERROR("can't find symbol '%s' in symbol table", rela->sym->name); - log_debug("lookup for %s: obj=%s sympos=%lu", + log_debug("lookup for %s: obj=%s sympos=%lu\n", rela->sym->name, symbol.objname, symbol.sympos); @@ -3816,19 +3817,24 @@ static void kpatch_alloc_mcount_sections(struct kpatch_elf *kelf, struct kpatch_ { int nr; struct symbol *sym; + int text_idx = 0; nr = 0; - list_for_each_entry(sym, &kelfout->symbols, list) + list_for_each_entry(sym, &kelfout->symbols, list) { if (sym->type == STT_FUNC && sym->status != SAME && - sym->has_func_profiling) + sym->has_func_profiling) { + text_idx = sym->sec->index; nr++; + } + } /* create text/rela section pair */ switch(kelf->arch) { case AARCH64: { - struct section *sec, *tmp; - - sec = create_section_pair(kelfout, "__patchable_function_entries", sizeof(void *), nr); + struct section *sec; + int entries = multi_pfe ? 1 : nr; + int copies = multi_pfe ? nr : 1; + int flags = 0, rflags = 0; /* * Depending on the compiler the __patchable_function_entries section @@ -3836,9 +3842,26 @@ static void kpatch_alloc_mcount_sections(struct kpatch_elf *kelf, struct kpatch_ * avoid: * ld: __patchable_function_entries has both ordered [...] and unordered [...] 
sections */ - tmp = find_section_by_name(&kelf->sections, "__patchable_function_entries"); - sec->sh.sh_flags |= (tmp->sh.sh_flags & SHF_LINK_ORDER); - sec->sh.sh_link = 1; + sec = find_section_by_name(&kelf->sections, "__patchable_function_entries"); + if (sec) { + flags = (sec->sh.sh_flags & (SHF_LINK_ORDER|SHF_WRITE)); + if (sec->rela) + rflags = (sec->rela->sh.sh_flags & (SHF_LINK_ORDER|SHF_WRITE)); + } + + for (nr = 0; nr < copies; nr++) { + sec = create_section_pair(kelfout, + "__patchable_function_entries", + sizeof(void *), entries); + + sec->sh.sh_flags |= flags; + if (sec->rela) + sec->rela->sh.sh_flags |= rflags; + if (multi_pfe) + sec->sh.sh_link = 0; + else + sec->sh.sh_link = text_idx; + } break; } case PPC64: @@ -3867,11 +3890,14 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) struct symbol *sym; struct rela *rela, *mcount_rela; void **funcs; - unsigned long insn_offset = 0; switch(kelf->arch) { case AARCH64: - sec = find_section_by_name(&kelf->sections, "__patchable_function_entries"); + if (multi_pfe) + sec = NULL; + else + sec = find_section_by_name(&kelf->sections, + "__patchable_function_entries"); break; case PPC64: case X86_64: @@ -3881,12 +3907,20 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) default: ERROR("unsupported arch\n"); } - relasec = sec->rela; - nr = (int) (sec->data->d_size / sizeof(void *)); + + if (multi_pfe) { + relasec = NULL; + nr = 0; + } else { + relasec = sec->rela; + nr = (int) (sec->data->d_size / sizeof(void *)); + } /* populate sections */ index = 0; list_for_each_entry(sym, &kelf->symbols, list) { + unsigned long insn_offset = 0; + if (sym->type != STT_FUNC || sym->status == SAME) continue; @@ -3902,7 +3936,6 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) int i; insn = sym->sec->data->d_buf; - insn_offset = 0; /* * If BTI (Branch Target Identification) is enabled then there @@ -3993,6 +4026,18 @@ static void 
kpatch_populate_mcount_sections(struct kpatch_elf *kelf) ERROR("unsupported arch"); } + if (multi_pfe) { + sec = find_nth_section_by_name(&kelf->sections, nr, "__patchable_function_entries"); + if (!sec) + ERROR("cannot retrieve pre-allocated __pfe #%d\n", nr); + + relasec = sec->rela; + sym->sec->pfe = sec; + sec->sh.sh_link = sec->index; + + nr++; + } + /* * 'rela' points to the mcount/fentry call. * @@ -4002,7 +4047,13 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) mcount_rela->sym = sym; mcount_rela->type = absolute_rela_type(kelf); mcount_rela->addend = insn_offset - sym->sym.st_value; - mcount_rela->offset = (unsigned int) (index * sizeof(*funcs)); + + if (multi_pfe) { + mcount_rela->offset = 0; + sec = NULL; + } else { + mcount_rela->offset = (unsigned int) (index * sizeof(*funcs)); + } index++; } @@ -4161,6 +4212,7 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) struct symbol *sym; struct rela *rela; unsigned char *insn; + list_for_each_entry(sym, &kelf->symbols, list) { if (sym->type != STT_FUNC || sym->is_pfx || !sym->sec || !sym->sec->rela) @@ -4168,21 +4220,23 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) switch(kelf->arch) { case AARCH64: { - struct section *sec = find_section_by_name(&kelf->sections, - "__patchable_function_entries"); - /* - * If we can't find the __patchable_function_entries section or - * there are no relocations in it then not patchable. 
- */ - if (!sec || !sec->rela) - return; - list_for_each_entry(rela, &sec->rela->relas, list) { - if (rela->sym->sec && sym->sec == rela->sym->sec) { - sym->has_func_profiling = 1; - break; + struct section *sec; + + list_for_each_entry(sec, &kelf->sections, list) { + if (strcmp(sec->name, "__patchable_function_entries")) + continue; + if (multi_pfe && sym->sec->pfe != sec) + continue; + if (!sec->rela) + continue; + + list_for_each_entry(rela, &sec->rela->relas, list) { + if (rela->sym->sec && sym->sec == rela->sym->sec) { + sym->has_func_profiling = 1; + goto next_symbol; + } } } - break; } case PPC64: @@ -4215,6 +4269,7 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) default: ERROR("unsupported arch"); } + next_symbol:; } } @@ -4262,6 +4317,12 @@ static error_t parse_opt (int key, char *arg, struct argp_state *state) return 0; } +static bool has_multi_pfe(struct kpatch_elf *kelf) +{ + return !!find_nth_section_by_name(&kelf->sections, 1, + "__patchable_function_entries"); +} + static struct argp argp = { options, parse_opt, args_doc, NULL }; int main(int argc, char *argv[]) @@ -4295,6 +4356,7 @@ int main(int argc, char *argv[]) kelf_orig = kpatch_elf_open(orig_obj); kelf_patched = kpatch_elf_open(patched_obj); + multi_pfe = has_multi_pfe(kelf_orig) || has_multi_pfe(kelf_patched); kpatch_find_func_profiling_calls(kelf_orig); kpatch_find_func_profiling_calls(kelf_patched); diff --git a/kpatch-build/kpatch-elf.c b/kpatch-build/kpatch-elf.c old mode 100644 new mode 100755 index d03731283..d9eaf21d0 --- a/kpatch-build/kpatch-elf.c +++ b/kpatch-build/kpatch-elf.c @@ -108,17 +108,29 @@ struct section *find_section_by_index(struct list_head *list, unsigned int index return NULL; } -struct section *find_section_by_name(struct list_head *list, const char *name) +struct section *find_nth_section_by_name( struct list_head *list, int nth, const char *name) { struct section *sec; - list_for_each_entry(sec, list, list) - if (!strcmp(sec->name, name)) - 
return sec; + if (!list || !list->next || !name) + return NULL; + + list_for_each_entry(sec, list, list) { + if (strcmp(sec->name, name)) + continue; + if (--nth >= 0) + continue; + return sec; + } return NULL; } +struct section *find_section_by_name(struct list_head *list, const char *name) +{ + return find_nth_section_by_name(list, 0, name); +} + struct symbol *find_symbol_by_index(struct list_head *list, size_t index) { struct symbol *sym; @@ -1007,11 +1019,17 @@ void kpatch_reindex_elements(struct kpatch_elf *kelf) index = 0; list_for_each_entry(sym, &kelf->symbols, list) { sym->index = index++; - if (sym->sec) + if (sym->sec) { sym->sym.st_shndx = (unsigned short)sym->sec->index; - else if (sym->sym.st_shndx != SHN_ABS && - sym->sym.st_shndx != SHN_LIVEPATCH) + if (sym->sec->pfe) { + sym->sec->pfe->sh.sh_link = sym->sec->index; + if (sym->sec->pfe->rela) + sym->sec->pfe->rela->sh.sh_info = sym->sec->index; + } + } else if (sym->sym.st_shndx != SHN_ABS && + sym->sym.st_shndx != SHN_LIVEPATCH) { sym->sym.st_shndx = SHN_UNDEF; + } } } diff --git a/kpatch-build/kpatch-elf.h b/kpatch-build/kpatch-elf.h index 7f787e526..64ebf8863 100644 --- a/kpatch-build/kpatch-elf.h +++ b/kpatch-build/kpatch-elf.h @@ -65,6 +65,7 @@ struct section { struct symbol *secsym, *sym; }; }; + struct section *pfe; /* arm64 per-func __patchable_function_entries */ }; enum symbol_strip { @@ -138,6 +139,8 @@ bool is_debug_section(struct section *sec); struct section *find_section_by_index(struct list_head *list, unsigned int index); struct section *find_section_by_name(struct list_head *list, const char *name); +struct section *find_nth_section_by_name(struct list_head *list, int nth, + const char *name); struct symbol *find_symbol_by_index(struct list_head *list, size_t index); struct symbol *find_symbol_by_name(struct list_head *list, const char *name); struct rela *find_rela_by_offset(struct section *relasec, unsigned int offset); From dbebe52b685d5baa7f474d7ce5a7ac744649e04c Mon Sep 17 
00:00:00 2001 From: Pete Swain Date: Wed, 12 Jul 2023 08:13:27 -0700 Subject: [PATCH 11/19] arm64 leaf-function fix On arm64, kpatch_find_func_profiling_calls() was skipping leaf functions, with no relocations, so they weren't patchable. Here other archs need to walk a function's reloc entries to check for __fentry__ or __mcount, so it's valid to skip over functions without sym->sec->rela, because they cannot be patchable, else they would have at least an __fentry__ call relocation. But arm64 marks functions patchable in a different way, with per-func __patchable_function_entries sections referring _to_ the func, not relocations _within_ the func, so a function w/o relocations for text or data can still be patchable. Move the sym->sec->rela check to the per-arch paths. This allows gcc-static-local-var-5.patch to generate livepatch, on arm64 & x86 Suggested-By: Bill Wendling Signed-off-by: Pete Swain --- kpatch-build/create-diff-object.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index c1bc87dc6..92c38b82f 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -4214,8 +4214,7 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) unsigned char *insn; list_for_each_entry(sym, &kelf->symbols, list) { - if (sym->type != STT_FUNC || sym->is_pfx || - !sym->sec || !sym->sec->rela) + if (sym->type != STT_FUNC || sym->is_pfx || !sym->sec) continue; switch(kelf->arch) { @@ -4240,6 +4239,8 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) break; } case PPC64: + if (!sym->sec->rela) + continue; list_for_each_entry(rela, &sym->sec->rela->relas, list) { if (!strcmp(rela->sym->name, "_mcount")) { sym->has_func_profiling = 1; @@ -4248,6 +4249,8 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) } break; case X86_64: + if (!sym->sec->rela) + continue; rela = 
list_first_entry(&sym->sec->rela->relas, struct rela, list); if ((rela->type != R_X86_64_NONE && @@ -4259,6 +4262,8 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) sym->has_func_profiling = 1; break; case S390: + if (!sym->sec->rela) + continue; /* Check for compiler generated fentry nop - jgnop 0 */ insn = sym->sec->data->d_buf; if (insn[0] == 0xc0 && insn[1] == 0x04 && From 6b699759919daa896aa5f2a5ad36b0edaa8ab8a9 Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Sun, 16 Oct 2022 22:55:44 -0700 Subject: [PATCH 12/19] kpatch-cc skip arch/arm64/kernel/vdso*/* Signed-off-by: Pete Swain --- kpatch-build/kpatch-cc | 1 + 1 file changed, 1 insertion(+) diff --git a/kpatch-build/kpatch-cc b/kpatch-build/kpatch-cc index 17aae25b6..d5ec99362 100755 --- a/kpatch-build/kpatch-cc +++ b/kpatch-build/kpatch-cc @@ -42,6 +42,7 @@ if [[ "$TOOLCHAINCMD" =~ ^(.*-)?gcc$ || "$TOOLCHAINCMD" =~ ^(.*-)?clang$ ]] ; th arch/s390/boot/*|\ arch/s390/purgatory/*|\ arch/s390/kernel/vdso64/*|\ + arch/arm64/kernel/vdso*/*|\ drivers/firmware/efi/libstub/*|\ init/version.o|\ init/version-timestamp.o|\ From db9c3abb4ef2a9b8e7efe20ef6746873e6673416 Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Mon, 9 Jan 2023 02:18:33 -0800 Subject: [PATCH 13/19] adapt to clang/arm64 naming New toolchain/arch, new conventions for section/label/etc names gcc's .LCx symbols point to string literals in '.rodata..str1.*' sections. Clang creates similar .Ltmp%d symbols in '.rodata.str' The function is_string_literal_section() generalized (too much?) 
to match either - clang's/arm64 /^\.rodata\.str$/ - gcc's /^\.rodata\./ && /\.str1\./ Various matchers for .data.unlikely .bss.unlikely replaced by is_data_unlikely_section() generalized to match - gcc's ".data.unlikely" - clang's ".(data|bss).module_name.unlikely" .data.once handled similarly Signed-off-by: Pete Swain --- kpatch-build/create-diff-object.c | 34 +++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 92c38b82f..5cb15ac2b 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -354,6 +354,28 @@ static bool is_string_literal_section(struct section *sec) return !strncmp(sec->name, ".rodata.", 8) && strstr(sec->name, ".str"); } +/* gcc's ".data.unlikely" or clang's ".(data|bss).module_name.unlikely" */ +static bool is_data_unlikely_section(const char *name) +{ + size_t len = strlen(name); + + return (len >= 5 + 8 && + ((!strncmp(name, ".data.", 6) || + !strncmp(name, ".bss.", 5)) && + strstr(name + len - 9, ".unlikely"))); +} + +/* either ".data.once" or clang's ".(data|bss).module_name.once" */ +static bool is_data_once_section(const char *name) +{ + size_t len = strlen(name); + + return (len >= 5 + 4 && + (!strncmp(name, ".data.", 6) || + !strncmp(name, ".bss.", 5)) && + strstr(name + len - 5, ".once")); +} + /* * This function detects whether the given symbol is a "special" static local * variable (for lack of a better term). @@ -395,7 +417,7 @@ static bool is_special_static(struct symbol *sym) if (sym->type != STT_OBJECT || sym->bind != STB_LOCAL) return false; - if (!strcmp(sym->sec->name, ".data.once")) + if (is_data_once_section(sym->sec->name)) return true; for (var_name = var_names; *var_name; var_name++) { @@ -1200,9 +1222,11 @@ static void kpatch_correlate_symbols(struct kpatch_elf *kelf_orig, * The .LCx symbols point to string literals in * '.rodata..str1.*' sections. 
They get included * in kpatch_include_standard_elements(). + * Clang creates similar .Ltmp%d symbols in .rodata.str */ if (sym_orig->type == STT_NOTYPE && - !strncmp(sym_orig->name, ".LC", 3)) + !(strncmp(sym_orig->name, ".LC", 3) && + strncmp(sym_orig->name, ".Ltmp", 5))) continue; if (kpatch_is_mapping_symbol(kelf_orig, sym_orig)) @@ -1847,8 +1871,10 @@ static void kpatch_verify_patchability(struct kpatch_elf *kelf) * (.data.unlikely and .data.once is ok b/c it only has __warned vars) */ if (sec->include && sec->status != NEW && - (!strncmp(sec->name, ".data", 5) || !strncmp(sec->name, ".bss", 4)) && - (strcmp(sec->name, ".data.unlikely") && strcmp(sec->name, ".data.once"))) { + (!strncmp(sec->name, ".data", 5) || + !strncmp(sec->name, ".bss", 4)) && + !is_data_once_section(sec->name) && + !is_data_unlikely_section(sec->name)) { log_normal("data section %s selected for inclusion\n", sec->name); errs++; From a55965eee2e87cdd4c759bc3b567b308aa3ee842 Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Tue, 31 Jan 2023 18:15:08 -0800 Subject: [PATCH 14/19] create-diff-object: merge aarch64 kpatch_line_macro_change_only() Generalized kpatch_line_macro_change_only() & insn_is_load_immediate() to collapse the aarch64 support back into parent. I'm assuming the 3rd start1 of the original /* Verify mov w2 */ if (((start1[offset] & 0b11111) != 0x2) || (start1[offset+3] != 0x52) || ((start1[offset] & 0b11111) != 0x2) || (start2[offset+3] != 0x52)) was a typo for start2. That's now absorbed into insn_is_load_immediate() leaving just one aarch64-specific piece: thinning out the match-list for diagnosing a __LINE__ reference, to just "__warn_printk".
--- kpatch-build/create-diff-object.c | 96 ++++++------------------------- 1 file changed, 16 insertions(+), 80 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 5cb15ac2b..7189a0051 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -752,6 +752,12 @@ static bool insn_is_load_immediate(struct kpatch_elf *kelf, void *addr) break; + case AARCH64: + /* Verify mov w2 */ + if ((insn[0] & 0b11111) == 0x2 && insn[3] == 0x52) + return true; + break; + default: ERROR("unsupported arch"); } @@ -785,13 +791,14 @@ static bool insn_is_load_immediate(struct kpatch_elf *kelf, void *addr) * 51b: e8 00 00 00 00 callq 520 * 51c: R_X86_64_PC32 ___might_sleep-0x4 */ -static bool _kpatch_line_macro_change_only(struct kpatch_elf *kelf, +static bool kpatch_line_macro_change_only(struct kpatch_elf *kelf, struct section *sec) { unsigned long offset, insn1_len, insn2_len; void *data1, *data2, *insn1, *insn2; struct rela *r, *rela; bool found, found_any = false; + bool warn_printk_only = (kelf->arch == AARCH64); if (sec->status != CHANGED || is_rela_section(sec) || @@ -855,8 +862,15 @@ static bool _kpatch_line_macro_change_only(struct kpatch_elf *kelf, !strncmp(rela->sym->name, "__func__.", 9)) continue; + if (!strcmp(rela->sym->name, "__warn_printk")) { + found = true; + break; + } + + if (warn_printk_only) + return false; + if (!strncmp(rela->sym->name, "warn_slowpath_", 14) || - !strcmp(rela->sym->name, "__warn_printk") || !strcmp(rela->sym->name, "__might_sleep") || !strcmp(rela->sym->name, "___might_sleep") || !strcmp(rela->sym->name, "__might_fault") || @@ -884,84 +898,6 @@ static bool _kpatch_line_macro_change_only(struct kpatch_elf *kelf, return true; } -static bool _kpatch_line_macro_change_only_aarch64(struct kpatch_elf *kelf, - struct section *sec) -{ - unsigned char *start1, *start2; - unsigned long size, offset, insn_len; - struct rela *rela; - int lineonly = 0, found; - - insn_len = 
insn_length(kelf, NULL); - - if (sec->status != CHANGED || - is_rela_section(sec) || - !is_text_section(sec) || - sec->sh.sh_size != sec->twin->sh.sh_size || - !sec->rela || - sec->rela->status != SAME) - return false; - - start1 = sec->twin->data->d_buf; - start2 = sec->data->d_buf; - size = sec->sh.sh_size; - for (offset = 0; offset < size; offset += insn_len) { - if (!memcmp(start1 + offset, start2 + offset, insn_len)) - continue; - - /* Verify mov w2 */ - if (((start1[offset] & 0b11111) != 0x2) || (start1[offset+3] != 0x52) || - ((start1[offset] & 0b11111) != 0x2) || (start2[offset+3] != 0x52)) - return false; - - /* - * Verify zero or more string relas followed by a - * warn_slowpath_* or another similar rela. - */ - found = 0; - list_for_each_entry(rela, &sec->rela->relas, list) { - if (rela->offset < offset + insn_len) - continue; - if (rela->string) - continue; - if (!strncmp(rela->sym->name, "__warned.", 9) || - !strncmp(rela->sym->name, "__already_done.", 15)) - continue; - if (!strcmp(rela->sym->name, "__warn_printk")) { - found = 1; - break; - } - return false; - } - if (!found) - return false; - - lineonly = 1; - } - - if (!lineonly) - ERROR("no instruction changes detected for changed section %s", - sec->name); - - return true; -} - -static bool kpatch_line_macro_change_only(struct kpatch_elf *kelf, - struct section *sec) -{ - switch(kelf->arch) { - case AARCH64: - return _kpatch_line_macro_change_only_aarch64(kelf, sec); - case PPC64: - case S390: - case X86_64: - return _kpatch_line_macro_change_only(kelf, sec); - default: - ERROR("unsupported arch"); - } - return false; -} - /* * Child functions with "*.cold" names don't have _fentry_ calls, but "*.part", * often do. 
In the later case, it is not necessary to include the parent From 8ac88e50356e0e80cb7e0b4bc21d57b233de82ff Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Fri, 28 Jul 2023 16:35:45 -0700 Subject: [PATCH 15/19] create-diff-object: keep ubsan section If CONFIG_UBSAN is enabled, ubsan section (.data..Lubsan_{data,type}) can be created. Keep them unconditionally. NOTE: This patch needs to be verified. Signed-off-by: Misono Tomohiro --- kpatch-build/create-diff-object.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 7189a0051..bb9ad99e9 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -1646,6 +1646,7 @@ static void kpatch_replace_sections_syms(struct kpatch_elf *kelf) !strcmp(rela->sym->name, ".fixup") || !strcmp(rela->sym->name, ".altinstr_replacement") || !strcmp(rela->sym->name, ".altinstr_aux") || + !strncmp(rela->sym->name, ".data..Lubsan", 13) || !strcmp(rela->sym->name, ".text..refcount") || !strncmp(rela->sym->name, "__ftr_alt_", 10)) continue; @@ -1810,7 +1811,8 @@ static void kpatch_verify_patchability(struct kpatch_elf *kelf) (!strncmp(sec->name, ".data", 5) || !strncmp(sec->name, ".bss", 4)) && !is_data_once_section(sec->name) && - !is_data_unlikely_section(sec->name)) { + !is_data_unlikely_section(sec->name) && + strncmp(sec->name, ".data..Lubsan", 13)) { log_normal("data section %s selected for inclusion\n", sec->name); errs++; @@ -1906,6 +1908,7 @@ static void kpatch_include_standard_elements(struct kpatch_elf *kelf) !strcmp(sec->name, ".symtab") || !strcmp(sec->name, ".toc") || !strcmp(sec->name, ".rodata") || + !strncmp(sec->name, ".data..Lubsan", 13) || is_string_literal_section(sec)) { kpatch_include_section(sec); } From d7a3c475d53696635a6ca2f79a4db7744fc5611f Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Thu, 10 Aug 2023 10:43:28 -0700 Subject: [PATCH 16/19] uninit var in kpatch-elf.c Initialize add_off 
earlier, so it's obviously never used uninitialized. Clang was warning on this, even if gcc was not. No functional change, the only path which left it undefined would call ERROR() anyway. --- kpatch-build/kpatch-elf.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kpatch-build/kpatch-elf.c b/kpatch-build/kpatch-elf.c index d9eaf21d0..049062bf5 100755 --- a/kpatch-build/kpatch-elf.c +++ b/kpatch-build/kpatch-elf.c @@ -235,13 +235,12 @@ static void rela_insn(const struct section *sec, const struct rela *rela, long rela_target_offset(struct kpatch_elf *kelf, struct section *relasec, struct rela *rela) { - long add_off; + long add_off = 0; struct section *sec = relasec->base; switch(kelf->arch) { case AARCH64: case PPC64: - add_off = 0; break; case X86_64: if (!is_text_section(sec) || From dc2864904f6ae9add92575c086b1a0670de21188 Mon Sep 17 00:00:00 2001 From: zimao Date: Mon, 7 Aug 2023 21:56:50 +0000 Subject: [PATCH 17/19] create-diff-object: Remove the multi_pfe flag. In ARM64, every function section should have its own pfe section. There is a bug in GCC 11/12 which generates only a single pfe section for all functions. The bug has been fixed in GCC 13.1. As the create-diff-object is generating the pfe sections on its own, we should also fix this bug, instead of trying to repeat the bug. -- Adjusted whitespace in Zimao's proposed code. 
Signed-off-by: Pete Swain --- kpatch-build/create-diff-object.c | 159 ++++++++++-------------------- kpatch-build/kpatch-elf.c | 6 +- 2 files changed, 57 insertions(+), 108 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index bb9ad99e9..185b54a50 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -70,7 +70,6 @@ enum subsection { enum loglevel loglevel = NORMAL; bool KLP_ARCH; -bool multi_pfe; int jump_label_errors, static_call_errors; @@ -3773,114 +3772,68 @@ static void kpatch_create_callbacks_objname_rela(struct kpatch_elf *kelf, char * } } -/* - * Allocate the mcount/patchable_function_entry sections which must be done - * before the patched object is torn down so that the section flags can be - * copied. - */ -static void kpatch_alloc_mcount_sections(struct kpatch_elf *kelf, struct kpatch_elf *kelfout) +static void kpatch_set_pfe_link(struct kpatch_elf *kelf) { - int nr; - struct symbol *sym; - int text_idx = 0; + struct section* sec; + struct rela *rela; - nr = 0; - list_for_each_entry(sym, &kelfout->symbols, list) { - if (sym->type == STT_FUNC && sym->status != SAME && - sym->has_func_profiling) { - text_idx = sym->sec->index; - nr++; + list_for_each_entry(sec, &kelf->sections, list) { + if (strcmp(sec->name, "__patchable_function_entries")) { + continue; } - } - - /* create text/rela section pair */ - switch(kelf->arch) { - case AARCH64: { - struct section *sec; - int entries = multi_pfe ? 1 : nr; - int copies = multi_pfe ? nr : 1; - int flags = 0, rflags = 0; - /* - * Depending on the compiler the __patchable_function_entries section - * can be ordered or not, copy this flag to the section we created to - * avoid: - * ld: __patchable_function_entries has both ordered [...] and unordered [...] 
sections - */ - sec = find_section_by_name(&kelf->sections, "__patchable_function_entries"); - if (sec) { - flags = (sec->sh.sh_flags & (SHF_LINK_ORDER|SHF_WRITE)); - if (sec->rela) - rflags = (sec->rela->sh.sh_flags & (SHF_LINK_ORDER|SHF_WRITE)); + if (!sec->rela) { + continue; } - - for (nr = 0; nr < copies; nr++) { - sec = create_section_pair(kelfout, - "__patchable_function_entries", - sizeof(void *), entries); - - sec->sh.sh_flags |= flags; - if (sec->rela) - sec->rela->sh.sh_flags |= rflags; - if (multi_pfe) - sec->sh.sh_link = 0; - else - sec->sh.sh_link = text_idx; + list_for_each_entry(rela, &sec->rela->relas, list) { + rela->sym->sec->pfe = sec; } - break; - } - case PPC64: - case X86_64: - case S390: - create_section_pair(kelfout, "__mcount_loc", sizeof(void *), nr); - break; - default: - ERROR("unsupported arch\n"); } } /* - * Populate the mcount sections allocated by kpatch_alloc_mcount_sections() - * previously. * This function basically reimplements the functionality of the Linux * recordmcount script, so that patched functions can be recognized by ftrace. * * TODO: Eventually we can modify recordmount so that it recognizes our bundled * sections as valid and does this work for us. */ -static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) +static void kpatch_create_mcount_sections(struct kpatch_elf *kelf) { int nr, index; - struct section *sec, *relasec; + struct section *relasec; struct symbol *sym; struct rela *rela, *mcount_rela; void **funcs; + bool pfe_per_function = false; - switch(kelf->arch) { + nr = 0; + list_for_each_entry(sym, &kelf->symbols, list) + if (sym->type == STT_FUNC && sym->status != SAME && + sym->has_func_profiling) + nr++; + + switch (kelf->arch) { case AARCH64: - if (multi_pfe) - sec = NULL; - else - sec = find_section_by_name(&kelf->sections, - "__patchable_function_entries"); + /* For aarch64, we will create separate __patchable_function_entries sections for each symbols. 
*/ + pfe_per_function = true; + relasec = NULL; break; case PPC64: case X86_64: case S390: - sec = find_section_by_name(&kelf->sections, "__mcount_loc"); + { + struct section *sec; + + /* create text/rela section pair */ + sec = create_section_pair(kelf, "__mcount_loc", sizeof(void*), nr); + relasec = sec->rela; break; + } default: ERROR("unsupported arch\n"); } - if (multi_pfe) { - relasec = NULL; - nr = 0; - } else { - relasec = sec->rela; - nr = (int) (sec->data->d_size / sizeof(void *)); - } - /* populate sections */ index = 0; list_for_each_entry(sym, &kelf->symbols, list) { @@ -3897,6 +3850,7 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) switch(kelf->arch) { case AARCH64: { + struct section *sec; unsigned char *insn; int i; @@ -3921,6 +3875,14 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) ERROR("%s: unexpected instruction in patch section of function\n", sym->name); } + /* Allocate __patchable_function_entries for symbol */ + sec = create_section_pair(kelf, "__patchable_function_entries", sizeof(void *), 1); + sec->sh.sh_flags |= SHF_WRITE | SHF_LINK_ORDER; + /* We will reset this sh_link in the reindex function. */ + sec->sh.sh_link = 0; + + relasec = sec->rela; + sym->sec->pfe = sec; break; } case PPC64: { @@ -3991,18 +3953,6 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) ERROR("unsupported arch"); } - if (multi_pfe) { - sec = find_nth_section_by_name(&kelf->sections, nr, "__patchable_function_entries"); - if (!sec) - ERROR("cannot retrieve pre-allocated __pfe #%d\n", nr); - - relasec = sec->rela; - sym->sec->pfe = sec; - sec->sh.sh_link = sec->index; - - nr++; - } - /* * 'rela' points to the mcount/fentry call. 
* @@ -4013,9 +3963,8 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) mcount_rela->type = absolute_rela_type(kelf); mcount_rela->addend = insn_offset - sym->sym.st_value; - if (multi_pfe) { + if (pfe_per_function) { mcount_rela->offset = 0; - sec = NULL; } else { mcount_rela->offset = (unsigned int) (index * sizeof(*funcs)); } @@ -4185,19 +4134,21 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) switch(kelf->arch) { case AARCH64: { struct section *sec; - list_for_each_entry(sec, &kelf->sections, list) { - if (strcmp(sec->name, "__patchable_function_entries")) + if (strcmp(sec->name, "__patchable_function_entries")) { continue; - if (multi_pfe && sym->sec->pfe != sec) + } + if (sym->sec->pfe != sec) { continue; - if (!sec->rela) + } + if (!sec->rela) { continue; + } list_for_each_entry(rela, &sec->rela->relas, list) { if (rela->sym->sec && sym->sec == rela->sym->sec) { sym->has_func_profiling = 1; - goto next_symbol; + goto next_symbol; } } } @@ -4287,12 +4238,6 @@ static error_t parse_opt (int key, char *arg, struct argp_state *state) return 0; } -static bool has_multi_pfe(struct kpatch_elf *kelf) -{ - return !!find_nth_section_by_name(&kelf->sections, 1, - "__patchable_function_entries"); -} - static struct argp argp = { options, parse_opt, args_doc, NULL }; int main(int argc, char *argv[]) @@ -4326,7 +4271,10 @@ int main(int argc, char *argv[]) kelf_orig = kpatch_elf_open(orig_obj); kelf_patched = kpatch_elf_open(patched_obj); - multi_pfe = has_multi_pfe(kelf_orig) || has_multi_pfe(kelf_patched); + + kpatch_set_pfe_link(kelf_orig); + kpatch_set_pfe_link(kelf_patched); + kpatch_find_func_profiling_calls(kelf_orig); kpatch_find_func_profiling_calls(kelf_patched); @@ -4388,9 +4336,6 @@ int main(int argc, char *argv[]) /* this is destructive to kelf_patched */ kpatch_migrate_included_elements(kelf_patched, &kelf_out); - /* this must be done before kelf_patched is torn down */ - kpatch_alloc_mcount_sections(kelf_patched, 
kelf_out); - /* * Teardown kelf_patched since we shouldn't access sections or symbols * through it anymore. Don't free however, since our section and symbol @@ -4409,7 +4354,7 @@ int main(int argc, char *argv[]) kpatch_create_callbacks_objname_rela(kelf_out, parent_name); kpatch_build_strings_section_data(kelf_out); - kpatch_populate_mcount_sections(kelf_out); + kpatch_create_mcount_sections(kelf_out); /* * At this point, the set of output sections and symbols is diff --git a/kpatch-build/kpatch-elf.c b/kpatch-build/kpatch-elf.c index 049062bf5..a2223fc4a 100755 --- a/kpatch-build/kpatch-elf.c +++ b/kpatch-build/kpatch-elf.c @@ -685,6 +685,7 @@ void kpatch_dump_kelf(struct kpatch_elf *kelf) if (sec->rela) printf(", rela-> %s", sec->rela->name); } + printf(", pfe-> [%d]", (sec->pfe) == NULL ? -1 : (int)sec->pfe->index); next: printf("\n"); } @@ -694,8 +695,10 @@ void kpatch_dump_kelf(struct kpatch_elf *kelf) printf("sym %02d, type %d, bind %d, ndx %02d, name %s (%s)", sym->index, sym->type, sym->bind, sym->sym.st_shndx, sym->name, status_str(sym->status)); - if (sym->sec && (sym->type == STT_FUNC || sym->type == STT_OBJECT)) + if (sym->sec && (sym->type == STT_FUNC || sym->type == STT_OBJECT)) { printf(" -> %s", sym->sec->name); + printf(", profiling: %d", sym->has_func_profiling); + } printf("\n"); } } @@ -964,6 +967,7 @@ struct section *create_section_pair(struct kpatch_elf *kelf, char *name, relasec->sh.sh_type = SHT_RELA; relasec->sh.sh_entsize = sizeof(GElf_Rela); relasec->sh.sh_addralign = 8; + relasec->sh.sh_flags = SHF_INFO_LINK; /* set text rela section pointer */ sec->rela = relasec; From fbcd40744ab2d83517e20fd8431f5532e684a4f2 Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Mon, 10 Oct 2022 19:03:09 -0700 Subject: [PATCH 18/19] doc/arm64-upstream-prerequisites.md --- README.md | 2 +- doc/arm64-upstream-prerequisites.md | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 doc/arm64-upstream-prerequisites.md diff --git 
a/README.md b/README.md index 78fd14bc4..b9c563000 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ Supported Architectures - [x] x86-64 - [x] ppc64le -- [x] arm64 +- [x] arm64 [upstream prerequisites](doc/arm64-upstream-prerequisites.md) - [x] s390 [upstream prerequisites](doc/s390-upstream-prerequisites.md) Installation diff --git a/doc/arm64-upstream-prerequisites.md b/doc/arm64-upstream-prerequisites.md new file mode 100644 index 000000000..3c49af860 --- /dev/null +++ b/doc/arm64-upstream-prerequisites.md @@ -0,0 +1,11 @@ +### arm64 backporting + +**Prerequisite kernel patches:** +**v5.19:** +- [Madhavan Venkataraman's [RFC PATCH v2 00/20] arm64: livepatch: Use ORC for dynamic frame pointer validation](https://lore.kernel.org/linux-arm-kernel/20220524001637.1707472-1-madvenka@linux.microsoft.com/) +- also tested against madvenka's earlier pre-objtool series up to v15 + +**v5.15 and v5.10:** +- under development, both known to work with backports of madvenka's v15, + but the objtool-using version above is likely to be the approach that + finally merges into upstream kernel From 063c8cea030417dfab2c6e678e2ebfd32153966f Mon Sep 17 00:00:00 2001 From: Mihails Strasuns Date: Fri, 14 Jun 2024 13:27:47 +0000 Subject: [PATCH 19/19] Fix shellcheck warnings Signed-off-by: Mihails Strasuns --- kpatch-build/kpatch-build | 6 +++--- kpatch/kpatch | 1 - test/integration/lib.sh | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/kpatch-build/kpatch-build b/kpatch-build/kpatch-build index b302d62e6..12db2fb2e 100755 --- a/kpatch-build/kpatch-build +++ b/kpatch-build/kpatch-build @@ -1424,7 +1424,7 @@ for i in $FILES; do # create-diff-object orig.o patched.o parent-name parent-symtab # Module.symvers patch-mod-name output.o - "$TOOLSDIR"/create-diff-object $CDO_FLAGS "orig/$i" "patched/$i" "$KOBJFILE_NAME" \ + "$TOOLSDIR"/create-diff-object "${CDO_FLAGS[@]}" "orig/$i" "patched/$i" "$KOBJFILE_NAME" \ "$SYMTAB" "$SYMVERS_FILE" "${MODNAME//-/_}" \ 
"output/$i" 2>&1 | logger 1 check_pipe_status create-diff-object @@ -1494,7 +1494,7 @@ cd "$TEMPDIR/patch" || die # We no longer need kpatch-cc for ((idx=0; idx<${#MAKEVARS[@]}; idx++)); do - MAKEVARS[$idx]=${MAKEVARS[$idx]/${KPATCH_CC_PREFIX}/} + MAKEVARS[idx]=${MAKEVARS[idx]/${KPATCH_CC_PREFIX}/} done export KPATCH_BUILD="$KERNEL_SRCDIR" KPATCH_NAME="$MODNAME" \ @@ -1510,7 +1510,7 @@ if [[ "$USE_KLP" -eq 1 ]]; then extra_flags="--no-klp-arch-sections" fi cp -f "$TEMPDIR/patch/$MODNAME.ko" "$TEMPDIR/patch/tmp.ko" || die - "$TOOLSDIR"/create-klp-module $extra_flags "$TEMPDIR/patch/tmp.ko" "$TEMPDIR/patch/$MODNAME.ko" 2>&1 | logger 1 + "$TOOLSDIR"/create-klp-module "${extra_flags[@]}" "$TEMPDIR/patch/tmp.ko" "$TEMPDIR/patch/$MODNAME.ko" 2>&1 | logger 1 check_pipe_status create-klp-module [[ "$rc" -ne 0 ]] && die "create-klp-module: exited with return code: $rc" fi diff --git a/kpatch/kpatch b/kpatch/kpatch index edfccfead..0e94a5d73 100755 --- a/kpatch/kpatch +++ b/kpatch/kpatch @@ -355,7 +355,6 @@ load_module () { i=$((i+1)) if [[ $i -eq $MAX_LOAD_ATTEMPTS ]]; then die "failed to load module $module" - break else warn "retrying..." sleep $RETRY_INTERVAL diff --git a/test/integration/lib.sh b/test/integration/lib.sh index 84a6dc80c..86b09e4b2 100644 --- a/test/integration/lib.sh +++ b/test/integration/lib.sh @@ -116,7 +116,7 @@ kpatch_photon_dependencies() if [[ -z "$flavor" ]]; then tdnf install -y linux-debuginfo else - tdnf install -y linux-$flavor-debuginfo + tdnf install -y "linux-$flavor-debuginfo" fi }