From 0cac959182a45ce1143b36f64e2c135b6a94709f Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Wed, 6 Oct 2021 12:41:16 -0700 Subject: [PATCH 1/4] kpatch-build: Add sym->has_func_profiling support for aarch64 The "has_function_profiling" support field in the symbol struct is used to show that a function symbol is able to be patched. This is necessary to check that functions which need to be patched are able to be. On arm64 this means the presence of 2 NOP instructions at function entry which are patched by ftrace to call the ftrace handling code. These 2 NOPs are inserted by the compiler and the location of them is recorded in a section called "__patchable_function_entries". Check whether a symbol has a corresponding entry in the "__patchable_function_entries" section and if so mark it as "has_func_profiling". Signed-off-by: Suraj Jitindar Singh --- V1->V2: - Make error message standard across architectures when no patchable entry - Don't store __patchable_function_entries section in kpatch_find_func_profiling_calls(), instead find it each time --- kpatch-build/create-diff-object.c | 2 +- kpatch-build/kpatch-elf.c | 19 ++++++++++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 2636524c3..5dec1c7fa 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -1635,7 +1635,7 @@ static void kpatch_check_func_profiling_calls(struct kpatch_elf *kelf) (sym->parent && sym->parent->status == CHANGED)) continue; if (!sym->twin->has_func_profiling) { - log_normal("function %s has no fentry/mcount call, unable to patch\n", + log_normal("function %s doesn't have patchable function entry, unable to patch\n", sym->name); errs++; } diff --git a/kpatch-build/kpatch-elf.c b/kpatch-build/kpatch-elf.c index 7e272e2c8..1b0b0eddd 100644 --- a/kpatch-build/kpatch-elf.c +++ b/kpatch-build/kpatch-elf.c @@ -332,7 +332,24 @@ static void 
kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) break; } } -#else +#elif defined(__aarch64__) +{ + struct section *sec = find_section_by_name(&kelf->sections, + "__patchable_function_entries"); + /* + * If we can't find the __patchable_function_entries section or + * there are no relocations in it then not patchable. + */ + if (!sec || !sec->rela) + return; + list_for_each_entry(rela, &sec->rela->relas, list) { + if (rela->sym->sec && sym->sec == rela->sym->sec) { + sym->has_func_profiling = 1; + break; + } + } +} +#else /* x86_64 */ rela = list_first_entry(&sym->sec->rela->relas, struct rela, list); if ((rela->type != R_X86_64_NONE && From 6e38493f0e6cd71660f0350c9e2b0ccc1939c2c2 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Fri, 10 Dec 2021 15:25:21 -0800 Subject: [PATCH 2/4] create-diff-object: Split kpatch_create_mcount_sections into alloc and populate The function kpatch_create_mcount_sections() allocates the __mcount_loc section and then populates it with functions which have a patchable entry. The following patch will add aarch64 support to this function where the allocation will have to be done before the kelf_patched is torn down. Thus split this function so that the allocation can be performed earlier and the populating as before. No intended functional change. 
Signed-off-by: Suraj Jitindar Singh --- V1->V2: - Add patch to series --- kpatch-build/create-diff-object.c | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 5dec1c7fa..80ee759ff 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -3401,6 +3401,21 @@ static void kpatch_create_callbacks_objname_rela(struct kpatch_elf *kelf, char * } } +static void kpatch_alloc_mcount_sections(struct kpatch_elf *kelf, struct kpatch_elf *kelfout) +{ + int nr; + struct symbol *sym; + + nr = 0; + list_for_each_entry(sym, &kelfout->symbols, list) + if (sym->type == STT_FUNC && sym->status != SAME && + sym->has_func_profiling) + nr++; + + /* create text/rela section pair */ + create_section_pair(kelfout, "__mcount_loc", sizeof(void*), nr); +} + /* * This function basically reimplements the functionality of the Linux * recordmcount script, so that patched functions can be recognized by ftrace. @@ -3408,7 +3423,7 @@ static void kpatch_create_callbacks_objname_rela(struct kpatch_elf *kelf, char * * TODO: Eventually we can modify recordmount so that it recognizes our bundled * sections as valid and does this work for us. 
*/ -static void kpatch_create_mcount_sections(struct kpatch_elf *kelf) +static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) { int nr, index; struct section *sec, *relasec; @@ -3417,15 +3432,10 @@ static void kpatch_create_mcount_sections(struct kpatch_elf *kelf) void **funcs; unsigned long insn_offset; - nr = 0; - list_for_each_entry(sym, &kelf->symbols, list) - if (sym->type == STT_FUNC && sym->status != SAME && - sym->has_func_profiling) - nr++; - /* create text/rela section pair */ - sec = create_section_pair(kelf, "__mcount_loc", sizeof(void*), nr); + sec = find_section_by_name(&kelf->sections, "__mcount_loc"); relasec = sec->rela; + nr = (int) (sec->data->d_size / sizeof(void *)); /* populate sections */ index = 0; @@ -3788,6 +3798,9 @@ int main(int argc, char *argv[]) /* this is destructive to kelf_patched */ kpatch_migrate_included_elements(kelf_patched, &kelf_out); + /* this must be done before kelf_patched is torn down */ + kpatch_alloc_mcount_sections(kelf_patched, kelf_out); + /* * Teardown kelf_patched since we shouldn't access sections or symbols * through it anymore. Don't free however, since our section and symbol @@ -3806,7 +3819,7 @@ int main(int argc, char *argv[]) kpatch_create_callbacks_objname_rela(kelf_out, parent_name); kpatch_build_strings_section_data(kelf_out); - kpatch_create_mcount_sections(kelf_out); + kpatch_populate_mcount_sections(kelf_out); /* * At this point, the set of output sections and symbols is From bd6019cbaa8d72012fb381e9847dd3eff5305cb3 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Fri, 10 Dec 2021 15:25:24 -0800 Subject: [PATCH 3/4] create-diff-object: Create __patchable_function_entries section for aarch64 The __mcount_loc section contains the addresses of patchable ftrace sites which is used by the ftrace infrastructure in the kernel to create a list of tracable functions and to know where to patch to enable tracing of them. 
On aarch64 this section is called __patchable_function_entries and is generated by the compiler. Either of __mcount_loc or __patchable_function_entries is recognised by the kernel but for aarch64 use __patchable_function_entries as it is what is expected. Add aarch64 support to kpatch_alloc_mcount_sections(). The SHF_LINK_ORDER section flag must be copied to ensure that it matches to avoid the following: ld: __patchable_function_entries has both ordered [...] and unordered [...] sections Add aarch64 support to kpatch_populate_mcount_sections(). Check for the 2 required NOP instructions on function entry, which may be preceded by a BTI C instruction depending on whether the function is a leaf function. This determines the offset of the patch site. Signed-off-by: Suraj Jitindar Singh --- V1->V2: - Don't preserve the __patchable_function_entries section from the patched elf as this is already verified by kpatch_check_func_profiling_calls() - Instead get the patch entry offset by checking for a preceding BTI C instr - Copy the section flags for __patchable_function_entries --- kpatch-build/create-diff-object.c | 63 +++++++++++++++++++++++++++++-- 1 file changed, 60 insertions(+), 3 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 80ee759ff..9b01a273a 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -3401,6 +3401,11 @@ static void kpatch_create_callbacks_objname_rela(struct kpatch_elf *kelf, char * } } +/* + * Allocate the mcount/patchable_function_entry sections which must be done + * before the patched object is torn down so that the section flags can be + * copied. 
+ * copied.
+ */
 static void kpatch_alloc_mcount_sections(struct kpatch_elf *kelf, struct kpatch_elf *kelfout)
 {
 	int nr;
@@ -3413,10 +3418,30 @@ static void kpatch_alloc_mcount_sections(struct kpatch_elf *kelf, struct kpatch_
 			nr++;
 
 	/* create text/rela section pair */
+#ifdef __aarch64__
+{
+	struct section *sec, *tmp;
+
+	sec = create_section_pair(kelfout, "__patchable_function_entries", sizeof(void*), nr);
+
+	/*
+	 * Depending on the compiler the __patchable_function_entries section
+	 * can be ordered or not, copy this flag to the section we created to
+	 * avoid:
+	 * ld: __patchable_function_entries has both ordered [...] and unordered [...] sections
+	 */
+	tmp = find_section_by_name(&kelf->sections, "__patchable_function_entries");
+	if (tmp)
+		sec->sh.sh_flags |= (tmp->sh.sh_flags & SHF_LINK_ORDER);
+}
+#else /* !__aarch64__ */
 	create_section_pair(kelfout, "__mcount_loc", sizeof(void*), nr);
+#endif
 }
 
 /*
+ * Populate the mcount sections allocated by kpatch_alloc_mcount_sections()
+ * previously.
  * This function basically reimplements the functionality of the Linux
  * recordmcount script, so that patched functions can be recognized by ftrace.
* @@ -3428,12 +3453,16 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) int nr, index; struct section *sec, *relasec; struct symbol *sym; - struct rela *rela, *mcount_rela; + struct rela *mcount_rela; void **funcs; unsigned long insn_offset; +#ifdef __aarch64__ + sec = find_section_by_name(&kelf->sections, "__patchable_function_entries"); +#else /* !__aarch64__ */ sec = find_section_by_name(&kelf->sections, "__mcount_loc"); +#endif relasec = sec->rela; nr = (int) (sec->data->d_size / sizeof(void *)); @@ -3450,8 +3479,8 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) } #ifdef __x86_64__ - - rela = list_first_entry(&sym->sec->rela->relas, struct rela, list); +{ + struct rela *rela = list_first_entry(&sym->sec->rela->relas, struct rela, list); /* * For "call fentry", the relocation points to 1 byte past the @@ -3491,9 +3520,37 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) rela->type = R_X86_64_PC32; } +} +#elif defined(__aarch64__) +{ + unsigned char *insn; + int i; + + insn = sym->sec->data->d_buf; + insn_offset = 0; + /* + * If BTI (Branch Target Identification) is enabled then there + * might be an additional 'BTI C' instruction before the two + * patchable function entry 'NOP's. + * i.e. 0xd503245f (little endian) + */ + if (insn[0] == 0x5f) { + if (insn[1] != 0x24 || insn[2] != 0x03 || insn[3] != 0xd5) + ERROR("%s: unexpected instruction in patch section of function", sym->name); + insn_offset += 4; + insn += 4; + } + for (i = 0; i < 8; i += 4) { + /* We expect a NOP i.e. 
0xd503201f (little endian) */ + if (insn[i] != 0x1f || insn[i + 1] != 0x20 || + insn[i + 2] != 0x03 || insn [i + 3] != 0xd5) + ERROR("%s: unexpected instruction in patch section of function", sym->name); + } +} #else /* __powerpc64__ */ { + struct rela *rela; bool found = false; list_for_each_entry(rela, &sym->sec->rela->relas, list) From 00082aadb5cf5c2f75b670161faa4c981ae53138 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Wed, 6 Oct 2021 12:49:33 -0700 Subject: [PATCH 4/4] kpatch-build: Enable ARM64 support Add the final support required for aarch64 and enable building on that arch. Signed-off-by: Suraj Jitindar Singh --- V1->V2: - Add # shellcheck disable=SC2086 - Add comment to kpatch_is_mapping_symbol() --- README.md | 2 +- kpatch-build/Makefile | 1 + kpatch-build/create-diff-object.c | 76 +++++++++++++++++++++++-------- kpatch-build/kpatch-build | 48 +++++++++++++++++++ 4 files changed, 107 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index 2e89c97ba..4b4065cf9 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ Supported Architectures - [x] x86-64 - [x] ppc64le -- [ ] arm64 +- [x] arm64 - [ ] s390 Installation diff --git a/kpatch-build/Makefile b/kpatch-build/Makefile index 50899b644..9273de12d 100644 --- a/kpatch-build/Makefile +++ b/kpatch-build/Makefile @@ -22,6 +22,7 @@ GCC_PLUGINS_DIR := $(shell gcc -print-file-name=plugin) PLUGIN_CFLAGS := $(filter-out -Wconversion, $(CFLAGS)) PLUGIN_CFLAGS += -shared -I$(GCC_PLUGINS_DIR)/include \ -Igcc-plugins -fPIC -fno-rtti -O2 -Wall +else ifeq ($(ARCH),aarch64) else $(error Unsupported architecture ${ARCH}, check https://github.com/dynup/kpatch/#supported-architectures) endif diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 9b01a273a..34a031ff5 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -62,7 +62,9 @@ #ifdef __powerpc64__ #define ABSOLUTE_RELA_TYPE R_PPC64_ADDR64 -#else +#elif 
defined(__aarch64__) +#define ABSOLUTE_RELA_TYPE R_AARCH64_ABS64 +#elif defined(__x86_64__) #define ABSOLUTE_RELA_TYPE R_X86_64_64 #endif @@ -214,6 +216,28 @@ static struct rela *toc_rela(const struct rela *rela) (unsigned int)rela->addend); } +#ifdef __aarch64__ +/* + * Mapping symbols are used to mark and label the transitions between code and + * data in elf files. They begin with a "$" dollar symbol. Don't correlate them + * as they often all have the same name either "$x" to mark the start of code + * or "$d" to mark the start of data. + */ +static bool kpatch_is_mapping_symbol(struct symbol *sym) +{ + if (sym->name && sym->name[0] == '$' + && sym->type == STT_NOTYPE \ + && sym->bind == STB_LOCAL) + return 1; + return 0; +} +#else +static int kpatch_is_mapping_symbol(struct symbol *sym) +{ + return 0; +} +#endif + /* * When compiling with -ffunction-sections and -fdata-sections, almost every * symbol gets its own dedicated section. We call such symbols "bundled" @@ -564,6 +588,13 @@ static void kpatch_compare_correlated_section(struct section *sec) goto out; } + /* As above but for aarch64 */ + if (!strcmp(sec->name, ".rela__patchable_function_entries") || + !strcmp(sec->name, "__patchable_function_entries")) { + sec->status = SAME; + goto out; + } + if (sec1->sh.sh_size != sec2->sh.sh_size || sec1->data->d_size != sec2->data->d_size) { sec->status = CHANGED; @@ -1030,6 +1061,9 @@ static void kpatch_correlate_symbols(struct list_head *symlist_orig, !strncmp(sym_orig->name, ".LC", 3)) continue; + if (kpatch_is_mapping_symbol(sym_orig)) + continue; + /* group section symbols must have correlated sections */ if (sym_orig->sec && sym_orig->sec->sh.sh_type == SHT_GROUP && @@ -1539,7 +1573,7 @@ static void kpatch_replace_sections_syms(struct kpatch_elf *kelf) continue; } -#ifdef __powerpc64__ +#if defined(__powerpc64__) || defined(__aarch64__) add_off = 0; #else if (rela->type == R_X86_64_PC32 || @@ -1571,7 +1605,8 @@ static void kpatch_replace_sections_syms(struct 
kpatch_elf *kelf) end = sym->sym.st_value + sym->sym.st_size; if (!is_text_section(sym->sec) && - rela->type == R_X86_64_32S && + (rela->type == R_X86_64_32S || + rela->type == R_AARCH64_ABS64) && rela->addend == (long)sym->sec->sh.sh_size && end == (long)sym->sec->sh.sh_size) { @@ -2047,35 +2082,36 @@ static int parainstructions_group_size(struct kpatch_elf *kelf, int offset) return size; } -static int altinstructions_group_size(struct kpatch_elf *kelf, int offset) +static int smp_locks_group_size(struct kpatch_elf *kelf, int offset) +{ + return 4; +} + +static int static_call_sites_group_size(struct kpatch_elf *kelf, int offset) { static int size = 0; char *str; if (!size) { - str = getenv("ALT_STRUCT_SIZE"); + str = getenv("STATIC_CALL_STRUCT_SIZE"); if (!str) - ERROR("ALT_STRUCT_SIZE not set"); + ERROR("STATIC_CALL_STRUCT_SIZE not set"); size = atoi(str); } return size; } - -static int smp_locks_group_size(struct kpatch_elf *kelf, int offset) -{ - return 4; -} - -static int static_call_sites_group_size(struct kpatch_elf *kelf, int offset) +#endif +#if defined(__x86_64__) || defined(__aarch64__) +static int altinstructions_group_size(struct kpatch_elf *kelf, int offset) { static int size = 0; char *str; if (!size) { - str = getenv("STATIC_CALL_STRUCT_SIZE"); + str = getenv("ALT_STRUCT_SIZE"); if (!str) - ERROR("STATIC_CALL_STRUCT_SIZE not set"); + ERROR("ALT_STRUCT_SIZE not set"); size = atoi(str); } @@ -2189,15 +2225,17 @@ static struct special_section special_sections[] = { .name = ".parainstructions", .group_size = parainstructions_group_size, }, - { - .name = ".altinstructions", - .group_size = altinstructions_group_size, - }, { .name = ".static_call_sites", .group_size = static_call_sites_group_size, }, #endif +#if defined(__x86_64__) || defined(__aarch64__) + { + .name = ".altinstructions", + .group_size = altinstructions_group_size, + }, +#endif #ifdef __powerpc64__ { .name = "__ftr_fixup", diff --git a/kpatch-build/kpatch-build 
b/kpatch-build/kpatch-build
index e473405b9..c4235f5d2 100755
--- a/kpatch-build/kpatch-build
+++ b/kpatch-build/kpatch-build
@@ -375,10 +375,58 @@ find_special_section_data_ppc64le() {
 	return
 }
 
+find_special_section_data_aarch64() {
+	[[ "$CONFIG_JUMP_LABEL" -eq 0 ]] && AWK_OPTIONS="-vskip_j=1"
+	[[ "$CONFIG_PRINTK_INDEX" -eq 0 ]] && AWK_OPTIONS="$AWK_OPTIONS -vskip_i=1"
+
+	# shellcheck disable=SC2086
+	SPECIAL_VARS="$(readelf -wi "$VMLINUX" |
+		gawk --non-decimal-data $AWK_OPTIONS '
+		BEGIN { a = b = p = e = o = j = s = i = 0 }
+
+		# Set state if name matches
+		a == 0 && /DW_AT_name.* alt_instr[[:space:]]*$/ {a = 1; next}
+		b == 0 && /DW_AT_name.* bug_entry[[:space:]]*$/ {b = 1; next}
+		e == 0 && /DW_AT_name.* exception_table_entry[[:space:]]*$/ {e = 1; next}
+		j == 0 && /DW_AT_name.* jump_entry[[:space:]]*$/ {j = 1; next}
+		i == 0 && /DW_AT_name.* pi_entry[[:space:]]*$/ {i = 1; next}
+
+		# Reset state unless this abbrev describes the struct size
+		a == 1 && !/DW_AT_byte_size/ { a = 0; next }
+		b == 1 && !/DW_AT_byte_size/ { b = 0; next }
+		e == 1 && !/DW_AT_byte_size/ { e = 0; next }
+		j == 1 && !/DW_AT_byte_size/ { j = 0; next }
+		i == 1 && !/DW_AT_byte_size/ { i = 0; next }
+
+		# Now that we know the size, stop parsing for it
+		a == 1 {printf("export ALT_STRUCT_SIZE=%d\n", $4); a = 2}
+		b == 1 {printf("export BUG_STRUCT_SIZE=%d\n", $4); b = 2}
+		e == 1 {printf("export EX_STRUCT_SIZE=%d\n", $4); e = 2}
+		j == 1 {printf("export JUMP_STRUCT_SIZE=%d\n", $4); j = 2}
+		i == 1 {printf("export PRINTK_INDEX_STRUCT_SIZE=%d\n", $4); i = 2}
+
+		# Bail out once we have everything
+		a == 2 && b == 2 && e == 2 && (j == 2 || skip_j) && (i == 2 || skip_i) {exit}')"
+
+	[[ -n "$SPECIAL_VARS" ]] && eval "$SPECIAL_VARS"
+
+	[[ -z "$ALT_STRUCT_SIZE" ]] && die "can't find special struct alt_instr size"
+	[[ -z "$BUG_STRUCT_SIZE" ]] && die "can't find special struct bug_entry size"
+	[[ -z "$EX_STRUCT_SIZE" ]] && die "can't find special struct exception_table_entry size"
+	[[ -z 
"$JUMP_STRUCT_SIZE" && "$CONFIG_JUMP_LABEL" -ne 0 ]] && die "can't find special struct jump_entry size" + [[ -z "$PRINTK_INDEX_STRUCT_SIZE" && "$CONFIG_PRINTK_INDEX" -ne 0 ]] && die "can't find special struct pi_entry size" + + return + +} + find_special_section_data() { if [[ "$ARCH" = "ppc64le" ]]; then find_special_section_data_ppc64le return + elif [[ "$ARCH" = "aarch64" ]]; then + find_special_section_data_aarch64 + return fi [[ "$CONFIG_PARAVIRT" -eq 0 ]] && AWK_OPTIONS="-vskip_p=1"