diff --git a/README.md b/README.md index 2e89c97ba..4b4065cf9 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ Supported Architectures - [x] x86-64 - [x] ppc64le -- [ ] arm64 +- [x] arm64 - [ ] s390 Installation diff --git a/kpatch-build/Makefile b/kpatch-build/Makefile index 50899b644..9273de12d 100644 --- a/kpatch-build/Makefile +++ b/kpatch-build/Makefile @@ -22,6 +22,7 @@ GCC_PLUGINS_DIR := $(shell gcc -print-file-name=plugin) PLUGIN_CFLAGS := $(filter-out -Wconversion, $(CFLAGS)) PLUGIN_CFLAGS += -shared -I$(GCC_PLUGINS_DIR)/include \ -Igcc-plugins -fPIC -fno-rtti -O2 -Wall +else ifeq ($(ARCH),aarch64) else $(error Unsupported architecture ${ARCH}, check https://github.com/dynup/kpatch/#supported-architectures) endif diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 2636524c3..34a031ff5 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -62,7 +62,9 @@ #ifdef __powerpc64__ #define ABSOLUTE_RELA_TYPE R_PPC64_ADDR64 -#else +#elif defined(__aarch64__) +#define ABSOLUTE_RELA_TYPE R_AARCH64_ABS64 +#elif defined(__x86_64__) #define ABSOLUTE_RELA_TYPE R_X86_64_64 #endif @@ -214,6 +216,28 @@ static struct rela *toc_rela(const struct rela *rela) (unsigned int)rela->addend); } +#ifdef __aarch64__ +/* + * Mapping symbols are used to mark and label the transitions between code and + * data in elf files. They begin with a "$" dollar symbol. Don't correlate them + * as they often all have the same name either "$x" to mark the start of code + * or "$d" to mark the start of data. + */ +static bool kpatch_is_mapping_symbol(struct symbol *sym) +{ + if (sym->name && sym->name[0] == '$' + && sym->type == STT_NOTYPE \ + && sym->bind == STB_LOCAL) + return 1; + return 0; +} +#else +static int kpatch_is_mapping_symbol(struct symbol *sym) +{ + return 0; +} +#endif + /* * When compiling with -ffunction-sections and -fdata-sections, almost every * symbol gets its own dedicated section. 
We call such symbols "bundled" @@ -564,6 +588,13 @@ static void kpatch_compare_correlated_section(struct section *sec) goto out; } + /* As above but for aarch64 */ + if (!strcmp(sec->name, ".rela__patchable_function_entries") || + !strcmp(sec->name, "__patchable_function_entries")) { + sec->status = SAME; + goto out; + } + if (sec1->sh.sh_size != sec2->sh.sh_size || sec1->data->d_size != sec2->data->d_size) { sec->status = CHANGED; @@ -1030,6 +1061,9 @@ static void kpatch_correlate_symbols(struct list_head *symlist_orig, !strncmp(sym_orig->name, ".LC", 3)) continue; + if (kpatch_is_mapping_symbol(sym_orig)) + continue; + /* group section symbols must have correlated sections */ if (sym_orig->sec && sym_orig->sec->sh.sh_type == SHT_GROUP && @@ -1539,7 +1573,7 @@ static void kpatch_replace_sections_syms(struct kpatch_elf *kelf) continue; } -#ifdef __powerpc64__ +#if defined(__powerpc64__) || defined(__aarch64__) add_off = 0; #else if (rela->type == R_X86_64_PC32 || @@ -1571,7 +1605,8 @@ static void kpatch_replace_sections_syms(struct kpatch_elf *kelf) end = sym->sym.st_value + sym->sym.st_size; if (!is_text_section(sym->sec) && - rela->type == R_X86_64_32S && + (rela->type == R_X86_64_32S || + rela->type == R_AARCH64_ABS64) && rela->addend == (long)sym->sec->sh.sh_size && end == (long)sym->sec->sh.sh_size) { @@ -1635,7 +1670,7 @@ static void kpatch_check_func_profiling_calls(struct kpatch_elf *kelf) (sym->parent && sym->parent->status == CHANGED)) continue; if (!sym->twin->has_func_profiling) { - log_normal("function %s has no fentry/mcount call, unable to patch\n", + log_normal("function %s doesn't have patchable function entry, unable to patch\n", sym->name); errs++; } @@ -2047,35 +2082,36 @@ static int parainstructions_group_size(struct kpatch_elf *kelf, int offset) return size; } -static int altinstructions_group_size(struct kpatch_elf *kelf, int offset) +static int smp_locks_group_size(struct kpatch_elf *kelf, int offset) +{ + return 4; +} + +static int 
static_call_sites_group_size(struct kpatch_elf *kelf, int offset) { static int size = 0; char *str; if (!size) { - str = getenv("ALT_STRUCT_SIZE"); + str = getenv("STATIC_CALL_STRUCT_SIZE"); if (!str) - ERROR("ALT_STRUCT_SIZE not set"); + ERROR("STATIC_CALL_STRUCT_SIZE not set"); size = atoi(str); } return size; } - -static int smp_locks_group_size(struct kpatch_elf *kelf, int offset) -{ - return 4; -} - -static int static_call_sites_group_size(struct kpatch_elf *kelf, int offset) +#endif +#if defined(__x86_64__) || defined(__aarch64__) +static int altinstructions_group_size(struct kpatch_elf *kelf, int offset) { static int size = 0; char *str; if (!size) { - str = getenv("STATIC_CALL_STRUCT_SIZE"); + str = getenv("ALT_STRUCT_SIZE"); if (!str) - ERROR("STATIC_CALL_STRUCT_SIZE not set"); + ERROR("ALT_STRUCT_SIZE not set"); size = atoi(str); } @@ -2189,15 +2225,17 @@ static struct special_section special_sections[] = { .name = ".parainstructions", .group_size = parainstructions_group_size, }, - { - .name = ".altinstructions", - .group_size = altinstructions_group_size, - }, { .name = ".static_call_sites", .group_size = static_call_sites_group_size, }, #endif +#if defined(__x86_64__) || defined(__aarch64__) + { + .name = ".altinstructions", + .group_size = altinstructions_group_size, + }, +#endif #ifdef __powerpc64__ { .name = "__ftr_fixup", @@ -3402,30 +3440,69 @@ static void kpatch_create_callbacks_objname_rela(struct kpatch_elf *kelf, char * } /* + * Allocate the mcount/patchable_function_entry sections which must be done + * before the patched object is torn down so that the section flags can be + * copied. 
+ */ +static void kpatch_alloc_mcount_sections(struct kpatch_elf *kelf, struct kpatch_elf *kelfout) +{ + int nr; + struct symbol *sym; + + nr = 0; + list_for_each_entry(sym, &kelfout->symbols, list) + if (sym->type == STT_FUNC && sym->status != SAME && + sym->has_func_profiling) + nr++; + + /* create text/rela section pair */ +#ifdef __aarch64__ +{ + struct section *sec, *tmp; + + sec = create_section_pair(kelfout, "__patchable_function_entries", sizeof(void*), nr); + + /* + * Depending on the compiler the __patchable_function_entries section + * can be ordered or not, copy this flag to the section we created to + * avoid: + * ld: __patchable_function_entries has both ordered [...] and unordered [...] sections + */ + tmp = find_section_by_name(&kelf->sections, "__patchable_function_entries"); + sec->sh.sh_flags |= (tmp->sh.sh_flags & SHF_LINK_ORDER); + sec->sh.sh_link = 0; +} +#else /* !__aarch64__ */ + create_section_pair(kelfout, "__mcount_loc", sizeof(void*), nr); +#endif +} + +/* + * Populate the mcount sections allocated by kpatch_alloc_mcount_sections() + * previously. * This function basically reimplements the functionality of the Linux * recordmcount script, so that patched functions can be recognized by ftrace. * * TODO: Eventually we can modify recordmount so that it recognizes our bundled * sections as valid and does this work for us. 
*/ -static void kpatch_create_mcount_sections(struct kpatch_elf *kelf) +static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) { int nr, index; struct section *sec, *relasec; struct symbol *sym; - struct rela *rela, *mcount_rela; + struct rela *mcount_rela; void **funcs; unsigned long insn_offset; - nr = 0; - list_for_each_entry(sym, &kelf->symbols, list) - if (sym->type == STT_FUNC && sym->status != SAME && - sym->has_func_profiling) - nr++; - /* create text/rela section pair */ - sec = create_section_pair(kelf, "__mcount_loc", sizeof(void*), nr); +#ifdef __aarch64__ + sec = find_section_by_name(&kelf->sections, "__patchable_function_entries"); +#else /* !__aarch64__ */ + sec = find_section_by_name(&kelf->sections, "__mcount_loc"); +#endif relasec = sec->rela; + nr = (int) (sec->data->d_size / sizeof(void *)); /* populate sections */ index = 0; @@ -3440,8 +3517,8 @@ static void kpatch_create_mcount_sections(struct kpatch_elf *kelf) } #ifdef __x86_64__ - - rela = list_first_entry(&sym->sec->rela->relas, struct rela, list); +{ + struct rela *rela = list_first_entry(&sym->sec->rela->relas, struct rela, list); /* * For "call fentry", the relocation points to 1 byte past the @@ -3481,9 +3558,37 @@ static void kpatch_create_mcount_sections(struct kpatch_elf *kelf) rela->type = R_X86_64_PC32; } +} +#elif defined(__aarch64__) +{ + unsigned char *insn; + int i; + + insn = sym->sec->data->d_buf; + insn_offset = 0; + /* + * If BTI (Branch Target Identification) is enabled then there + * might be an additional 'BTI C' instruction before the two + * patchable function entry 'NOP's. + * i.e. 0xd503245f (little endian) + */ + if (insn[0] == 0x5f) { + if (insn[1] != 0x24 || insn[2] != 0x03 || insn[3] != 0xd5) + ERROR("%s: unexpected instruction in patch section of function", sym->name); + insn_offset += 4; + insn += 4; + } + for (i = 0; i < 8; i += 4) { + /* We expect a NOP i.e. 
0xd503201f (little endian) */ + if (insn[i] != 0x1f || insn[i + 1] != 0x20 || + insn[i + 2] != 0x03 || insn [i + 3] != 0xd5) + ERROR("%s: unexpected instruction in patch section of function", sym->name); + } +} #else /* __powerpc64__ */ { + struct rela *rela; bool found = false; list_for_each_entry(rela, &sym->sec->rela->relas, list) @@ -3788,6 +3893,9 @@ int main(int argc, char *argv[]) /* this is destructive to kelf_patched */ kpatch_migrate_included_elements(kelf_patched, &kelf_out); + /* this must be done before kelf_patched is torn down */ + kpatch_alloc_mcount_sections(kelf_patched, kelf_out); + /* * Teardown kelf_patched since we shouldn't access sections or symbols * through it anymore. Don't free however, since our section and symbol @@ -3806,7 +3914,7 @@ int main(int argc, char *argv[]) kpatch_create_callbacks_objname_rela(kelf_out, parent_name); kpatch_build_strings_section_data(kelf_out); - kpatch_create_mcount_sections(kelf_out); + kpatch_populate_mcount_sections(kelf_out); /* * At this point, the set of output sections and symbols is diff --git a/kpatch-build/kpatch-build b/kpatch-build/kpatch-build index e473405b9..c4235f5d2 100755 --- a/kpatch-build/kpatch-build +++ b/kpatch-build/kpatch-build @@ -375,10 +375,58 @@ find_special_section_data_ppc64le() { return } +find_special_section_data_aarch64() { + [[ "$CONFIG_JUMP_LABEL" -eq 0 ]] && AWK_OPTIONS="-vskip_j=1" + [[ "$CONFIG_PRINTK_INDEX" -eq 0 ]] && AWK_OPTIONS="$AWK_OPTIONS -vskip_i=1" + + # shellcheck disable=SC2086 + SPECIAL_VARS="$(readelf -wi "$VMLINUX" | + gawk --non-decimal-data $AWK_OPTIONS ' + BEGIN { a = b = p = e = o = j = s = i = 0 } + + # Set state if name matches + a == 0 && /DW_AT_name.* alt_instr[[:space:]]*$/ {a = 1; next} + b == 0 && /DW_AT_name.* bug_entry[[:space:]]*$/ {b = 1; next} + e == 0 && /DW_AT_name.* exception_table_entry[[:space:]]*$/ {e = 1; next} + j == 0 && /DW_AT_name.* jump_entry[[:space:]]*$/ {j = 1; next} + i == 0 && /DW_AT_name.* pi_entry[[:space:]]*$/ {i = 1; 
next} + + # Reset state unless this abbrev describes the struct size + a == 1 && !/DW_AT_byte_size/ { a = 0; next } + b == 1 && !/DW_AT_byte_size/ { b = 0; next } + e == 1 && !/DW_AT_byte_size/ { e = 0; next } + j == 1 && !/DW_AT_byte_size/ { j = 0; next } + i == 1 && !/DW_AT_byte_size/ { i = 0; next } + + # Now that we know the size, stop parsing for it + a == 1 {printf("export ALT_STRUCT_SIZE=%d\n", $4); a = 2} + b == 1 {printf("export BUG_STRUCT_SIZE=%d\n", $4); b = 2} + e == 1 {printf("export EX_STRUCT_SIZE=%d\n", $4); e = 2} + j == 1 {printf("export JUMP_STRUCT_SIZE=%d\n", $4); j = 2} + i == 1 {printf("export PRINTK_INDEX_STRUCT_SIZE=%d\n", $4); i = 2} + + # Bail out once we have everything + a == 2 && b == 2 && e == 2 && (j == 2 || skip_j) && (i == 2 || skip_i) {exit}')" + + [[ -n "$SPECIAL_VARS" ]] && eval "$SPECIAL_VARS" + + [[ -z "$ALT_STRUCT_SIZE" ]] && die "can't find special struct alt_instr size" + [[ -z "$BUG_STRUCT_SIZE" ]] && die "can't find special struct bug_entry size" + [[ -z "$EX_STRUCT_SIZE" ]] && die "can't find special struct exception_table_entry size" + [[ -z "$JUMP_STRUCT_SIZE" && "$CONFIG_JUMP_LABEL" -ne 0 ]] && die "can't find special struct jump_entry size" + [[ -z "$PRINTK_INDEX_STRUCT_SIZE" && "$CONFIG_PRINTK_INDEX" -ne 0 ]] && die "can't find special struct pi_entry size" + + return + +} + find_special_section_data() { if [[ "$ARCH" = "ppc64le" ]]; then find_special_section_data_ppc64le return + elif [[ "$ARCH" = "aarch64" ]]; then + find_special_section_data_aarch64 + return fi [[ "$CONFIG_PARAVIRT" -eq 0 ]] && AWK_OPTIONS="-vskip_p=1" diff --git a/kpatch-build/kpatch-elf.c b/kpatch-build/kpatch-elf.c index 7e272e2c8..1b0b0eddd 100644 --- a/kpatch-build/kpatch-elf.c +++ b/kpatch-build/kpatch-elf.c @@ -332,7 +332,24 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) break; } } -#else +#elif defined(__aarch64__) +{ + struct section *sec = find_section_by_name(&kelf->sections, + "__patchable_function_entries"); + 
/* + * If we can't find the __patchable_function_entries section or + * there are no relocations in it then not patchable. + */ + if (!sec || !sec->rela) + return; + list_for_each_entry(rela, &sec->rela->relas, list) { + if (rela->sym->sec && sym->sec == rela->sym->sec) { + sym->has_func_profiling = 1; + break; + } + } +} +#else /* x86_64 */ rela = list_first_entry(&sym->sec->rela->relas, struct rela, list); if ((rela->type != R_X86_64_NONE &&