Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ Supported Architectures

- [x] x86-64
- [x] ppc64le
- [ ] arm64
- [x] arm64
- [x] s390 [upstream prerequisites](doc/s390-upstream-prerequisites.md)

Installation
Expand Down
29 changes: 28 additions & 1 deletion kmod/patch/kpatch-syscall.h
Original file line number Diff line number Diff line change
Expand Up @@ -209,7 +209,34 @@

# endif /* LINUX_VERSION_CODE */

#endif /* CONFIG_X86_64 */
#elif defined(CONFIG_ARM64)

/* arm64/include/asm/syscall_wrapper.h versions */

/*
 * Expand to the first x syscall arguments, pulled from the arm64 argument
 * registers x0-x5 of the pt_regs pointed to by 'regs'.  Mirrors the kernel's
 * SC_ARM64_REGS_TO_ARGS in arm64/include/asm/syscall_wrapper.h, so kpatch'ed
 * syscalls unpack their arguments exactly like the original kernel wrappers.
 */
#define SC_ARM64_REGS_TO_ARGS(x, ...) \
__MAP(x,__SC_ARGS \
,,regs->regs[0],,regs->regs[1],,regs->regs[2] \
,,regs->regs[3],,regs->regs[4],,regs->regs[5])

/*
 * arm64 replacement for the kernel's __SYSCALL_DEFINEx.  Defines the same
 * three-layer wrapper chain as the kernel's syscall_wrapper.h:
 *
 *   __arm64_sys##name  - arch entry point taking pt_regs; this is the symbol
 *                        the syscall table (and thus the livepatch) targets
 *   __se_sys##name     - sign-extension shim: casts each long-sized register
 *                        value back to the declared argument type
 *   __kpatch_do_sys##name - the patch author's actual function body, which
 *                        follows this macro's final line
 *
 * Keeping the same structure and symbol names as the kernel lets
 * create-diff-object correlate the patched syscall with the original.
 */
#define __KPATCH_SYSCALL_DEFINEx(x, name, ...) \
asmlinkage long __arm64_sys##name(const struct pt_regs *regs); \
ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO); \
static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __kpatch_do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
asmlinkage long __arm64_sys##name(const struct pt_regs *regs) \
{ \
return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \
} \
static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
{ \
long ret = __kpatch_do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
__MAP(x,__SC_TEST,__VA_ARGS__); \
__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
return ret; \
} \
static inline long __kpatch_do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))

#endif /* which arch */


#ifndef __KPATCH_SYSCALL_DEFINEx
Expand Down
2 changes: 1 addition & 1 deletion kpatch-build/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ PLUGIN_CFLAGS := $(filter-out -std=gnu11 -Wconversion, $(CFLAGS))
PLUGIN_CFLAGS += -shared -I$(GCC_PLUGINS_DIR)/include \
-Igcc-plugins -fPIC -fno-rtti -O2 -Wall
endif
ifeq ($(filter $(ARCH),s390x x86_64 ppc64le),)
ifeq ($(filter $(ARCH),s390x x86_64 ppc64le aarch64),)
$(error Unsupported architecture ${ARCH}, check https://github.com/dynup/kpatch/#supported-architectures)
endif

Expand Down
162 changes: 147 additions & 15 deletions kpatch-build/create-diff-object.c
Original file line number Diff line number Diff line change
Expand Up @@ -173,6 +173,8 @@ static bool is_gcc6_localentry_bundled_sym(struct kpatch_elf *kelf,
struct symbol *sym)
{
switch(kelf->arch) {
case AARCH64:
return false;
case PPC64:
return ((PPC64_LOCAL_ENTRY_OFFSET(sym->sym.st_other) != 0) &&
sym->sym.st_value == 8);
Expand Down Expand Up @@ -228,6 +230,67 @@ static struct rela *toc_rela(const struct rela *rela)
(unsigned int)rela->addend);
}

/*
 * Mapping symbols are used to mark and label the transitions between code and
 * data in ELF files.  They begin with a "$" dollar sign.  Don't correlate
 * them, as they often all have the same name: "$x" to mark the start of code
 * or "$d" to mark the start of data.
 */
static bool kpatch_is_mapping_symbol(struct kpatch_elf *kelf, struct symbol *sym)
{
	switch (kelf->arch) {
	case AARCH64:
		/*
		 * Per the AAELF64 ABI, mapping symbols are local, untyped
		 * symbols whose names start with '$'.
		 */
		if (sym->name && sym->name[0] == '$' &&
		    sym->type == STT_NOTYPE &&
		    sym->bind == STB_LOCAL)
			return true;
		/* Not a mapping symbol; fall through to the common case. */
		return false;
	case X86_64:
	case PPC64:
	case S390:
		/* These arches don't emit mapping symbols. */
		return false;
	default:
		ERROR("unsupported arch");
	}

	return false;
}

/*
 * Return the number of padding bytes that precede a function's entry point.
 *
 * On arm64, a kernel built with CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS places
 * two NOPs *before* the function symbol (and a `BTI C` + two NOPs at the
 * start of the function).  The padding is reflected in sym->sym.st_value:
 * the symbol's value points past the pre-entry NOPs.  Count the NOPs found
 * between the start of the containing section and the symbol's entry; a
 * valid build has either none (no CALL_OPS padding) or exactly two (8 bytes).
 * Any other count indicates a corrupted or unexpected layout.
 *
 * Other arches have no such pre-entry padding and always return 0.
 */
static unsigned int function_padding_size(struct kpatch_elf *kelf, struct symbol *sym)
{
	unsigned int size = 0;

	switch (kelf->arch) {
	case AARCH64:
	{
		uint8_t *insn = sym->sec->data->d_buf;
		unsigned int i;
		void *insn_end = sym->sec->data->d_buf + sym->sym.st_value;

		/*
		 * Count leading NOPs (0xd503201f, little endian).  Compare
		 * bytes individually so the check is endian safe.
		 */
		for (i = 0; (void *)insn < insn_end; i++, insn += 4)
			if (insn[0] != 0x1f || insn[1] != 0x20 ||
			    insn[2] != 0x03 || insn[3] != 0xd5)
				break;

		if (i == 2)
			size = 8;
		else if (i != 0)
			log_error("function %s within section %s has invalid padding\n",
				  sym->name, sym->sec->name);

		break;
	}
	default:
		break;
	}

	return size;
}

/*
* When compiling with -ffunction-sections and -fdata-sections, almost every
* symbol gets its own dedicated section. We call such symbols "bundled"
Expand All @@ -244,6 +307,8 @@ static void kpatch_bundle_symbols(struct kpatch_elf *kelf)
expected_offset = sym->pfx->sym.st_size;
else if (is_gcc6_localentry_bundled_sym(kelf, sym))
expected_offset = 8;
else if (sym->type == STT_FUNC)
expected_offset = function_padding_size(kelf, sym);
else
expected_offset = 0;

Expand Down Expand Up @@ -622,6 +687,8 @@ static void kpatch_compare_correlated_section(struct section *sec)
*/
if (!strcmp(sec->name, ".rela__mcount_loc") ||
!strcmp(sec->name, "__mcount_loc") ||
!strcmp(sec->name, ".sframe") ||
!strcmp(sec->name, ".rela.sframe") ||
!strcmp(sec->name, ".rela__patchable_function_entries") ||
!strcmp(sec->name, "__patchable_function_entries")) {
sec->status = SAME;
Expand Down Expand Up @@ -706,6 +773,12 @@ static bool insn_is_load_immediate(struct kpatch_elf *kelf, void *addr)

break;

case AARCH64:
/* Verify mov w2 <line number> */
if ((insn[0] & 0b11111) == 0x2 && insn[3] == 0x52)
return true;
break;

default:
ERROR("unsupported arch");
}
Expand Down Expand Up @@ -746,6 +819,7 @@ static bool kpatch_line_macro_change_only(struct kpatch_elf *kelf,
void *data1, *data2, *insn1, *insn2;
struct rela *r, *rela;
bool found, found_any = false;
bool warn_printk_only = (kelf->arch == AARCH64);

if (sec->status != CHANGED ||
is_rela_section(sec) ||
Expand Down Expand Up @@ -809,8 +883,15 @@ static bool kpatch_line_macro_change_only(struct kpatch_elf *kelf,
!strncmp(rela->sym->name, "__func__.", 9))
continue;

if (!strcmp(rela->sym->name, "__warn_printk")) {
found = true;
break;
}

if (warn_printk_only)
return false;

if (!strncmp(rela->sym->name, "warn_slowpath_", 14) ||
!strcmp(rela->sym->name, "__warn_printk") ||
!strcmp(rela->sym->name, "__might_sleep") ||
!strcmp(rela->sym->name, "___might_sleep") ||
!strcmp(rela->sym->name, "__might_fault") ||
Expand Down Expand Up @@ -1075,15 +1156,15 @@ static void kpatch_correlate_sections(struct list_head *seclist_orig,
}
}

static void kpatch_correlate_symbols(struct list_head *symlist_orig,
struct list_head *symlist_patched)
static void kpatch_correlate_symbols(struct kpatch_elf *kelf_orig,
struct kpatch_elf *kelf_patched)
{
struct symbol *sym_orig, *sym_patched;

list_for_each_entry(sym_orig, symlist_orig, list) {
list_for_each_entry(sym_orig, &kelf_orig->symbols, list) {
if (sym_orig->twin)
continue;
list_for_each_entry(sym_patched, symlist_patched, list) {
list_for_each_entry(sym_patched, &kelf_patched->symbols, list) {
if (kpatch_mangled_strcmp(sym_orig->name, sym_patched->name) ||
sym_orig->type != sym_patched->type || sym_patched->twin)
continue;
Expand All @@ -1103,6 +1184,9 @@ static void kpatch_correlate_symbols(struct list_head *symlist_orig,
!strncmp(sym_orig->name, ".LC", 3))
continue;

if (kpatch_is_mapping_symbol(kelf_orig, sym_orig))
continue;

/* group section symbols must have correlated sections */
if (sym_orig->sec &&
sym_orig->sec->sh.sh_type == SHT_GROUP &&
Expand Down Expand Up @@ -1508,7 +1592,7 @@ static void kpatch_correlate_elfs(struct kpatch_elf *kelf_orig,
struct kpatch_elf *kelf_patched)
{
kpatch_correlate_sections(&kelf_orig->sections, &kelf_patched->sections);
kpatch_correlate_symbols(&kelf_orig->symbols, &kelf_patched->symbols);
kpatch_correlate_symbols(kelf_orig, kelf_patched);
}

static void kpatch_compare_correlated_elements(struct kpatch_elf *kelf)
Expand Down Expand Up @@ -1561,6 +1645,13 @@ static void kpatch_replace_sections_syms(struct kpatch_elf *kelf)
if (!is_rela_section(relasec) || is_debug_section(relasec))
continue;

/*
* We regenerate __patchable_function_entries from scratch so
* don't bother replacing section symbols in its relasec.
*/
if (is_patchable_function_entries_section(relasec))
continue;

list_for_each_entry(rela, &relasec->relas, list) {

if (rela->sym->type != STT_SECTION || !rela->sym->sec)
Expand Down Expand Up @@ -1624,7 +1715,8 @@ static void kpatch_replace_sections_syms(struct kpatch_elf *kelf)

if (is_text_section(relasec->base) &&
!is_text_section(sym->sec) &&
rela->type == R_X86_64_32S &&
(rela->type == R_X86_64_32S ||
rela->type == R_AARCH64_ABS64) &&
rela->addend == (long)sym->sec->sh.sh_size &&
end == (long)sym->sec->sh.sh_size) {

Expand Down Expand Up @@ -1661,6 +1753,9 @@ static void kpatch_replace_sections_syms(struct kpatch_elf *kelf)
*/
} else if (target_off == start && target_off == end) {

if(kpatch_is_mapping_symbol(kelf, sym))
continue;

/*
* Allow replacement for references to
* empty symbols.
Expand Down Expand Up @@ -1700,8 +1795,8 @@ static void kpatch_check_func_profiling_calls(struct kpatch_elf *kelf)
(sym->parent && sym->parent->status == CHANGED))
continue;
if (!sym->twin->has_func_profiling) {
log_error("function %s has no fentry/mcount call, unable to patch\n",
sym->name);
log_error("function %s doesn't have patchable function entry, unable to patch\n",
sym->name);
errs++;
}
}
Expand Down Expand Up @@ -1905,6 +2000,7 @@ static void kpatch_include_standard_elements(struct kpatch_elf *kelf)
!strcmp(sec->name, ".symtab") ||
!strcmp(sec->name, ".toc") ||
!strcmp(sec->name, ".rodata") ||
!strcmp(sec->name, ".rodata.str") ||
is_string_literal_section(sec)) {
kpatch_include_section(sec);
}
Expand Down Expand Up @@ -2493,28 +2589,28 @@ static bool static_call_sites_group_filter(struct lookup_table *lookup,
static struct special_section special_sections[] = {
{
.name = "__bug_table",
.arch = X86_64 | PPC64 | S390,
.arch = AARCH64 | X86_64 | PPC64 | S390,
.group_size = bug_table_group_size,
},
{
.name = ".fixup",
.arch = X86_64 | PPC64 | S390,
.arch = AARCH64 | X86_64 | PPC64 | S390,
.group_size = fixup_group_size,
},
{
.name = "__ex_table", /* must come after .fixup */
.arch = X86_64 | PPC64 | S390,
.arch = AARCH64 | X86_64 | PPC64 | S390,
.group_size = ex_table_group_size,
},
{
.name = "__jump_table",
.arch = X86_64 | PPC64 | S390,
.arch = AARCH64 | X86_64 | PPC64 | S390,
.group_size = jump_table_group_size,
.group_filter = jump_table_group_filter,
},
{
.name = ".printk_index",
.arch = X86_64 | PPC64 | S390,
.arch = AARCH64 | X86_64 | PPC64 | S390,
.group_size = printk_index_group_size,
},
{
Expand All @@ -2529,7 +2625,7 @@ static struct special_section special_sections[] = {
},
{
.name = ".altinstructions",
.arch = X86_64 | S390,
.arch = AARCH64 | X86_64 | S390,
.group_size = altinstructions_group_size,
},
{
Expand Down Expand Up @@ -3847,6 +3943,38 @@ static void kpatch_create_ftrace_callsite_sections(struct kpatch_elf *kelf)
}

switch(kelf->arch) {
case AARCH64: {
unsigned char *insn = sym->sec->data->d_buf;
int padding;
int i;

/*
* Skip the padding NOPs added by CALL_OPS.
*/
padding = function_padding_size(kelf, sym);
insn += padding;

/*
* If BTI (Branch Target Identification) is enabled then there
* might be an additional 'BTI C' instruction before the two
* patchable function entry 'NOP's.
* i.e. 0xd503245f (little endian)
*/
if (insn[0] == 0x5f) {
if (insn[1] != 0x24 || insn[2] != 0x03 || insn[3] != 0xd5)
ERROR("%s: unexpected instruction in patch section of function\n", sym->name);
if (!padding)
insn_offset += 4;
insn += 4;
}
for (i = 0; i < 8; i += 4) {
/* We expect a NOP i.e. 0xd503201f (little endian) */
if (insn[i] != 0x1f || insn[i + 1] != 0x20 ||
insn[i + 2] != 0x03 || insn [i + 3] != 0xd5)
ERROR("%s: unexpected instruction in patch section of function\n", sym->name);
}
break;
}
case PPC64: {
unsigned char *insn;

Expand Down Expand Up @@ -4180,6 +4308,10 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf)
insn[4] == 0x00 && insn[5] == 0x00)
sym->has_func_profiling = 1;
break;
case AARCH64:
if (kpatch_symbol_has_pfe_entry(kelf, sym))
sym->has_func_profiling = 1;
break;
default:
ERROR("unsupported arch");
}
Expand Down
5 changes: 4 additions & 1 deletion kpatch-build/kpatch-build
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@
# - Builds the patched kernel/module and monitors changed objects
# - Builds the patched objects with gcc flags -f[function|data]-sections
# - Runs kpatch tools to create and link the patch kernel module
VERSION=0.9.9
VERSION=0.9.10

set -o pipefail

Expand Down Expand Up @@ -402,6 +402,9 @@ find_special_section_data() {
"s390x")
check[a]=true # alt_instr
;;
"aarch64")
check[a]=true # alt_instr
;;
esac

# Kernel CONFIG_ features
Expand Down
Loading