From 299182e92256d243a42d0c968e24a7fa4e10a60f Mon Sep 17 00:00:00 2001 From: Joe Lawrence Date: Fri, 22 Nov 2024 16:50:07 -0500 Subject: [PATCH] WIP --- kpatch-build/create-diff-object.c | 51 ++++++++++--------------------- kpatch-build/kpatch-elf.c | 18 ++++++----- kpatch-build/kpatch-elf.h | 2 +- 3 files changed, 27 insertions(+), 44 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 3bef3628..bcba5911 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -3682,9 +3682,9 @@ static void kpatch_create_callbacks_objname_rela(struct kpatch_elf *kelf, char * } /* - * Associate __patchable_function_entries relas with their particular - * __patchable_function_entries section (as there may be multiple pfe - * sections. + * Create links between text sections and their corresponding + * __patchable_function_entries sections (as there may be multiple pfe + * sections). */ static void kpatch_set_pfe_link(struct kpatch_elf *kelf) { @@ -3695,16 +3695,14 @@ static void kpatch_set_pfe_link(struct kpatch_elf *kelf) return; list_for_each_entry(sec, &kelf->sections, list) { - if (strcmp(sec->name, "__patchable_function_entries")) { + if (strcmp(sec->name, "__patchable_function_entries")) continue; - } - if (!sec->rela) { + if (!sec->rela) continue; - } - list_for_each_entry(rela, &sec->rela->relas, list) { + + list_for_each_entry(rela, &sec->rela->relas, list) rela->sym->sec->pfe = sec; - } } } @@ -3809,13 +3807,11 @@ static void kpatch_create_pfe_sections(struct kpatch_elf *kelf) * Create a .rela__patchable_function_entries entry which also points to it. */ ALLOC_LINK(pfe_rela, &relasec->relas); - struct symbol *section_sym; /* __patchable_function_entries relocates off the section symbol */ - section_sym = find_symbol_by_name(&kelf->symbols, sym->sec->name); - pfe_rela->sym = section_sym; + pfe_rela->sym = sym->sec->sym; pfe_rela->type = absolute_rela_type(kelf); - pfe_rela->addend = insn_offset - section_sym->sym.st_value; + pfe_rela->addend = insn_offset - sym->sec->sym->sym.st_value; pfe_rela->offset = 0; index++; @@ -3954,13 +3950,7 @@ static void kpatch_create_mcount_sections(struct kpatch_elf *kelf) static void kpatch_create_ftrace_callsite_sections(struct kpatch_elf *kelf, bool has_pfe) { - /* - * If the patched ELF file has patchable_function_entries, - * create those sections in the output ELF -- unless it's - * x86_64, where their presence is only a side effect of - * the build. 
-	 */
-	if (has_pfe && kelf->arch != X86_64)
+	if (has_pfe)
 		kpatch_create_pfe_sections(kelf);
 	else
 		kpatch_create_mcount_sections(kelf);
 }
@@ -4159,10 +4149,6 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf)
 			}
 			break;
 		case X86_64:
-			/*
-			 * x86_64 still uses __fentry__, cannot rely on
-			 * pfe to indicate ftrace call site
-			 */
 			if (sym->sec->rela) {
 				rela = list_first_entry(&sym->sec->rela->relas,
 							struct rela, list);
@@ -4176,17 +4162,12 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf)
 			}
 			break;
 		case S390:
-			if (kpatch_symbol_has_pfe_entry(kelf, sym)) {
-				ERROR("unsupported arch");
-			} else if (sym->sec->rela) {
-
-				/* Check for compiler generated fentry nop - jgnop 0 */
-				insn = sym->sec->data->d_buf;
-				if (insn[0] == 0xc0 && insn[1] == 0x04 &&
-				    insn[2] == 0x00 && insn[3] == 0x00 &&
-				    insn[4] == 0x00 && insn[5] == 0x00)
-					sym->has_func_profiling = 1;
-			}
+			/* Check for compiler generated fentry nop - jgnop 0 */
+			insn = sym->sec->data->d_buf;
+			if (insn[0] == 0xc0 && insn[1] == 0x04 &&
+			    insn[2] == 0x00 && insn[3] == 0x00 &&
+			    insn[4] == 0x00 && insn[5] == 0x00)
+				sym->has_func_profiling = 1;
 			break;
 		default:
 			ERROR("unsupported arch");
diff --git a/kpatch-build/kpatch-elf.c b/kpatch-build/kpatch-elf.c
index 3aa28241..27c0b52f 100755
--- a/kpatch-build/kpatch-elf.c
+++ b/kpatch-build/kpatch-elf.c
@@ -608,8 +608,15 @@ struct kpatch_elf *kpatch_elf_open(const char *name)
 		kpatch_create_rela_list(kelf, relasec);
 	}
 
-	if (find_section_by_name(&kelf->sections, "__patchable_function_entries"))
-		kelf->has_pfe = true;
+	/*
+	 * x86_64's pfe sections are only a side effect of CONFIG_CALL_PADDING,
+	 * which builds with -fpatchable-function-entry=16,16.  These sections
+	 * aren't used by ftrace on this arch, so do not bother reading or
+	 * writing them for x86_64.
+	 */
+	if (kelf->arch != X86_64 &&
+	    find_section_by_name(&kelf->sections, "__patchable_function_entries"))
+		kelf->has_pfe = true;
 
 	return kelf;
 }
@@ -986,13 +993,8 @@ void kpatch_reindex_elements(struct kpatch_elf *kelf)
 		sym->index = index++;
 		if (sym->sec) {
 			sym->sym.st_shndx = (unsigned short)sym->sec->index;
-			if (sym->sec->pfe) {
-				sym->sec->pfe->sh.sh_link = sym->sec->index;
-				if (sym->sec->pfe->rela)
-					sym->sec->pfe->rela->sh.sh_info = sym->sec->index;
-			}
 		} else if (sym->sym.st_shndx != SHN_ABS &&
-			   sym->sym.st_shndx != SHN_LIVEPATCH) {
+				sym->sym.st_shndx != SHN_LIVEPATCH) {
 			sym->sym.st_shndx = SHN_UNDEF;
 		}
 	}
diff --git a/kpatch-build/kpatch-elf.h b/kpatch-build/kpatch-elf.h
index a05f1648..052ca855 100644
--- a/kpatch-build/kpatch-elf.h
+++ b/kpatch-build/kpatch-elf.h
@@ -65,7 +65,7 @@ struct section {
 			struct symbol *secsym, *sym;
 		};
 	};
-	struct section *pfe; /* per-function __patchable_function_entries */
+	struct section *pfe;
 };
 
 enum symbol_strip {
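
For reference, the pfe link pass above (kpatch_set_pfe_link) walks each
__patchable_function_entries section's relocations and points the referenced
text sections back at that pfe section.  The following is a simplified,
self-contained sketch of that idea only; struct sec, struct rela and
set_pfe_links() below are illustrative stand-ins and are not the real
definitions from kpatch-elf.h, which use list_head lists rather than plain
next pointers.

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for kpatch's section/rela types. */
struct sec;

struct rela {
	struct sec *target_sec;		/* section the rela's symbol belongs to */
	struct rela *next;
};

struct sec {
	const char *name;
	struct rela *relas;		/* relas of the matching .rela section, if any */
	struct sec *pfe;		/* filled in by the pass below */
	struct sec *next;
};

/*
 * Same shape as kpatch_set_pfe_link(): for every
 * __patchable_function_entries section, follow its relocations back to the
 * text sections they reference and record this pfe section on each of them.
 */
static void set_pfe_links(struct sec *sections)
{
	struct sec *s;
	struct rela *r;

	for (s = sections; s; s = s->next) {
		if (strcmp(s->name, "__patchable_function_entries"))
			continue;
		for (r = s->relas; r; r = r->next)
			r->target_sec->pfe = s;
	}
}

int main(void)
{
	/* made-up example data: one text section referenced by one pfe rela */
	struct sec text = { .name = ".text.foo" };
	struct rela r = { .target_sec = &text };
	struct sec pfe = { .name = "__patchable_function_entries", .relas = &r };

	text.next = &pfe;
	set_pfe_links(&text);
	printf("%s -> %s\n", text.name, text.pfe ? text.pfe->name : "(none)");
	return 0;
}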
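Similarly, the S390 branch of kpatch_find_func_profiling_calls() reduces to a
byte-pattern match on the first six bytes of the function's text, looking for
the compiler-generated fentry nop ("jgnop 0", bytes c0 04 00 00 00 00).  A
minimal standalone sketch of that check follows; the byte buffer in main() is
made-up example data standing in for the section contents.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* True if the buffer starts with the s390 "jgnop 0" fentry nop
 * (bytes c0 04 00 00 00 00), mirroring the check in
 * kpatch_find_func_profiling_calls(). */
static bool starts_with_jgnop0(const unsigned char *insn, size_t len)
{
	return len >= 6 &&
	       insn[0] == 0xc0 && insn[1] == 0x04 &&
	       insn[2] == 0x00 && insn[3] == 0x00 &&
	       insn[4] == 0x00 && insn[5] == 0x00;
}

int main(void)
{
	/* made-up example: a jgnop 0 followed by arbitrary instruction bytes */
	const unsigned char text[] = {
		0xc0, 0x04, 0x00, 0x00, 0x00, 0x00, 0xa7, 0xf4,
	};

	printf("has_func_profiling = %d\n", starts_with_jgnop0(text, sizeof(text)));
	return 0;
}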