ia64: drop marked broken DISCONTIGMEM and VIRTUAL_MEM_MAP
DISCONTIGMEM was marked BROKEN in 5.11. Let's remove it.

Booted SPARSEMEM successfully on rx3600.

Link: https://lkml.kernel.org/r/20210404193440.2615358-1-slyfox@gentoo.org
Signed-off-by: Sergei Trofimovich <slyfox@gentoo.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Sergei Trofimovich authored and torvalds committed Apr 30, 2021
1 parent 5f28bde commit 9187592
Showing 13 changed files with 4 additions and 331 deletions.
23 changes: 0 additions & 23 deletions arch/ia64/Kconfig
@@ -286,15 +286,6 @@ config FORCE_CPEI_RETARGET
config ARCH_SELECT_MEMORY_MODEL
def_bool y

config ARCH_DISCONTIGMEM_ENABLE
def_bool y
depends on BROKEN
help
Say Y to support efficient handling of discontiguous physical memory,
for architectures which are either NUMA (Non-Uniform Memory Access)
or have huge holes in the physical address space for other reasons.
See <file:Documentation/vm/numa.rst> for more.

config ARCH_FLATMEM_ENABLE
def_bool y

@@ -325,22 +316,8 @@ config NODES_SHIFT
MAX_NUMNODES will be 2^(This value).
If in doubt, use the default.

# VIRTUAL_MEM_MAP and FLAT_NODE_MEM_MAP are functionally equivalent.
# VIRTUAL_MEM_MAP has been retained for historical reasons.
config VIRTUAL_MEM_MAP
bool "Virtual mem map"
depends on !SPARSEMEM && !FLATMEM
default y
help
Say Y to compile the kernel with support for a virtual mem map.
This code also only takes effect if a memory hole of greater than
1 Gb is found during boot. You must turn this option on if you
require the DISCONTIGMEM option for your machine. If you are
unsure, say Y.

config HOLES_IN_ZONE
bool
default y if VIRTUAL_MEM_MAP

config HAVE_ARCH_NODEDATA_EXTENSION
def_bool y
1 change: 0 additions & 1 deletion arch/ia64/configs/bigsur_defconfig
@@ -9,7 +9,6 @@ CONFIG_SGI_PARTITION=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_PREEMPT=y
# CONFIG_VIRTUAL_MEM_MAP is not set
CONFIG_IA64_PALINFO=y
CONFIG_EFI_VARS=y
CONFIG_BINFMT_MISC=m
11 changes: 0 additions & 11 deletions arch/ia64/include/asm/meminit.h
@@ -58,15 +58,4 @@ extern int reserve_elfcorehdr(u64 *start, u64 *end);

extern int register_active_ranges(u64 start, u64 len, int nid);

#ifdef CONFIG_VIRTUAL_MEM_MAP
extern unsigned long VMALLOC_END;
extern struct page *vmem_map;
extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
extern int vmemmap_find_next_valid_pfn(int, int);
#else
static inline int vmemmap_find_next_valid_pfn(int node, int i)
{
return i + 1;
}
#endif
#endif /* meminit_h */
25 changes: 2 additions & 23 deletions arch/ia64/include/asm/page.h
@@ -95,31 +95,10 @@ do { \

#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#ifdef CONFIG_VIRTUAL_MEM_MAP
extern int ia64_pfn_valid (unsigned long pfn);
#else
# define ia64_pfn_valid(pfn) 1
#endif

#ifdef CONFIG_VIRTUAL_MEM_MAP
extern struct page *vmem_map;
#ifdef CONFIG_DISCONTIGMEM
# define page_to_pfn(page) ((unsigned long) (page - vmem_map))
# define pfn_to_page(pfn) (vmem_map + (pfn))
# define __pfn_to_phys(pfn) PFN_PHYS(pfn)
#else
# include <asm-generic/memory_model.h>
#endif
#else
# include <asm-generic/memory_model.h>
#endif
#include <asm-generic/memory_model.h>

#ifdef CONFIG_FLATMEM
# define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
#elif defined(CONFIG_DISCONTIGMEM)
extern unsigned long min_low_pfn;
extern unsigned long max_low_pfn;
# define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
# define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif

#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
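A minimal sketch of the net effect of the page.h hunk above (illustration only, not code added by the commit; the sketch_ names are made up for this note). The ia64_pfn_valid() helper existed only under CONFIG_VIRTUAL_MEM_MAP, so with that option gone the FLATMEM pfn_valid() reduces to a bounds check and the generic <asm-generic/memory_model.h> pfn/page converters are used unconditionally:

/* Rough equivalent of the FLATMEM pfn_valid() left after this commit. */
extern unsigned long max_mapnr;      /* highest mapped pfn, set at boot */

static inline int sketch_pfn_valid(unsigned long pfn)
{
	return pfn < max_mapnr;      /* no ia64_pfn_valid() probe any more */
}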
5 changes: 0 additions & 5 deletions arch/ia64/include/asm/pgtable.h
@@ -223,18 +223,13 @@ ia64_phys_addr_valid (unsigned long addr)


#define VMALLOC_START (RGN_BASE(RGN_GATE) + 0x200000000UL)
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
extern unsigned long VMALLOC_END;
#else
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
/* SPARSEMEM_VMEMMAP uses half of vmalloc... */
# define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 10)))
# define vmemmap ((struct page *)VMALLOC_END)
#else
# define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
#endif
#endif

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
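A rough worked example of the VMALLOC_END constants kept by the pgtable.h hunk above (illustration only; it assumes the common 16 KB ia64 page size, i.e. PAGE_SHIFT = 14, and the SKETCH_ names are made up for this note):

/* Offset of VMALLOC_END above RGN_BASE(RGN_GATE): */
#define SKETCH_PAGE_SHIFT    14
#define SKETCH_SPAN_PLAIN    (1UL << (4 * SKETCH_PAGE_SHIFT - 9))   /* 2^47 */
#define SKETCH_SPAN_VMEMMAP  (1UL << (4 * SKETCH_PAGE_SHIFT - 10))  /* 2^46 */
/* With SPARSEMEM_VMEMMAP the span is halved and the struct page array */
/* (vmemmap) starts right at VMALLOC_END, in the upper half of the region. */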
2 changes: 1 addition & 1 deletion arch/ia64/kernel/Makefile
@@ -9,7 +9,7 @@ endif

extra-y := head.o vmlinux.lds

obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o irq.o irq_ia64.o \
irq_lsapic.o ivt.o pal.o patch.o process.o ptrace.o sal.o \
salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
unwind.o mca.o mca_asm.o topology.o dma-mapping.o iosapic.o acpi.o \
12 changes: 0 additions & 12 deletions arch/ia64/kernel/ia64_ksyms.c

This file was deleted.

2 changes: 1 addition & 1 deletion arch/ia64/kernel/machine_kexec.c
@@ -143,7 +143,7 @@ void machine_kexec(struct kimage *image)

void arch_crash_save_vmcoreinfo(void)
{
#if defined(CONFIG_DISCONTIGMEM) || defined(CONFIG_SPARSEMEM)
#if defined(CONFIG_SPARSEMEM)
VMCOREINFO_SYMBOL(pgdat_list);
VMCOREINFO_LENGTH(pgdat_list, MAX_NUMNODES);
#endif
1 change: 0 additions & 1 deletion arch/ia64/mm/Makefile
@@ -7,6 +7,5 @@ obj-y := init.o fault.o tlb.o extable.o ioremap.o

obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_DISCONTIGMEM) += discontig.o
obj-$(CONFIG_SPARSEMEM) += discontig.o
obj-$(CONFIG_FLATMEM) += contig.o
4 changes: 0 additions & 4 deletions arch/ia64/mm/contig.c
@@ -153,11 +153,7 @@ find_memory (void)
efi_memmap_walk(find_max_min_low_pfn, NULL);
max_pfn = max_low_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
efi_memmap_walk(filter_memory, register_active_ranges);
#else
memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
#endif

find_initrd();

21 changes: 0 additions & 21 deletions arch/ia64/mm/discontig.c
@@ -585,25 +585,6 @@ void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
}
}

static void __init virtual_map_init(void)
{
#ifdef CONFIG_VIRTUAL_MEM_MAP
int node;

VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
sizeof(struct page));
vmem_map = (struct page *) VMALLOC_END;
efi_memmap_walk(create_mem_map_page_table, NULL);
printk("Virtual mem_map starts at 0x%p\n", vmem_map);

for_each_online_node(node) {
unsigned long pfn_offset = mem_data[node].min_pfn;

NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
}
#endif
}

/**
* paging_init - setup page tables
*
@@ -619,8 +600,6 @@ void __init paging_init(void)

sparse_init();

virtual_map_init();

memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
max_zone_pfns[ZONE_DMA32] = max_dma;
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
15 changes: 0 additions & 15 deletions arch/ia64/mm/fault.c
@@ -84,18 +84,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
if (faulthandler_disabled() || !mm)
goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
/*
* If fault is in region 5 and we are in the kernel, we may already
* have the mmap_lock (pfn_valid macro is called during mmap). There
* is no vma for region 5 addr's anyway, so skip getting the semaphore
* and go directly to the exception handling code.
*/

if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
goto bad_area_no_up;
#endif

/*
* This is to handle the kprobes on user space access instructions
*/
@@ -213,9 +201,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)

bad_area:
mmap_read_unlock(mm);
#ifdef CONFIG_VIRTUAL_MEM_MAP
bad_area_no_up:
#endif
if ((isr & IA64_ISR_SP)
|| ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
{
(diff for 1 remaining changed file not shown)
