author     Linus Torvalds <torvalds@linux-foundation.org>   2018-10-31 19:25:15 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-10-31 19:25:15 +0300
commit     59fc453b21f767f2fb0ff4dc0a947e9b9c9e6d14 (patch)
tree       42029c432982ebabb462bd16a413a033125fd793
parent     310c7585e8300ddc46211df0757c11e4299ec482 (diff)
parent     2ebe82288b3278b8e538ee8adce4142dbdedd8f6 (diff)
download   linux-59fc453b21f767f2fb0ff4dc0a947e9b9c9e6d14.tar.xz
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:

 - the rest of MM
 - lib/bitmap updates
 - hfs updates
 - fatfs updates
 - various other misc things

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (94 commits)
  mm/gup.c: fix __get_user_pages_fast() comment
  mm: Fix warning in insert_pfn()
  memory-hotplug.rst: add some details about locking internals
  powerpc/powernv: hold device_hotplug_lock when calling memtrace_offline_pages()
  powerpc/powernv: hold device_hotplug_lock when calling device_online()
  mm/memory_hotplug: fix online/offline_pages called w.o. mem_hotplug_lock
  mm/memory_hotplug: make add_memory() take the device_hotplug_lock
  mm/memory_hotplug: make remove_memory() take the device_hotplug_lock
  mm/memblock.c: warn if zero alignment was requested
  memblock: stop using implicit alignment to SMP_CACHE_BYTES
  docs/boot-time-mm: remove bootmem documentation
  mm: remove include/linux/bootmem.h
  memblock: replace BOOTMEM_ALLOC_* with MEMBLOCK variants
  mm: remove nobootmem
  memblock: rename __free_pages_bootmem to memblock_free_pages
  memblock: rename free_all_bootmem to memblock_free_all
  memblock: replace free_bootmem_late with memblock_free_late
  memblock: replace free_bootmem{_node} with memblock_free
  mm: nobootmem: remove bootmem allocation APIs
  memblock: replace alloc_bootmem with memblock_alloc
  ...
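Most of the code churn in this merge is the removal of the bootmem allocator in
favour of memblock. The following is a minimal sketch of the recurring
conversion pattern seen in the per-arch hunks below; the wrapper functions and
call sites here are hypothetical, only the bootmem/memblock interfaces
themselves come from the diff:

    #include <linux/memblock.h>    /* <linux/bootmem.h> is removed by this series */
    #include <linux/ioport.h>      /* struct resource, for the example below */

    /* Hypothetical early-boot allocation, mirroring the converted call sites. */
    static struct resource * __init alloc_resource_example(void)
    {
            /* was: alloc_bootmem(sizeof(struct resource));
             * memblock_alloc() returns zeroed memory and takes an explicit
             * alignment instead of relying on an implicit SMP_CACHE_BYTES. */
            return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
    }

    /* Hypothetical mem_init() fragment: handing boot memory over to the buddy
     * allocator is now spelled memblock_free_all() (was free_all_bootmem()). */
    static void __init mem_init_example(void)
    {
            memblock_free_all();
    }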
-rw-r--r--.mailmap5
-rw-r--r--Documentation/admin-guide/mm/memory-hotplug.rst42
-rw-r--r--Documentation/core-api/boot-time-mm.rst69
-rw-r--r--arch/alpha/Kconfig2
-rw-r--r--arch/alpha/include/asm/processor.h6
-rw-r--r--arch/alpha/kernel/core_apecs.c3
-rw-r--r--arch/alpha/kernel/core_cia.c4
-rw-r--r--arch/alpha/kernel/core_irongate.c4
-rw-r--r--arch/alpha/kernel/core_lca.c3
-rw-r--r--arch/alpha/kernel/core_marvel.c6
-rw-r--r--arch/alpha/kernel/core_mcpcia.c6
-rw-r--r--arch/alpha/kernel/core_t2.c2
-rw-r--r--arch/alpha/kernel/core_titan.c8
-rw-r--r--arch/alpha/kernel/core_tsunami.c8
-rw-r--r--arch/alpha/kernel/core_wildfire.c6
-rw-r--r--arch/alpha/kernel/pci-noop.c6
-rw-r--r--arch/alpha/kernel/pci.c6
-rw-r--r--arch/alpha/kernel/pci_iommu.c14
-rw-r--r--arch/alpha/kernel/setup.c3
-rw-r--r--arch/alpha/kernel/sys_nautilus.c2
-rw-r--r--arch/alpha/mm/init.c4
-rw-r--r--arch/alpha/mm/numa.c1
-rw-r--r--arch/arc/Kconfig2
-rw-r--r--arch/arc/include/asm/processor.h8
-rw-r--r--arch/arc/kernel/unwind.c6
-rw-r--r--arch/arc/mm/highmem.c4
-rw-r--r--arch/arc/mm/init.c3
-rw-r--r--arch/arm/Kconfig2
-rw-r--r--arch/arm/include/asm/processor.h6
-rw-r--r--arch/arm/kernel/devtree.c1
-rw-r--r--arch/arm/kernel/setup.c5
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c10
-rw-r--r--arch/arm/mm/dma-mapping.c1
-rw-r--r--arch/arm/mm/init.c3
-rw-r--r--arch/arm/mm/mmu.c2
-rw-r--r--arch/arm/xen/mm.c1
-rw-r--r--arch/arm/xen/p2m.c2
-rw-r--r--arch/arm64/Kconfig2
-rw-r--r--arch/arm64/include/asm/processor.h7
-rw-r--r--arch/arm64/kernel/acpi.c1
-rw-r--r--arch/arm64/kernel/acpi_numa.c1
-rw-r--r--arch/arm64/kernel/setup.c6
-rw-r--r--arch/arm64/mm/dma-mapping.c2
-rw-r--r--arch/arm64/mm/init.c5
-rw-r--r--arch/arm64/mm/kasan_init.c3
-rw-r--r--arch/arm64/mm/mmu.c2
-rw-r--r--arch/arm64/mm/numa.c5
-rw-r--r--arch/c6x/Kconfig1
-rw-r--r--arch/c6x/include/asm/processor.h11
-rw-r--r--arch/c6x/kernel/setup.c1
-rw-r--r--arch/c6x/mm/dma-coherent.c4
-rw-r--r--arch/c6x/mm/init.c7
-rw-r--r--arch/csky/Kconfig2
-rw-r--r--arch/csky/include/asm/processor.h6
-rw-r--r--arch/csky/kernel/setup.c1
-rw-r--r--arch/csky/mm/highmem.c4
-rw-r--r--arch/csky/mm/init.c3
-rw-r--r--arch/h8300/Kconfig2
-rw-r--r--arch/h8300/include/asm/processor.h6
-rw-r--r--arch/h8300/kernel/setup.c1
-rw-r--r--arch/h8300/mm/init.c6
-rw-r--r--arch/hexagon/Kconfig2
-rw-r--r--arch/hexagon/include/asm/processor.h3
-rw-r--r--arch/hexagon/kernel/dma.c2
-rw-r--r--arch/hexagon/kernel/setup.c2
-rw-r--r--arch/hexagon/mm/init.c3
-rw-r--r--arch/ia64/Kconfig2
-rw-r--r--arch/ia64/include/asm/processor.h6
-rw-r--r--arch/ia64/kernel/crash.c2
-rw-r--r--arch/ia64/kernel/efi.c2
-rw-r--r--arch/ia64/kernel/ia64_ksyms.c2
-rw-r--r--arch/ia64/kernel/iosapic.c2
-rw-r--r--arch/ia64/kernel/mca.c10
-rw-r--r--arch/ia64/kernel/mca_drv.c2
-rw-r--r--arch/ia64/kernel/setup.c1
-rw-r--r--arch/ia64/kernel/signal.c4
-rw-r--r--arch/ia64/kernel/smpboot.c2
-rw-r--r--arch/ia64/kernel/topology.c2
-rw-r--r--arch/ia64/kernel/unwind.c2
-rw-r--r--arch/ia64/mm/contig.c6
-rw-r--r--arch/ia64/mm/discontig.c7
-rw-r--r--arch/ia64/mm/init.c11
-rw-r--r--arch/ia64/mm/numa.c2
-rw-r--r--arch/ia64/mm/tlb.c8
-rw-r--r--arch/ia64/pci/pci.c2
-rw-r--r--arch/ia64/sn/kernel/bte.c2
-rw-r--r--arch/ia64/sn/kernel/io_common.c11
-rw-r--r--arch/ia64/sn/kernel/setup.c7
-rw-r--r--arch/m68k/Kconfig2
-rw-r--r--arch/m68k/atari/stram.c5
-rw-r--r--arch/m68k/coldfire/m54xx.c2
-rw-r--r--arch/m68k/include/asm/processor.h6
-rw-r--r--arch/m68k/kernel/setup_mm.c1
-rw-r--r--arch/m68k/kernel/setup_no.c1
-rw-r--r--arch/m68k/kernel/uboot.c2
-rw-r--r--arch/m68k/mm/init.c6
-rw-r--r--arch/m68k/mm/mcfmmu.c5
-rw-r--r--arch/m68k/mm/motorola.c8
-rw-r--r--arch/m68k/mm/sun3mmu.c6
-rw-r--r--arch/m68k/sun3/config.c2
-rw-r--r--arch/m68k/sun3/dvma.c2
-rw-r--r--arch/m68k/sun3/mmu_emu.c2
-rw-r--r--arch/m68k/sun3/sun3dvma.c5
-rw-r--r--arch/m68k/sun3x/dvma.c2
-rw-r--r--arch/microblaze/Kconfig2
-rw-r--r--arch/microblaze/include/asm/processor.h12
-rw-r--r--arch/microblaze/mm/consistent.c2
-rw-r--r--arch/microblaze/mm/init.c7
-rw-r--r--arch/microblaze/pci/pci-common.c2
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/mips/ar7/memory.c2
-rw-r--r--arch/mips/ath79/setup.c2
-rw-r--r--arch/mips/bcm63xx/prom.c2
-rw-r--r--arch/mips/bcm63xx/setup.c2
-rw-r--r--arch/mips/bmips/setup.c2
-rw-r--r--arch/mips/cavium-octeon/dma-octeon.c4
-rw-r--r--arch/mips/dec/prom/memory.c2
-rw-r--r--arch/mips/emma/common/prom.c2
-rw-r--r--arch/mips/fw/arc/memory.c2
-rw-r--r--arch/mips/include/asm/processor.h5
-rw-r--r--arch/mips/jazz/jazzdma.c2
-rw-r--r--arch/mips/kernel/crash.c2
-rw-r--r--arch/mips/kernel/crash_dump.c2
-rw-r--r--arch/mips/kernel/prom.c2
-rw-r--r--arch/mips/kernel/setup.c7
-rw-r--r--arch/mips/kernel/traps.c3
-rw-r--r--arch/mips/kernel/vpe.c2
-rw-r--r--arch/mips/kvm/commpage.c2
-rw-r--r--arch/mips/kvm/dyntrans.c2
-rw-r--r--arch/mips/kvm/emulate.c2
-rw-r--r--arch/mips/kvm/interrupt.c2
-rw-r--r--arch/mips/kvm/mips.c2
-rw-r--r--arch/mips/lantiq/prom.c2
-rw-r--r--arch/mips/lasat/prom.c2
-rw-r--r--arch/mips/loongson64/common/init.c2
-rw-r--r--arch/mips/loongson64/loongson-3/numa.c3
-rw-r--r--arch/mips/mm/init.c7
-rw-r--r--arch/mips/mm/pgtable-32.c2
-rw-r--r--arch/mips/mti-malta/malta-memory.c2
-rw-r--r--arch/mips/netlogic/xlp/dt.c2
-rw-r--r--arch/mips/pci/pci-legacy.c2
-rw-r--r--arch/mips/pci/pci.c2
-rw-r--r--arch/mips/ralink/of.c2
-rw-r--r--arch/mips/rb532/prom.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c3
-rw-r--r--arch/mips/sibyte/common/cfe.c2
-rw-r--r--arch/mips/sibyte/swarm/setup.c2
-rw-r--r--arch/mips/txx9/rbtx4938/prom.c2
-rw-r--r--arch/nds32/Kconfig2
-rw-r--r--arch/nds32/include/asm/processor.h6
-rw-r--r--arch/nds32/kernel/setup.c3
-rw-r--r--arch/nds32/mm/highmem.c2
-rw-r--r--arch/nds32/mm/init.c13
-rw-r--r--arch/nios2/Kconfig2
-rw-r--r--arch/nios2/include/asm/processor.h6
-rw-r--r--arch/nios2/kernel/prom.c2
-rw-r--r--arch/nios2/kernel/setup.c1
-rw-r--r--arch/nios2/mm/init.c4
-rw-r--r--arch/openrisc/Kconfig2
-rw-r--r--arch/openrisc/include/asm/processor.h5
-rw-r--r--arch/openrisc/kernel/setup.c3
-rw-r--r--arch/openrisc/mm/init.c7
-rw-r--r--arch/openrisc/mm/ioremap.c2
-rw-r--r--arch/parisc/Kconfig2
-rw-r--r--arch/parisc/include/asm/processor.h11
-rw-r--r--arch/parisc/mm/init.c3
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/include/asm/processor.h6
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c4
-rw-r--r--arch/powerpc/kernel/paca.c2
-rw-r--r--arch/powerpc/kernel/pci_32.c5
-rw-r--r--arch/powerpc/kernel/prom.c2
-rw-r--r--arch/powerpc/kernel/setup-common.c4
-rw-r--r--arch/powerpc/kernel/setup_32.c10
-rw-r--r--arch/powerpc/kernel/setup_64.c11
-rw-r--r--arch/powerpc/lib/alloc.c4
-rw-r--r--arch/powerpc/mm/hugetlbpage.c1
-rw-r--r--arch/powerpc/mm/mem.c5
-rw-r--r--arch/powerpc/mm/mmu_context_nohash.c9
-rw-r--r--arch/powerpc/mm/numa.c5
-rw-r--r--arch/powerpc/mm/pgtable_32.c2
-rw-r--r--arch/powerpc/mm/ppc_mmu_32.c2
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c2
-rw-r--r--arch/powerpc/platforms/powermac/nvram.c4
-rw-r--r--arch/powerpc/platforms/powernv/memtrace.c8
-rw-r--r--arch/powerpc/platforms/powernv/opal.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c9
-rw-r--r--arch/powerpc/platforms/ps3/setup.c4
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c8
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c2
-rw-r--r--arch/powerpc/sysdev/msi_bitmap.c4
-rw-r--r--arch/riscv/Kconfig2
-rw-r--r--arch/riscv/include/asm/processor.h6
-rw-r--r--arch/riscv/mm/init.c5
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/include/asm/processor.h6
-rw-r--r--arch/s390/kernel/crash_dump.c5
-rw-r--r--arch/s390/kernel/setup.c12
-rw-r--r--arch/s390/kernel/smp.c5
-rw-r--r--arch/s390/kernel/topology.c6
-rw-r--r--arch/s390/kernel/vdso.c2
-rw-r--r--arch/s390/mm/extmem.c2
-rw-r--r--arch/s390/mm/init.c5
-rw-r--r--arch/s390/mm/vmem.c7
-rw-r--r--arch/s390/numa/mode_emu.c3
-rw-r--r--arch/s390/numa/numa.c3
-rw-r--r--arch/s390/numa/toptree.c4
-rw-r--r--arch/sh/Kconfig2
-rw-r--r--arch/sh/include/asm/processor_32.h6
-rw-r--r--arch/sh/include/asm/processor_64.h15
-rw-r--r--arch/sh/mm/init.c9
-rw-r--r--arch/sh/mm/ioremap_fixed.c2
-rw-r--r--arch/sparc/Kconfig2
-rw-r--r--arch/sparc/include/asm/processor_32.h6
-rw-r--r--arch/sparc/include/asm/processor_64.h6
-rw-r--r--arch/sparc/kernel/mdesc.c7
-rw-r--r--arch/sparc/kernel/prom_32.c4
-rw-r--r--arch/sparc/kernel/prom_64.c2
-rw-r--r--arch/sparc/kernel/setup_64.c12
-rw-r--r--arch/sparc/kernel/smp_64.c18
-rw-r--r--arch/sparc/mm/init_32.c5
-rw-r--r--arch/sparc/mm/init_64.c27
-rw-r--r--arch/sparc/mm/srmmu.c12
-rw-r--r--arch/um/Kconfig2
-rw-r--r--arch/um/drivers/net_kern.c4
-rw-r--r--arch/um/drivers/vector_kern.c4
-rw-r--r--arch/um/kernel/initrd.c4
-rw-r--r--arch/um/kernel/mem.c16
-rw-r--r--arch/um/kernel/physmem.c1
-rw-r--r--arch/unicore32/Kconfig2
-rw-r--r--arch/unicore32/include/asm/processor.h6
-rw-r--r--arch/unicore32/kernel/hibernate.c2
-rw-r--r--arch/unicore32/kernel/setup.c5
-rw-r--r--arch/unicore32/mm/init.c7
-rw-r--r--arch/unicore32/mm/mmu.c3
-rw-r--r--arch/x86/Kconfig4
-rw-r--r--arch/x86/include/asm/kexec.h3
-rw-r--r--arch/x86/include/asm/processor.h12
-rw-r--r--arch/x86/kernel/acpi/boot.c5
-rw-r--r--arch/x86/kernel/acpi/sleep.c1
-rw-r--r--arch/x86/kernel/apic/apic.c2
-rw-r--r--arch/x86/kernel/apic/io_apic.c7
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/e820.c6
-rw-r--r--arch/x86/kernel/mpparse.c1
-rw-r--r--arch/x86/kernel/pci-dma.c2
-rw-r--r--arch/x86/kernel/pci-swiotlb.c2
-rw-r--r--arch/x86/kernel/pvclock.c2
-rw-r--r--arch/x86/kernel/setup.c1
-rw-r--r--arch/x86/kernel/setup_percpu.c14
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kernel/tce_64.c6
-rw-r--r--arch/x86/mm/amdtopology.c1
-rw-r--r--arch/x86/mm/fault.c2
-rw-r--r--arch/x86/mm/highmem_32.c4
-rw-r--r--arch/x86/mm/init.c1
-rw-r--r--arch/x86/mm/init_32.c5
-rw-r--r--arch/x86/mm/init_64.c7
-rw-r--r--arch/x86/mm/ioremap.c2
-rw-r--r--arch/x86/mm/kasan_init_64.c11
-rw-r--r--arch/x86/mm/kaslr.c1
-rw-r--r--arch/x86/mm/numa.c3
-rw-r--r--arch/x86/mm/numa_32.c1
-rw-r--r--arch/x86/mm/numa_64.c2
-rw-r--r--arch/x86/mm/numa_emulation.c1
-rw-r--r--arch/x86/mm/pageattr-test.c2
-rw-r--r--arch/x86/mm/pageattr.c2
-rw-r--r--arch/x86/mm/pat.c2
-rw-r--r--arch/x86/mm/physaddr.c2
-rw-r--r--arch/x86/pci/i386.c2
-rw-r--r--arch/x86/platform/efi/efi.c3
-rw-r--r--arch/x86/platform/efi/efi_64.c2
-rw-r--r--arch/x86/platform/efi/quirks.c7
-rw-r--r--arch/x86/platform/olpc/olpc_dt.c4
-rw-r--r--arch/x86/power/hibernate_32.c2
-rw-r--r--arch/x86/um/asm/processor_32.h8
-rw-r--r--arch/x86/um/asm/processor_64.h3
-rw-r--r--arch/x86/xen/enlighten.c2
-rw-r--r--arch/x86/xen/enlighten_pv.c3
-rw-r--r--arch/x86/xen/mmu_pv.c2
-rw-r--r--arch/x86/xen/p2m.c6
-rw-r--r--arch/xtensa/Kconfig2
-rw-r--r--arch/xtensa/include/asm/processor.h8
-rw-r--r--arch/xtensa/kernel/pci.c2
-rw-r--r--arch/xtensa/mm/cache.c2
-rw-r--r--arch/xtensa/mm/init.c4
-rw-r--r--arch/xtensa/mm/kasan_init.c5
-rw-r--r--arch/xtensa/mm/mmu.c4
-rw-r--r--arch/xtensa/platforms/iss/network.c4
-rw-r--r--arch/xtensa/platforms/iss/setup.c2
-rw-r--r--block/blk-settings.c2
-rw-r--r--block/bounce.c2
-rw-r--r--drivers/acpi/acpi_memhotplug.c4
-rw-r--r--drivers/acpi/numa.c1
-rw-r--r--drivers/acpi/tables.c3
-rw-r--r--drivers/base/memory.c22
-rw-r--r--drivers/base/platform.c2
-rw-r--r--drivers/clk/ti/clk.c4
-rw-r--r--drivers/firmware/dmi_scan.c2
-rw-r--r--drivers/firmware/efi/apple-properties.c4
-rw-r--r--drivers/firmware/efi/memmap.c2
-rw-r--r--drivers/firmware/iscsi_ibft_find.c2
-rw-r--r--drivers/firmware/memmap.c5
-rw-r--r--drivers/iommu/mtk_iommu.c2
-rw-r--r--drivers/iommu/mtk_iommu_v1.c2
-rw-r--r--drivers/macintosh/smu.c7
-rw-r--r--drivers/mtd/ar7part.c2
-rw-r--r--drivers/net/arcnet/arc-rimi.c2
-rw-r--r--drivers/net/arcnet/com20020-isa.c2
-rw-r--r--drivers/net/arcnet/com90io.c2
-rw-r--r--drivers/of/fdt.c24
-rw-r--r--drivers/of/of_reserved_mem.c14
-rw-r--r--drivers/of/unittest.c4
-rw-r--r--drivers/s390/char/fs3270.c2
-rw-r--r--drivers/s390/char/tty3270.c2
-rw-r--r--drivers/s390/cio/cmf.c2
-rw-r--r--drivers/s390/virtio/virtio_ccw.c2
-rw-r--r--drivers/sfi/sfi_core.c2
-rw-r--r--drivers/staging/android/ion/Kconfig2
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c2
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c2
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c2
-rw-r--r--drivers/usb/early/xhci-dbc.c14
-rw-r--r--drivers/xen/balloon.c5
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/grant-table.c2
-rw-r--r--drivers/xen/swiotlb-xen.c8
-rw-r--r--drivers/xen/xen-selfballoon.c2
-rw-r--r--fs/dcache.c2
-rw-r--r--fs/fat/dir.c6
-rw-r--r--fs/fat/fat.h4
-rw-r--r--fs/fat/file.c17
-rw-r--r--fs/fat/inode.c9
-rw-r--r--fs/fat/misc.c91
-rw-r--r--fs/fat/namei_msdos.c17
-rw-r--r--fs/fat/namei_vfat.c15
-rw-r--r--fs/hfs/brec.c5
-rw-r--r--fs/hfs/btree.c41
-rw-r--r--fs/hfs/btree.h1
-rw-r--r--fs/hfs/catalog.c16
-rw-r--r--fs/hfs/extent.c10
-rw-r--r--fs/hfs/inode.c2
-rw-r--r--fs/hfsplus/attributes.c10
-rw-r--r--fs/hfsplus/brec.c5
-rw-r--r--fs/hfsplus/btree.c44
-rw-r--r--fs/hfsplus/catalog.c24
-rw-r--r--fs/hfsplus/extents.c8
-rw-r--r--fs/hfsplus/hfsplus_fs.h2
-rw-r--r--fs/hfsplus/inode.c1
-rw-r--r--fs/inode.c2
-rw-r--r--fs/namespace.c2
-rw-r--r--fs/proc/kcore.c2
-rw-r--r--fs/proc/page.c2
-rw-r--r--fs/proc/vmcore.c4
-rw-r--r--fs/pstore/Kconfig1
-rw-r--r--fs/reiserfs/Makefile9
-rw-r--r--fs/reiserfs/xattr.c7
-rw-r--r--include/asm-generic/percpu.h4
-rw-r--r--include/linux/bitmap.h37
-rw-r--r--include/linux/bootmem.h404
-rw-r--r--include/linux/compat.h3
-rw-r--r--include/linux/hmm.h33
-rw-r--r--include/linux/memblock.h165
-rw-r--r--include/linux/memory_hotplug.h4
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/mmzone.h5
-rw-r--r--include/linux/percpu-defs.h6
-rw-r--r--include/linux/rbtree_augmented.h4
-rw-r--r--include/linux/signal.h6
-rw-r--r--init/do_mounts.c31
-rw-r--r--init/main.c15
-rw-r--r--ipc/ipc_sysctl.c30
-rw-r--r--ipc/util.h9
-rw-r--r--kernel/bounds.c4
-rw-r--r--kernel/dma/direct.c2
-rw-r--r--kernel/dma/swiotlb.c8
-rw-r--r--kernel/fail_function.c3
-rw-r--r--kernel/futex.c2
-rw-r--r--kernel/locking/qspinlock_paravirt.h2
-rw-r--r--kernel/panic.c10
-rw-r--r--kernel/pid.c2
-rw-r--r--kernel/power/snapshot.c5
-rw-r--r--kernel/printk/printk.c5
-rw-r--r--kernel/profile.c2
-rw-r--r--kernel/signal.c2
-rw-r--r--lib/Kconfig.debug3
-rw-r--r--lib/bitmap.c22
-rw-r--r--lib/cpumask.c4
-rw-r--r--lib/kstrtox.c16
-rw-r--r--lib/lz4/lz4_decompress.c481
-rw-r--r--lib/lz4/lz4defs.h9
-rw-r--r--lib/parser.c16
-rw-r--r--lib/sg_pool.c7
-rw-r--r--lib/zlib_inflate/inflate.c12
-rw-r--r--mm/Kconfig9
-rw-r--r--mm/Makefile8
-rw-r--r--mm/bootmem.c811
-rw-r--r--mm/gup.c4
-rw-r--r--mm/gup_benchmark.c3
-rw-r--r--mm/hmm.c134
-rw-r--r--mm/hugetlb.c6
-rw-r--r--mm/internal.h2
-rw-r--r--mm/kasan/kasan_init.c7
-rw-r--r--mm/kmemleak.c2
-rw-r--r--mm/memblock.c164
-rw-r--r--mm/memory.c9
-rw-r--r--mm/memory_hotplug.c60
-rw-r--r--mm/nobootmem.c445
-rw-r--r--mm/page_alloc.c20
-rw-r--r--mm/page_ext.c6
-rw-r--r--mm/page_idle.c2
-rw-r--r--mm/page_owner.c2
-rw-r--r--mm/page_poison.c2
-rw-r--r--mm/page_vma_mapped.c24
-rw-r--r--mm/percpu.c48
-rw-r--r--mm/sparse-vmemmap.c6
-rw-r--r--mm/sparse.c19
-rw-r--r--net/ipv4/inet_hashtables.c2
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/sctp/protocol.c2
-rw-r--r--net/xfrm/xfrm_hash.c2
-rwxr-xr-xscripts/checkpatch.pl11
423 files changed, 1918 insertions, 3094 deletions
diff --git a/.mailmap b/.mailmap
index 89f532caf639..a76be45fef6c 100644
--- a/.mailmap
+++ b/.mailmap
@@ -160,6 +160,11 @@ Peter Oruba <peter.oruba@amd.com>
Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
Praveen BP <praveenbp@ti.com>
Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
+Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net>
+Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com>
+Oleksij Rempel <linux@rempel-privat.de> <fixed-term.Oleksij.Rempel@de.bosch.com>
+Oleksij Rempel <linux@rempel-privat.de> <o.rempel@pengutronix.de>
+Oleksij Rempel <linux@rempel-privat.de> <ore@pengutronix.de>
Rajesh Shah <rajesh.shah@intel.com>
Ralf Baechle <ralf@linux-mips.org>
Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
diff --git a/Documentation/admin-guide/mm/memory-hotplug.rst b/Documentation/admin-guide/mm/memory-hotplug.rst
index 25157aec5b31..5c4432c96c4b 100644
--- a/Documentation/admin-guide/mm/memory-hotplug.rst
+++ b/Documentation/admin-guide/mm/memory-hotplug.rst
@@ -5,7 +5,7 @@ Memory Hotplug
==============
:Created: Jul 28 2007
-:Updated: Add description of notifier of memory hotplug: Oct 11 2007
+:Updated: Add some details about locking internals: Aug 20 2018
This document is about memory hotplug including how-to-use and current status.
Because Memory Hotplug is still under development, contents of this text will
@@ -392,6 +392,46 @@ Need more implementation yet....
- Notification completion of remove works by OS to firmware.
- Guard from remove if not yet.
+
+Locking Internals
+=================
+
+When adding/removing memory that uses memory block devices (i.e. ordinary RAM),
+the device_hotplug_lock should be held to:
+
+- synchronize against online/offline requests (e.g. via sysfs). This way, memory
+ block devices can only be accessed (.online/.state attributes) by user
+ space once memory has been fully added. And when removing memory, we
+ know nobody is in critical sections.
+- synchronize against CPU hotplug and similar (e.g. relevant for ACPI and PPC)
+
+Especially, there is a possible lock inversion that is avoided using
+device_hotplug_lock when adding memory and user space tries to online that
+memory faster than expected:
+
+- device_online() will first take the device_lock(), followed by
+ mem_hotplug_lock
+- add_memory_resource() will first take the mem_hotplug_lock, followed by
+ the device_lock() (while creating the devices, during bus_add_device()).
+
+As the device is visible to user space before taking the device_lock(), this
+can result in a lock inversion.
+
+onlining/offlining of memory should be done via device_online()/
+device_offline() - to make sure it is properly synchronized to actions
+via sysfs. Holding device_hotplug_lock is advised (to e.g. protect online_type)
+
+When adding/removing/onlining/offlining memory or adding/removing
+heterogeneous/device memory, we should always hold the mem_hotplug_lock in
+write mode to serialise memory hotplug (e.g. access to global/zone
+variables).
+
+In addition, mem_hotplug_lock (in contrast to device_hotplug_lock) in read
+mode allows for a quite efficient get_online_mems/put_online_mems
+implementation, so code accessing memory can protect from that memory
+vanishing.
+
+
Future Work
===========
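The locking rules documented in the hunk above reduce to a fixed ordering:
device_hotplug_lock outermost, then device_lock(), then mem_hotplug_lock. A
hedged sketch of an onlining path that respects that order follows; the wrapper
function is hypothetical, while lock_device_hotplug()/unlock_device_hotplug()
and device_online() are the existing driver-core helpers the text refers to:

    #include <linux/device.h>
    #include <linux/memory.h>      /* struct memory_block */

    /* Online one memory block device in the documented lock order. */
    static int online_memory_block_example(struct memory_block *mem)
    {
            int rc;

            lock_device_hotplug();          /* outermost: device_hotplug_lock */
            rc = device_online(&mem->dev);  /* takes device_lock(), then      */
                                            /* mem_hotplug_lock, as documented */
            unlock_device_hotplug();        /* nobody left in critical sections */

            return rc;
    }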
diff --git a/Documentation/core-api/boot-time-mm.rst b/Documentation/core-api/boot-time-mm.rst
index 6e12e89a03e0..e5ec9f1a563d 100644
--- a/Documentation/core-api/boot-time-mm.rst
+++ b/Documentation/core-api/boot-time-mm.rst
@@ -5,54 +5,23 @@ Boot time memory management
Early system initialization cannot use "normal" memory management
simply because it is not set up yet. But there is still need to
allocate memory for various data structures, for instance for the
-physical page allocator. To address this, a specialized allocator
-called the :ref:`Boot Memory Allocator <bootmem>`, or bootmem, was
-introduced. Several years later PowerPC developers added a "Logical
-Memory Blocks" allocator, which was later adopted by other
-architectures and renamed to :ref:`memblock <memblock>`. There is also
-a compatibility layer called `nobootmem` that translates bootmem
-allocation interfaces to memblock calls.
+physical page allocator.
-The selection of the early allocator is done using
-``CONFIG_NO_BOOTMEM`` and ``CONFIG_HAVE_MEMBLOCK`` kernel
-configuration options. These options are enabled or disabled
-statically by the architectures' Kconfig files.
-
-* Architectures that rely only on bootmem select
- ``CONFIG_NO_BOOTMEM=n && CONFIG_HAVE_MEMBLOCK=n``.
-* The users of memblock with the nobootmem compatibility layer set
- ``CONFIG_NO_BOOTMEM=y && CONFIG_HAVE_MEMBLOCK=y``.
-* And for those that use both memblock and bootmem the configuration
- includes ``CONFIG_NO_BOOTMEM=n && CONFIG_HAVE_MEMBLOCK=y``.
-
-Whichever allocator is used, it is the responsibility of the
-architecture specific initialization to set it up in
-:c:func:`setup_arch` and tear it down in :c:func:`mem_init` functions.
+A specialized allocator called ``memblock`` performs the
+boot time memory management. The architecture specific initialization
+must set it up in :c:func:`setup_arch` and tear it down in
+:c:func:`mem_init` functions.
Once the early memory management is available it offers a variety of
functions and macros for memory allocations. The allocation request
may be directed to the first (and probably the only) node or to a
particular node in a NUMA system. There are API variants that panic
-when an allocation fails and those that don't. And more recent and
-advanced memblock even allows controlling its own behaviour.
-
-.. _bootmem:
-
-Bootmem
-=======
+when an allocation fails and those that don't.
-(mostly stolen from Mel Gorman's "Understanding the Linux Virtual
-Memory Manager" `book`_)
+Memblock also offers a variety of APIs that control its own behaviour.
-.. _book: https://www.kernel.org/doc/gorman/
-
-.. kernel-doc:: mm/bootmem.c
- :doc: bootmem overview
-
-.. _memblock:
-
-Memblock
-========
+Memblock Overview
+=================
.. kernel-doc:: mm/memblock.c
:doc: memblock overview
@@ -61,26 +30,6 @@ Memblock
Functions and structures
========================
-Common API
-----------
-
-The functions that are described in this section are available
-regardless of what early memory manager is enabled.
-
-.. kernel-doc:: mm/nobootmem.c
-
-Bootmem specific API
---------------------
-
-These interfaces available only with bootmem, i.e when ``CONFIG_NO_BOOTMEM=n``
-
-.. kernel-doc:: include/linux/bootmem.h
-.. kernel-doc:: mm/bootmem.c
- :functions:
-
-Memblock specific API
----------------------
-
Here is the description of memblock data structures, functions and
macros. Some of them are actually internal, but since they are
documented it would be silly to omit them. Besides, reading the
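The rewritten boot-time-mm document above also mentions memblock's
node-directed and non-panicking allocation variants. A hedged sketch using the
interfaces that show up in the conversion hunks that follow; the wrapper
helpers here are hypothetical:

    #include <linux/memblock.h>

    /* Node-directed allocation: panics if no memory can be found. */
    static void * __init node_alloc_example(size_t size, int nid)
    {
            return memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
                                          __pa(MAX_DMA_ADDRESS),
                                          MEMBLOCK_ALLOC_ACCESSIBLE, nid);
    }

    /* Non-panicking variant: returns NULL so the caller can fall back. */
    static void * __init nopanic_alloc_example(size_t size)
    {
            return memblock_alloc_from_nopanic(size, SMP_CACHE_BYTES,
                                               MAX_DMA_ADDRESS);
    }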
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 620b0a711ee4..5b4f88363453 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -31,8 +31,6 @@ config ALPHA
select ODD_RT_SIGACTION
select OLD_SIGSUSPEND
select CPU_NO_EFFICIENT_FFS if !ALPHA_EV67
- select HAVE_MEMBLOCK
- select NO_BOOTMEM
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h
index cb05d045efe3..6100431da07a 100644
--- a/arch/alpha/include/asm/processor.h
+++ b/arch/alpha/include/asm/processor.h
@@ -11,12 +11,6 @@
#include <linux/personality.h> /* for ADDR_LIMIT_32BIT */
/*
- * Returns current instruction pointer ("program counter").
- */
-#define current_text_addr() \
- ({ void *__pc; __asm__ ("br %0,.+4" : "=r"(__pc)); __pc; })
-
-/*
* We have a 42-bit user address space: 4TB user VM...
*/
#define TASK_SIZE (0x40000000000UL)
diff --git a/arch/alpha/kernel/core_apecs.c b/arch/alpha/kernel/core_apecs.c
index 1bf3eef34c22..6df765ff2b10 100644
--- a/arch/alpha/kernel/core_apecs.c
+++ b/arch/alpha/kernel/core_apecs.c
@@ -346,7 +346,8 @@ apecs_init_arch(void)
* Window 1 is direct access 1GB at 1GB
* Window 2 is scatter-gather 8MB at 8MB (for isa)
*/
- hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
+ SMP_CACHE_BYTES);
hose->sg_pci = NULL;
__direct_map_base = 0x40000000;
__direct_map_size = 0x40000000;
diff --git a/arch/alpha/kernel/core_cia.c b/arch/alpha/kernel/core_cia.c
index 4b38386f6e62..867e8730b0c5 100644
--- a/arch/alpha/kernel/core_cia.c
+++ b/arch/alpha/kernel/core_cia.c
@@ -21,7 +21,7 @@
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/ptrace.h>
#include <asm/mce.h>
@@ -331,7 +331,7 @@ cia_prepare_tbia_workaround(int window)
long i;
/* Use minimal 1K map. */
- ppte = __alloc_bootmem(CIA_BROKEN_TBIA_SIZE, 32768, 0);
+ ppte = memblock_alloc_from(CIA_BROKEN_TBIA_SIZE, 32768, 0);
pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;
for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
diff --git a/arch/alpha/kernel/core_irongate.c b/arch/alpha/kernel/core_irongate.c
index f70986683fc6..a9fd133a7fb2 100644
--- a/arch/alpha/kernel/core_irongate.c
+++ b/arch/alpha/kernel/core_irongate.c
@@ -20,7 +20,6 @@
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/initrd.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <asm/ptrace.h>
@@ -234,8 +233,7 @@ albacore_init_arch(void)
unsigned long size;
size = initrd_end - initrd_start;
- free_bootmem_node(NODE_DATA(0), __pa(initrd_start),
- PAGE_ALIGN(size));
+ memblock_free(__pa(initrd_start), PAGE_ALIGN(size));
if (!move_initrd(pci_mem))
printk("irongate_init_arch: initrd too big "
"(%ldK)\ndisabling initrd\n",
diff --git a/arch/alpha/kernel/core_lca.c b/arch/alpha/kernel/core_lca.c
index 81c0c43635b0..57e0750419f2 100644
--- a/arch/alpha/kernel/core_lca.c
+++ b/arch/alpha/kernel/core_lca.c
@@ -275,7 +275,8 @@ lca_init_arch(void)
* Note that we do not try to save any of the DMA window CSRs
* before setting them, since we cannot read those CSRs on LCA.
*/
- hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
+ SMP_CACHE_BYTES);
hose->sg_pci = NULL;
__direct_map_base = 0x40000000;
__direct_map_size = 0x40000000;
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index bdebb8c206f1..c1d0c18c71ca 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -18,7 +18,7 @@
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
#include <linux/module.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/ptrace.h>
#include <asm/smp.h>
@@ -82,7 +82,7 @@ mk_resource_name(int pe, int port, char *str)
char *name;
sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port);
- name = alloc_bootmem(strlen(tmp) + 1);
+ name = memblock_alloc(strlen(tmp) + 1, SMP_CACHE_BYTES);
strcpy(name, tmp);
return name;
@@ -117,7 +117,7 @@ alloc_io7(unsigned int pe)
return NULL;
}
- io7 = alloc_bootmem(sizeof(*io7));
+ io7 = memblock_alloc(sizeof(*io7), SMP_CACHE_BYTES);
io7->pe = pe;
raw_spin_lock_init(&io7->irq_lock);
diff --git a/arch/alpha/kernel/core_mcpcia.c b/arch/alpha/kernel/core_mcpcia.c
index b1549db54260..74b1d018124c 100644
--- a/arch/alpha/kernel/core_mcpcia.c
+++ b/arch/alpha/kernel/core_mcpcia.c
@@ -364,9 +364,11 @@ mcpcia_startup_hose(struct pci_controller *hose)
* Window 1 is scatter-gather (up to) 1GB at 1GB (for pci)
* Window 2 is direct access 2GB at 2GB
*/
- hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
+ SMP_CACHE_BYTES);
hose->sg_pci = iommu_arena_new(hose, 0x40000000,
- size_for_memory(0x40000000), 0);
+ size_for_memory(0x40000000),
+ SMP_CACHE_BYTES);
__direct_map_base = 0x80000000;
__direct_map_size = 0x80000000;
diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c
index 2c00b61ca379..98d5b6ff8a76 100644
--- a/arch/alpha/kernel/core_t2.c
+++ b/arch/alpha/kernel/core_t2.c
@@ -351,7 +351,7 @@ t2_sg_map_window2(struct pci_controller *hose,
/* Note we can only do 1 SG window, as the other is for direct, so
do an ISA SG area, especially for the floppy. */
- hose->sg_isa = iommu_arena_new(hose, base, length, 0);
+ hose->sg_isa = iommu_arena_new(hose, base, length, SMP_CACHE_BYTES);
hose->sg_pci = NULL;
temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
diff --git a/arch/alpha/kernel/core_titan.c b/arch/alpha/kernel/core_titan.c
index 132b06bdf903..2a2820fb1be6 100644
--- a/arch/alpha/kernel/core_titan.c
+++ b/arch/alpha/kernel/core_titan.c
@@ -16,7 +16,7 @@
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/ptrace.h>
#include <asm/smp.h>
@@ -316,10 +316,12 @@ titan_init_one_pachip_port(titan_pachip_port *port, int index)
* Window 1 is direct access 1GB at 2GB
* Window 2 is scatter-gather 1GB at 3GB
*/
- hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
+ SMP_CACHE_BYTES);
hose->sg_isa->align_entry = 8; /* 64KB for ISA */
- hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, 0);
+ hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000,
+ SMP_CACHE_BYTES);
hose->sg_pci->align_entry = 4; /* Titan caches 4 PTEs at a time */
port->wsba[0].csr = hose->sg_isa->dma_base | 3;
diff --git a/arch/alpha/kernel/core_tsunami.c b/arch/alpha/kernel/core_tsunami.c
index e7c956ea46b6..fc1ab73f23de 100644
--- a/arch/alpha/kernel/core_tsunami.c
+++ b/arch/alpha/kernel/core_tsunami.c
@@ -17,7 +17,7 @@
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/ptrace.h>
#include <asm/smp.h>
@@ -319,12 +319,14 @@ tsunami_init_one_pchip(tsunami_pchip *pchip, int index)
* NOTE: we need the align_entry settings for Acer devices on ES40,
* specifically floppy and IDE when memory is larger than 2GB.
*/
- hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
+ SMP_CACHE_BYTES);
/* Initially set for 4 PTEs, but will be overridden to 64K for ISA. */
hose->sg_isa->align_entry = 4;
hose->sg_pci = iommu_arena_new(hose, 0x40000000,
- size_for_memory(0x40000000), 0);
+ size_for_memory(0x40000000),
+ SMP_CACHE_BYTES);
hose->sg_pci->align_entry = 4; /* Tsunami caches 4 PTEs at a time */
__direct_map_base = 0x80000000;
diff --git a/arch/alpha/kernel/core_wildfire.c b/arch/alpha/kernel/core_wildfire.c
index cad36fc6ed7d..353c03d15442 100644
--- a/arch/alpha/kernel/core_wildfire.c
+++ b/arch/alpha/kernel/core_wildfire.c
@@ -111,8 +111,10 @@ wildfire_init_hose(int qbbno, int hoseno)
* ??? We ought to scale window 3 memory.
*
*/
- hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
- hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000, 0);
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
+ SMP_CACHE_BYTES);
+ hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000,
+ SMP_CACHE_BYTES);
pci = WILDFIRE_pci(qbbno, hoseno);
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
index c7c5879869d3..091cff3c68fd 100644
--- a/arch/alpha/kernel/pci-noop.c
+++ b/arch/alpha/kernel/pci-noop.c
@@ -7,7 +7,7 @@
#include <linux/pci.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/capability.h>
#include <linux/mm.h>
@@ -33,7 +33,7 @@ alloc_pci_controller(void)
{
struct pci_controller *hose;
- hose = alloc_bootmem(sizeof(*hose));
+ hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES);
*hose_tail = hose;
hose_tail = &hose->next;
@@ -44,7 +44,7 @@ alloc_pci_controller(void)
struct resource * __init
alloc_resource(void)
{
- return alloc_bootmem(sizeof(struct resource));
+ return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
}
SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus,
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index c668c3b7a167..97098127df83 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
@@ -392,7 +392,7 @@ alloc_pci_controller(void)
{
struct pci_controller *hose;
- hose = alloc_bootmem(sizeof(*hose));
+ hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES);
*hose_tail = hose;
hose_tail = &hose->next;
@@ -403,7 +403,7 @@ alloc_pci_controller(void)
struct resource * __init
alloc_resource(void)
{
- return alloc_bootmem(sizeof(struct resource));
+ return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
}
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 6923b0d9c1e1..46e08e0d9181 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -7,7 +7,7 @@
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
@@ -74,26 +74,26 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
#ifdef CONFIG_DISCONTIGMEM
- arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
+ arena = memblock_alloc_node(sizeof(*arena), align, nid);
if (!NODE_DATA(nid) || !arena) {
printk("%s: couldn't allocate arena from node %d\n"
" falling back to system-wide allocation\n",
__func__, nid);
- arena = alloc_bootmem(sizeof(*arena));
+ arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
}
- arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
+ arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid);
if (!NODE_DATA(nid) || !arena->ptes) {
printk("%s: couldn't allocate arena ptes from node %d\n"
" falling back to system-wide allocation\n",
__func__, nid);
- arena->ptes = __alloc_bootmem(mem_size, align, 0);
+ arena->ptes = memblock_alloc_from(mem_size, align, 0);
}
#else /* CONFIG_DISCONTIGMEM */
- arena = alloc_bootmem(sizeof(*arena));
- arena->ptes = __alloc_bootmem(mem_size, align, 0);
+ arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
+ arena->ptes = memblock_alloc_from(mem_size, align, 0);
#endif /* CONFIG_DISCONTIGMEM */
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 4f0d94471bc9..a37fd990bd55 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -29,7 +29,6 @@
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
@@ -294,7 +293,7 @@ move_initrd(unsigned long mem_limit)
unsigned long size;
size = initrd_end - initrd_start;
- start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0);
+ start = memblock_alloc_from(PAGE_ALIGN(size), PAGE_SIZE, 0);
if (!start || __pa(start) + size > mem_limit) {
initrd_start = initrd_end = 0;
return NULL;
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index ff4f54b86c7f..cd9a112d67ff 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -32,7 +32,7 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/reboot.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 9d74520298ab..a42fc5c4db89 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -19,7 +19,7 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
-#include <linux/bootmem.h> /* max_low_pfn */
+#include <linux/memblock.h> /* max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/gfp.h>
@@ -282,7 +282,7 @@ mem_init(void)
{
set_max_mapnr(max_low_pfn);
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
}
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index 26cd925d19b1..74846553e3f1 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -10,7 +10,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/initrd.h>
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index e98c6b8e6186..c9e2a1323536 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -37,14 +37,12 @@ config ARC
select HAVE_KERNEL_LZMA
select HAVE_KPROBES
select HAVE_KRETPROBES
- select HAVE_MEMBLOCK
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select HANDLE_DOMAIN_IRQ
select IRQ_DOMAIN
select MODULES_USE_ELF_RELA
- select NO_BOOTMEM
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 8ee41e988169..10346d6cf926 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -98,14 +98,6 @@ extern void start_thread(struct pt_regs * regs, unsigned long pc,
extern unsigned int get_wchan(struct task_struct *p);
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- * Should the PC register be read instead ? This macro does not seem to
- * be used in many places so this wont be all that bad.
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l; })
-
#endif /* !__ASSEMBLY__ */
/*
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 183391d4d33a..d34f69eb1a95 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -15,7 +15,7 @@
#include <linux/sched.h>
#include <linux/module.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
@@ -181,8 +181,8 @@ static void init_unwind_hdr(struct unwind_table *table,
*/
static void *__init unw_hdr_alloc_early(unsigned long sz)
{
- return __alloc_bootmem_nopanic(sz, sizeof(unsigned int),
- MAX_DMA_ADDRESS);
+ return memblock_alloc_from_nopanic(sz, sizeof(unsigned int),
+ MAX_DMA_ADDRESS);
}
static void *unw_hdr_alloc(unsigned long sz)
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index 77ff64a874a1..48e700151810 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -7,7 +7,7 @@
*
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <asm/processor.h>
@@ -123,7 +123,7 @@ static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
pud_k = pud_offset(pgd_k, kvaddr);
pmd_k = pmd_offset(pud_k, kvaddr);
- pte_k = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+ pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd_k, pte_k);
return pte_k;
}
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index ba145065c579..f8fe5668b30f 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -8,7 +8,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/initrd.h>
@@ -218,7 +217,7 @@ void __init mem_init(void)
free_highmem_page(pfn_to_page(tmp));
#endif
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b8c6062ca0c1..91be74d8df65 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -82,7 +82,6 @@ config ARM
select HAVE_KERNEL_XZ
select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
select HAVE_KRETPROBES if (HAVE_KPROBES)
- select HAVE_MEMBLOCK
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
@@ -100,7 +99,6 @@ config ARM
select IRQ_FORCED_THREADING
select MODULES_USE_ELF_REL
select NEED_DMA_MAP_STATE
- select NO_BOOTMEM
select OF_EARLY_FLATTREE if OF
select OF_RESERVED_MEM if OF
select OLD_SIGACTION
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 1bf65b47808a..120f4c9bbfde 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -11,12 +11,6 @@
#ifndef __ASM_ARM_PROCESSOR_H
#define __ASM_ARM_PROCESSOR_H
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
#ifdef __KERNEL__
#include <asm/hw_breakpoint.h>
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 13bcd3b867cb..e3057c1b55b9 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -12,7 +12,6 @@
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 4c249cb261f3..ac7e08886863 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -16,7 +16,6 @@
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
-#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
@@ -857,7 +856,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
*/
boot_alias_start = phys_to_idmap(start);
if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
- res = memblock_virt_alloc(sizeof(*res), 0);
+ res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
res->name = "System RAM (boot alias)";
res->start = boot_alias_start;
res->end = phys_to_idmap(end);
@@ -865,7 +864,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
request_resource(&iomem_resource, res);
}
- res = memblock_virt_alloc(sizeof(*res), 0);
+ res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
res->name = "System RAM";
res->start = start;
res->end = end;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 56a1fe90d394..083dcd9942ce 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -141,7 +141,7 @@
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/platform_data/ti-sysc.h>
@@ -726,7 +726,7 @@ static int __init _setup_clkctrl_provider(struct device_node *np)
u64 size;
int i;
- provider = memblock_virt_alloc(sizeof(*provider), 0);
+ provider = memblock_alloc(sizeof(*provider), SMP_CACHE_BYTES);
if (!provider)
return -ENOMEM;
@@ -736,12 +736,14 @@ static int __init _setup_clkctrl_provider(struct device_node *np)
of_property_count_elems_of_size(np, "reg", sizeof(u32)) / 2;
provider->addr =
- memblock_virt_alloc(sizeof(void *) * provider->num_addrs, 0);
+ memblock_alloc(sizeof(void *) * provider->num_addrs,
+ SMP_CACHE_BYTES);
if (!provider->addr)
return -ENOMEM;
provider->size =
- memblock_virt_alloc(sizeof(u32) * provider->num_addrs, 0);
+ memblock_alloc(sizeof(u32) * provider->num_addrs,
+ SMP_CACHE_BYTES);
if (!provider->size)
return -ENOMEM;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 66566472c153..661fe48ab78d 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -9,7 +9,6 @@
*
* DMA uncached mapping support.
*/
-#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 0cc8e04295a4..32e4845af2b6 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -11,7 +11,6 @@
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
@@ -508,7 +507,7 @@ void __init mem_init(void)
/* this will put all unused low memory onto the freelists */
free_unused_memmap();
- free_all_bootmem();
+ memblock_free_all();
#ifdef CONFIG_SA1111
/* now that our DMA memory is actually so designated, we can free it */
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e46a6a446cdd..f5cc1ccfea3d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -721,7 +721,7 @@ EXPORT_SYMBOL(phys_mem_access_prot);
static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
- void *ptr = __va(memblock_alloc(sz, align));
+ void *ptr = __va(memblock_phys_alloc(sz, align));
memset(ptr, 0, sz);
return ptr;
}
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 785d2a562a23..cb44aa290e73 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -1,6 +1,5 @@
#include <linux/cpu.h>
#include <linux/dma-mapping.h>
-#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 0641ba54ab62..e70a49fc8dcd 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -1,4 +1,4 @@
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/spinlock.h>
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 964f682a2b7b..787d7850e064 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -139,7 +139,6 @@ config ARM64
select HAVE_GENERIC_DMA_COHERENT
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP if NUMA
select HAVE_NMI
select HAVE_PATA_PLATFORM
@@ -161,7 +160,6 @@ config ARM64
select MULTI_IRQ_HANDLER
select NEED_DMA_MAP_STATE
select NEED_SG_DMA_LENGTH
- select NO_BOOTMEM
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 2bf6691371c2..3e2091708b8e 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -25,13 +25,6 @@
#define USER_DS (TASK_SIZE_64 - 1)
#ifndef __ASSEMBLY__
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
#ifdef __KERNEL__
#include <linux/build_bug.h>
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index ed46dc188b22..44e3c351e1ea 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -16,7 +16,6 @@
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/acpi.h>
-#include <linux/bootmem.h>
#include <linux/cpumask.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c
index 4f4f1815e047..eac1d0cc595c 100644
--- a/arch/arm64/kernel/acpi_numa.c
+++ b/arch/arm64/kernel/acpi_numa.c
@@ -18,7 +18,6 @@
#include <linux/acpi.h>
#include <linux/bitmap.h>
-#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index d0f62dd24c90..953e316521fc 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -26,7 +26,6 @@
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
-#include <linux/bootmem.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
@@ -217,8 +216,9 @@ static void __init request_standard_resources(void)
kernel_data.end = __pa_symbol(_end - 1);
num_standard_resources = memblock.memory.cnt;
- standard_resources = alloc_bootmem_low(num_standard_resources *
- sizeof(*standard_resources));
+ standard_resources = memblock_alloc_low(num_standard_resources *
+ sizeof(*standard_resources),
+ SMP_CACHE_BYTES);
for_each_memblock(memory, region) {
res = &standard_resources[i++];
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d190612b8f33..3a703e5d4e32 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -19,7 +19,7 @@
#include <linux/gfp.h>
#include <linux/acpi.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 3cf87341859f..9d9582cac6c4 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -22,7 +22,6 @@
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
@@ -536,7 +535,7 @@ static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
* memmap array.
*/
if (pg < pgend)
- free_bootmem(pg, pgend - pg);
+ memblock_free(pg, pgend - pg);
}
/*
@@ -599,7 +598,7 @@ void __init mem_init(void)
free_unused_memmap();
#endif
/* this will put all unused low memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
kexec_reserve_crashkres_pages();
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index fccb1a6f8c6f..63527e585aac 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -11,7 +11,6 @@
*/
#define pr_fmt(fmt) "kasan: " fmt
-#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
@@ -38,7 +37,7 @@ static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
- void *p = memblock_virt_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
+ void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
__pa(MAX_DMA_ADDRESS),
MEMBLOCK_ALLOC_ACCESSIBLE, node);
return __pa(p);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 9498c15b847b..394b8d554def 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -101,7 +101,7 @@ static phys_addr_t __init early_pgtable_alloc(void)
phys_addr_t phys;
void *ptr;
- phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
/*
* The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index d7b66fc5e1c5..27a31efd9e8e 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -20,7 +20,6 @@
#define pr_fmt(fmt) "NUMA: " fmt
#include <linux/acpi.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -168,7 +167,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
{
int nid = early_cpu_to_node(cpu);
- return memblock_virt_alloc_try_nid(size, align,
+ return memblock_alloc_try_nid(size, align,
__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
@@ -237,7 +236,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
if (start_pfn >= end_pfn)
pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
- nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+ nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
nd = __va(nd_pa);
/* report and initialize */
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index f65a084607fd..84420109113d 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -13,7 +13,6 @@ config C6X
select GENERIC_ATOMIC64
select GENERIC_IRQ_SHOW
select HAVE_ARCH_TRACEHOOK
- select HAVE_MEMBLOCK
select SPARSE_IRQ
select IRQ_DOMAIN
select OF
diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h
index 8f7cce829f8e..a8581f5b27f6 100644
--- a/arch/c6x/include/asm/processor.h
+++ b/arch/c6x/include/asm/processor.h
@@ -18,17 +18,6 @@
#include <asm/current.h>
/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() \
-({ \
- void *__pc; \
- asm("mvc .S2 pce1,%0\n" : "=b"(__pc)); \
- __pc; \
-})
-
-/*
* User space process size. This is mostly meaningless for NOMMU
* but some C6X processors may have RAM addresses up to 0xFFFFFFFF.
* Since calls like mmap() can return an address or an error, we
diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c
index 05d96a9541b5..2e1c0ea22eb0 100644
--- a/arch/c6x/kernel/setup.c
+++ b/arch/c6x/kernel/setup.c
@@ -11,7 +11,6 @@
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
-#include <linux/bootmem.h>
#include <linux/clkdev.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
index d0a8e0c4b27e..01305c787201 100644
--- a/arch/c6x/mm/dma-coherent.c
+++ b/arch/c6x/mm/dma-coherent.c
@@ -135,8 +135,8 @@ void __init coherent_mem_init(phys_addr_t start, u32 size)
if (dma_size & (PAGE_SIZE - 1))
++dma_pages;
- bitmap_phys = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
- sizeof(long));
+ bitmap_phys = memblock_phys_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
+ sizeof(long));
dma_bitmap = phys_to_virt(bitmap_phys);
memset(dma_bitmap, 0, dma_pages * PAGE_SIZE);
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
index 4cc72b0d1c1d..af5ada0520be 100644
--- a/arch/c6x/mm/init.c
+++ b/arch/c6x/mm/init.c
@@ -11,7 +11,7 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#ifdef CONFIG_BLK_DEV_RAM
#include <linux/blkdev.h>
#endif
@@ -38,7 +38,8 @@ void __init paging_init(void)
struct pglist_data *pgdat = NODE_DATA(0);
unsigned long zones_size[MAX_NR_ZONES] = {0, };
- empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
+ empty_zero_page = (unsigned long) memblock_alloc(PAGE_SIZE,
+ PAGE_SIZE);
memset((void *)empty_zero_page, 0, PAGE_SIZE);
/*
@@ -61,7 +62,7 @@ void __init mem_init(void)
high_memory = (void *)(memory_end & PAGE_MASK);
/* this will put all memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
}
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 0a0558567eaa..cb64f8dacd08 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -36,10 +36,8 @@ config CSKY
select HAVE_C_RECORDMCOUNT
select HAVE_DMA_API_DEBUG
select HAVE_DMA_CONTIGUOUS
- select HAVE_MEMBLOCK
select MAY_HAVE_SPARSE_IRQ
select MODULES_USE_ELF_RELA if MODULES
- select NO_BOOTMEM
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h
index 5ad4f0b83092..b1748659b2e9 100644
--- a/arch/csky/include/asm/processor.h
+++ b/arch/csky/include/asm/processor.h
@@ -4,12 +4,6 @@
#ifndef __ASM_CSKY_PROCESSOR_H
#define __ASM_CSKY_PROCESSOR_H
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l; })
-
#include <linux/bitops.h>
#include <asm/segment.h>
#include <asm/ptrace.h>
diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c
index a5e3ab1d5360..dff8b89444ec 100644
--- a/arch/csky/kernel/setup.c
+++ b/arch/csky/kernel/setup.c
@@ -3,7 +3,6 @@
#include <linux/console.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c
index e168ac087ccb..53b1bfa4c462 100644
--- a/arch/csky/mm/highmem.c
+++ b/arch/csky/mm/highmem.c
@@ -4,7 +4,7 @@
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/smp.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -140,7 +140,7 @@ static void __init fixrange_init(unsigned long start, unsigned long end,
pmd = (pmd_t *)pud;
for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
if (pmd_none(*pmd)) {
- pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
set_pmd(pmd, __pmd(__pa(pte)));
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index ce2711e050ad..dc07c078f9b8 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -14,7 +14,6 @@
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/swap.h>
@@ -47,7 +46,7 @@ void __init mem_init(void)
#endif
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
- free_all_bootmem();
+ memblock_free_all();
#ifdef CONFIG_HIGHMEM
for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 0b334b671e90..d19c6b16cd5d 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -15,8 +15,6 @@ config H8300
select OF
select OF_IRQ
select OF_EARLY_FLATTREE
- select HAVE_MEMBLOCK
- select NO_BOOTMEM
select TIMER_OF
select H8300_TMR8
select HAVE_KERNEL_GZIP
diff --git a/arch/h8300/include/asm/processor.h b/arch/h8300/include/asm/processor.h
index 985346393e4a..a060b41b2d31 100644
--- a/arch/h8300/include/asm/processor.h
+++ b/arch/h8300/include/asm/processor.h
@@ -12,12 +12,6 @@
#ifndef __ASM_H8300_PROCESSOR_H
#define __ASM_H8300_PROCESSOR_H
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l; })
-
#include <linux/compiler.h>
#include <asm/segment.h>
#include <asm/ptrace.h>
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index 34e2df5c0d6d..b32bfa1fe99e 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -18,7 +18,6 @@
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/string.h>
-#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index 015287ac8ce8..6519252ac4db 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -30,7 +30,7 @@
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/gfp.h>
#include <asm/setup.h>
@@ -67,7 +67,7 @@ void __init paging_init(void)
* Initialize the bad page table and bad page to point
* to a couple of allocated pages.
*/
- empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
+ empty_zero_page = (unsigned long)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
memset((void *)empty_zero_page, 0, PAGE_SIZE);
/*
@@ -96,7 +96,7 @@ void __init mem_init(void)
max_mapnr = MAP_NR(high_memory);
/* this will put all low memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
}
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 7b25d7c8fa49..2b688af379e6 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -21,9 +21,7 @@ config HEXAGON
select GENERIC_IRQ_SHOW
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
- select HAVE_MEMBLOCK
select ARCH_DISCARD_MEMBLOCK
- select NO_BOOTMEM
select NEED_SG_DMA_LENGTH
select NO_IOPORT_MAP
select GENERIC_IOMAP
diff --git a/arch/hexagon/include/asm/processor.h b/arch/hexagon/include/asm/processor.h
index ce67940860a5..227bcb9cfdac 100644
--- a/arch/hexagon/include/asm/processor.h
+++ b/arch/hexagon/include/asm/processor.h
@@ -27,9 +27,6 @@
#include <asm/registers.h>
#include <asm/hexagon_vm.h>
-/* must be a macro */
-#define current_text_addr() ({ __label__ _l; _l: &&_l; })
-
/* task_struct, defined elsewhere, is the "process descriptor" */
struct task_struct;
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index 706699374444..38eaa7b703e7 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -19,7 +19,7 @@
*/
#include <linux/dma-noncoherent.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <asm/page.h>
diff --git a/arch/hexagon/kernel/setup.c b/arch/hexagon/kernel/setup.c
index dc8c7e75b5d1..b3c3e04d4e57 100644
--- a/arch/hexagon/kernel/setup.c
+++ b/arch/hexagon/kernel/setup.c
@@ -20,7 +20,7 @@
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
index d789b9cc0189..1719ede9e9bd 100644
--- a/arch/hexagon/mm/init.c
+++ b/arch/hexagon/mm/init.c
@@ -20,7 +20,6 @@
#include <linux/init.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <asm/atomic.h>
#include <linux/highmem.h>
@@ -68,7 +67,7 @@ unsigned long long kmap_generation;
void __init mem_init(void)
{
/* No idea where this is actually declared. Seems to evade LXR. */
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
/*
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 8b4a0c1748c0..36773def6920 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -26,9 +26,7 @@ config IA64
select HAVE_FUNCTION_TRACER
select TTY
select HAVE_ARCH_TRACEHOOK
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
- select NO_BOOTMEM
select HAVE_VIRT_CPU_ACCOUNTING
select ARCH_HAS_DMA_MARK_CLEAN
select ARCH_HAS_SG_CHAIN
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 10061ccf0440..c91ef98ed6bf 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -602,12 +602,6 @@ ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
*unat = (*unat & ~mask) | (nat << bit);
}
-/*
- * Get the current instruction/program counter value.
- */
-#define current_text_addr() \
- ({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })
-
static inline __u64
ia64_get_ivr (void)
{
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 39f4433a6f0e..bec762a9b418 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -12,7 +12,7 @@
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/kexec.h>
#include <linux/elfcore.h>
#include <linux/sysctl.h>
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index f77d80edddfe..8f106638913c 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -23,7 +23,7 @@
* Skip non-WB memory and ignore empty memory ranges.
*/
#include <linux/module.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/crash_dump.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 6b51c88e3578..b49fe6f618ed 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -6,7 +6,7 @@
#ifdef CONFIG_VIRTUAL_MEM_MAP
#include <linux/compiler.h>
#include <linux/export.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
EXPORT_SYMBOL(min_low_pfn); /* defined by bootmem.c, but not exported by generic code */
EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */
#endif
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 550243a94b5d..fe6e4946672e 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -90,7 +90,7 @@
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/string.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6115464d5f03..91bd1e129379 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -77,7 +77,7 @@
#include <linux/sched/task.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/module.h>
@@ -361,9 +361,9 @@ static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
#define IA64_LOG_ALLOCATE(it, size) \
{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
- (ia64_err_rec_t *)alloc_bootmem(size); \
+ (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES); \
ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
- (ia64_err_rec_t *)alloc_bootmem(size);}
+ (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);}
#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
#define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
#define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
@@ -1835,8 +1835,8 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
/* Caller prevents this from being called after init */
static void * __ref mca_bootmem(void)
{
- return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
- KERNEL_STACK_SIZE, 0);
+ return memblock_alloc_from(sizeof(struct ia64_mca_cpu),
+ KERNEL_STACK_SIZE, 0);
}
/* Do per-CPU MCA-related initialization. */
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index dfe40cbdf3b3..45f956ad715a 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -14,7 +14,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kallsyms.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/module.h>
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 0e6c2d9fb498..583a3746d70b 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -27,7 +27,6 @@
#include <linux/init.h>
#include <linux/acpi.h>
-#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/cpu.h>
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 9a960829a01d..99099f73b207 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -344,10 +344,10 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
get_signal(&ksig);
/*
- * get_signal_to_deliver() may have run a debugger (via notify_parent())
+ * get_signal() may have run a debugger (via notify_parent())
* and the debugger may have modified the state (e.g., to arrange for an
* inferior call), thus it's important to check for restarting _after_
- * get_signal_to_deliver().
+ * get_signal().
*/
if ((long) scr->pt.r10 != -1)
/*
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 74fe317477e6..51ec944b036c 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -24,7 +24,7 @@
#include <linux/module.h>
#include <linux/acpi.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/init.h>
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 9b820f7a6a98..e311ee13e61d 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -19,7 +19,7 @@
#include <linux/node.h>
#include <linux/slab.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <linux/export.h>
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index e04efa088902..7601fe0622d2 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -28,7 +28,7 @@
* acquired, then the read-write lock must be acquired first.
*/
#include <linux/module.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index e2e40bbd391c..6e447234205c 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -14,7 +14,6 @@
* Routines used by ia64 machines with contiguous (or virtually contiguous)
* memory.
*/
-#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/mm.h>
@@ -85,8 +84,9 @@ skip:
static inline void
alloc_per_cpu_data(void)
{
- cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
- PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+ cpu_data = memblock_alloc_from(PERCPU_PAGE_SIZE * num_possible_cpus(),
+ PERCPU_PAGE_SIZE,
+ __pa(MAX_DMA_ADDRESS));
}
/**
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 1928d5719e41..8a965784340c 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -19,7 +19,6 @@
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/efi.h>
@@ -451,8 +450,10 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
if (bestnode == -1)
bestnode = anynode;
- ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
- PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+ ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE,
+ __pa(MAX_DMA_ADDRESS),
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ bestnode);
return ptr;
}
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 3b85c3ecac38..d5e12ff1d73c 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -8,7 +8,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
@@ -447,19 +446,19 @@ int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
for (address = start_page; address < end_page; address += PAGE_SIZE) {
pgd = pgd_offset_k(address);
if (pgd_none(*pgd))
- pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+ pgd_populate(&init_mm, pgd, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
pud = pud_offset(pgd, address);
if (pud_none(*pud))
- pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+ pud_populate(&init_mm, pud, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
pmd = pmd_offset(pud, address);
if (pmd_none(*pmd))
- pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+ pmd_populate_kernel(&init_mm, pmd, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
pte = pte_offset_kernel(pmd, address);
if (pte_none(*pte))
- set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
+ set_pte(pte, pfn_pte(__pa(memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node)) >> PAGE_SHIFT,
PAGE_KERNEL));
}
return 0;
@@ -627,7 +626,7 @@ mem_init (void)
set_max_mapnr(max_low_pfn);
high_memory = __va(max_low_pfn * PAGE_SIZE);
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
/*
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index aa19b7ac8222..3861d6e32d5f 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -15,7 +15,7 @@
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/module.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index acf10eb9da15..9340bcb4f29c 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -21,7 +21,7 @@
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/slab.h>
#include <asm/delay.h>
@@ -59,8 +59,10 @@ struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
void __init
mmu_context_init (void)
{
- ia64_ctx.bitmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
- ia64_ctx.flushmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
+ ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
+ SMP_CACHE_BYTES);
+ ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
+ SMP_CACHE_BYTES);
}
/*
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 5d71800df431..196a0dd7ff97 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -20,7 +20,7 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
#include <asm/machvec.h>
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index 9146192b86f5..9900e6d4add6 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -16,7 +16,7 @@
#include <asm/nodedata.h>
#include <asm/delay.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 102aabad6d20..8df13d0d96fa 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -6,7 +6,7 @@
* Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/sn/types.h>
@@ -385,16 +385,15 @@ void __init hubdev_init_node(nodepda_t * npda, cnodeid_t node)
{
struct hubdev_info *hubdev_info;
int size;
- pg_data_t *pg;
size = sizeof(struct hubdev_info);
if (node >= num_online_nodes()) /* Headless/memless IO nodes */
- pg = NODE_DATA(0);
- else
- pg = NODE_DATA(node);
+ node = 0;
- hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size);
+ hubdev_info = (struct hubdev_info *)memblock_alloc_node(size,
+ SMP_CACHE_BYTES,
+ node);
npda->pdinfo = (void *)hubdev_info;
}
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 5f6b6b48c1d5..a6d40a2c5bff 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -20,7 +20,7 @@
#include <linux/mm.h>
#include <linux/serial.h>
#include <linux/irq.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/interrupt.h>
#include <linux/acpi.h>
@@ -511,7 +511,8 @@ static void __init sn_init_pdas(char **cmdline_p)
*/
for_each_online_node(cnode) {
nodepdaindr[cnode] =
- alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
+ memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES,
+ cnode);
memset(nodepdaindr[cnode]->phys_cpuid, -1,
sizeof(nodepdaindr[cnode]->phys_cpuid));
spin_lock_init(&nodepdaindr[cnode]->ptc_lock);
@@ -522,7 +523,7 @@ static void __init sn_init_pdas(char **cmdline_p)
*/
for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++)
nodepdaindr[cnode] =
- alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t));
+ memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES, 0);
/*
* Now copy the array of nodepda pointers to each nodepda.
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index c7b2a8d60a41..1bc9f1ba759a 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -27,9 +27,7 @@ config M68K
select OLD_SIGSUSPEND3
select OLD_SIGACTION
select DMA_DIRECT_OPS if HAS_DMA
- select HAVE_MEMBLOCK
select ARCH_DISCARD_MEMBLOCK
- select NO_BOOTMEM
config CPU_BIG_ENDIAN
def_bool y
diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c
index c83d66442612..6ffc204eb07d 100644
--- a/arch/m68k/atari/stram.c
+++ b/arch/m68k/atari/stram.c
@@ -17,7 +17,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/mount.h>
#include <linux/blkdev.h>
#include <linux/module.h>
@@ -95,7 +95,8 @@ void __init atari_stram_reserve_pages(void *start_mem)
{
if (kernel_in_stram) {
pr_debug("atari_stram pool: kernel in ST-RAM, using alloc_bootmem!\n");
- stram_pool.start = (resource_size_t)alloc_bootmem_low_pages(pool_size);
+ stram_pool.start = (resource_size_t)memblock_alloc_low(pool_size,
+ PAGE_SIZE);
stram_pool.end = stram_pool.start + pool_size - 1;
request_resource(&iomem_resource, &stram_pool);
stram_virt_offset = 0;
diff --git a/arch/m68k/coldfire/m54xx.c b/arch/m68k/coldfire/m54xx.c
index adad03ca6e11..360c723c0ae6 100644
--- a/arch/m68k/coldfire/m54xx.c
+++ b/arch/m68k/coldfire/m54xx.c
@@ -16,7 +16,7 @@
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/clk.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index 464e9f5f50ee..3750819ac5a1 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -8,12 +8,6 @@
#ifndef __ASM_M68K_PROCESSOR_H
#define __ASM_M68K_PROCESSOR_H
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
#include <linux/thread_info.h>
#include <asm/segment.h>
#include <asm/fpu.h>
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index 5d3596c180f9..a1a3eaeaf58c 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -20,7 +20,6 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index cfd5475bfc31..3c5def10d486 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -27,7 +27,6 @@
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/string.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/init.h>
diff --git a/arch/m68k/kernel/uboot.c b/arch/m68k/kernel/uboot.c
index 107082877064..1b4c562753da 100644
--- a/arch/m68k/kernel/uboot.c
+++ b/arch/m68k/kernel/uboot.c
@@ -16,7 +16,7 @@
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/string.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/initrd.h>
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 38e2b272c220..933c33e76a48 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -17,7 +17,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/gfp.h>
#include <asm/setup.h>
@@ -93,7 +93,7 @@ void __init paging_init(void)
high_memory = (void *) end_mem;
- empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+ empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
/*
* Set up SFC/DFC registers (user data space).
@@ -140,7 +140,7 @@ static inline void init_pointer_tables(void)
void __init mem_init(void)
{
/* this will put all memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
init_pointer_tables();
mem_init_print_info(NULL);
}
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index f5453d944ff5..0de4999a3810 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -13,7 +13,6 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <asm/setup.h>
@@ -44,7 +43,7 @@ void __init paging_init(void)
enum zone_type zone;
int i;
- empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
+ empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
memset((void *) empty_zero_page, 0, PAGE_SIZE);
pg_dir = swapper_pg_dir;
@@ -52,7 +51,7 @@ void __init paging_init(void)
size = num_pages * sizeof(pte_t);
size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
- next_pgtable = (unsigned long) alloc_bootmem_pages(size);
+ next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 4e17ecb5928a..7497cf30bf1c 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -18,7 +18,6 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
@@ -55,7 +54,7 @@ static pte_t * __init kernel_page_table(void)
{
pte_t *ptablep;
- ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+ ptablep = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
clear_page(ptablep);
__flush_page_to_ram(ptablep);
@@ -95,7 +94,8 @@ static pmd_t * __init kernel_ptr_table(void)
last_pgtable += PTRS_PER_PMD;
if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
- last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+ last_pgtable = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
+ PAGE_SIZE);
clear_page(last_pgtable);
__flush_page_to_ram(last_pgtable);
@@ -275,7 +275,7 @@ void __init paging_init(void)
* initialize the bad page table and bad page to point
* to a couple of allocated pages
*/
- empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+ empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
/*
* Set up SFC/DFC registers
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index 4a9979908357..f736db48a2e1 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -16,7 +16,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
@@ -45,7 +45,7 @@ void __init paging_init(void)
unsigned long zones_size[MAX_NR_ZONES] = { 0, };
unsigned long size;
- empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+ empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
address = PAGE_OFFSET;
pg_dir = swapper_pg_dir;
@@ -55,7 +55,7 @@ void __init paging_init(void)
size = num_pages * sizeof(pte_t);
size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
- next_pgtable = (unsigned long)alloc_bootmem_pages(size);
+ next_pgtable = (unsigned long)memblock_alloc(size, PAGE_SIZE);
bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
/* Map whole memory from PAGE_OFFSET (0x0E000000) */
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 79a2bb857906..542c4404861c 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -15,7 +15,7 @@
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/platform_device.h>
#include <asm/oplib.h>
diff --git a/arch/m68k/sun3/dvma.c b/arch/m68k/sun3/dvma.c
index 5f92c72b05c3..a2c1c9304895 100644
--- a/arch/m68k/sun3/dvma.c
+++ b/arch/m68k/sun3/dvma.c
@@ -11,7 +11,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/list.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c
index d30da12a1702..582a1284059a 100644
--- a/arch/m68k/sun3/mmu_emu.c
+++ b/arch/m68k/sun3/mmu_emu.c
@@ -13,7 +13,7 @@
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/delay.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c
index 8546922adb47..4d64711d3d47 100644
--- a/arch/m68k/sun3/sun3dvma.c
+++ b/arch/m68k/sun3/sun3dvma.c
@@ -7,7 +7,7 @@
* Contains common routines for sun3/sun3x DVMA management.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -267,7 +267,8 @@ void __init dvma_init(void)
list_add(&(hole->list), &hole_list);
- iommu_use = alloc_bootmem(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long));
+ iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long),
+ SMP_CACHE_BYTES);
dvma_unmap_iommu(DVMA_START, DVMA_SIZE);
diff --git a/arch/m68k/sun3x/dvma.c b/arch/m68k/sun3x/dvma.c
index b2acbc862f60..89e630e66555 100644
--- a/arch/m68k/sun3x/dvma.c
+++ b/arch/m68k/sun3x/dvma.c
@@ -15,7 +15,7 @@
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <asm/sun3x.h>
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 164a4857737a..effed2efd306 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -28,8 +28,6 @@ config MICROBLAZE
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
- select NO_BOOTMEM
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_OPROFILE
select IRQ_DOMAIN
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 330d556860ba..66b537b8d138 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -46,12 +46,6 @@ extern void ret_from_kernel_thread(void);
# define TASK_SIZE (0x81000000 - 0x80000000)
/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-# define current_text_addr() ({ __label__ _l; _l: &&_l; })
-
-/*
* This decides where the kernel will search for a free chunk of vm
* space during mmap's. We won't be using it
*/
@@ -92,12 +86,6 @@ extern unsigned long get_wchan(struct task_struct *p);
# ifndef __ASSEMBLY__
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-# define current_text_addr() ({ __label__ _l; _l: &&_l; })
-
/* If you change this, you must change the associated assembly-languages
* constants defined below, THREAD_*.
*/
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index d801cc5f5b95..45e0a1aa9357 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -28,7 +28,7 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index df6de7ccdc2e..b17fd8aafd64 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -7,10 +7,9 @@
* for more details.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/memblock.h>
#include <linux/mm.h> /* mem_init */
#include <linux/initrd.h>
#include <linux/pagemap.h>
@@ -204,7 +203,7 @@ void __init mem_init(void)
high_memory = (void *)__va(memory_start + lowmem_size - 1);
/* this will put all memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
#ifdef CONFIG_HIGHMEM
highmem_setup();
#endif
@@ -377,7 +376,7 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
if (mem_init_done)
p = kzalloc(size, mask);
else {
- p = alloc_bootmem(size);
+ p = memblock_alloc(size, SMP_CACHE_BYTES);
if (p)
memset(p, 0, size);
}
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 2ffd171af8b6..6b89a66ec1a5 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -20,7 +20,7 @@
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/shmem_fs.h>
#include <linux/list.h>
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 80778b40f8fa..8272ea4c7264 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -60,7 +60,6 @@ config MIPS
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KPROBES
select HAVE_KRETPROBES
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
@@ -78,7 +77,6 @@ config MIPS
select RTC_LIB
select SYSCTL_EXCEPTION_TRACE
select VIRT_TO_BUS
- select NO_BOOTMEM
menu "Machine selection"
diff --git a/arch/mips/ar7/memory.c b/arch/mips/ar7/memory.c
index 0332f0514d05..80390a9ec264 100644
--- a/arch/mips/ar7/memory.c
+++ b/arch/mips/ar7/memory.c
@@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pfn.h>
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 4c7a93f4039a..9728abcb18fa 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -14,7 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c
index 7019e2967009..77a836e661c9 100644
--- a/arch/mips/bcm63xx/prom.c
+++ b/arch/mips/bcm63xx/prom.c
@@ -7,7 +7,7 @@
*/
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/smp.h>
#include <asm/bootinfo.h>
#include <asm/bmips.h>
diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c
index 2be9caaa2085..e28ee9a7cc7e 100644
--- a/arch/mips/bcm63xx/setup.c
+++ b/arch/mips/bcm63xx/setup.c
@@ -9,7 +9,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/pm.h>
#include <asm/bootinfo.h>
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index 6329c5f780d6..1738a06396f9 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -9,7 +9,7 @@
#include <linux/init.h>
#include <linux/bitops.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/clk-provider.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 236833be6fbe..e8eb60ed99f2 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -11,7 +11,7 @@
* Copyright (C) 2010 Cavium Networks, Inc.
*/
#include <linux/dma-direct.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -244,7 +244,7 @@ void __init plat_swiotlb_setup(void)
swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
- octeon_swiotlb = alloc_bootmem_low_pages(swiotlbsize);
+ octeon_swiotlb = memblock_alloc_low(swiotlbsize, PAGE_SIZE);
if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
panic("Cannot allocate SWIOTLB buffer");
diff --git a/arch/mips/dec/prom/memory.c b/arch/mips/dec/prom/memory.c
index a2acc6454cf3..5073d2ed78bb 100644
--- a/arch/mips/dec/prom/memory.c
+++ b/arch/mips/dec/prom/memory.c
@@ -8,7 +8,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/types.h>
#include <asm/addrspace.h>
diff --git a/arch/mips/emma/common/prom.c b/arch/mips/emma/common/prom.c
index cae42259d6da..675337b8a4a0 100644
--- a/arch/mips/emma/common/prom.c
+++ b/arch/mips/emma/common/prom.c
@@ -22,7 +22,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c
index dd9496f26e6a..429b7f8d2aeb 100644
--- a/arch/mips/fw/arc/memory.c
+++ b/arch/mips/fw/arc/memory.c
@@ -17,7 +17,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/swap.h>
#include <asm/sgialib.h>
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index c373eb605040..ce3ed4d17813 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -24,11 +24,6 @@
#include <asm/prefetch.h>
/*
- * Return current * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
-/*
* System setup and hardware flags..
*/
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index 0a0aaf39fd16..4c41ed0a637e 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -13,7 +13,7 @@
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index 2c7288041a99..81845ba04835 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -3,7 +3,7 @@
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/irq.h>
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
index a8657d29c62e..01b2bd95ba1f 100644
--- a/arch/mips/kernel/crash_dump.c
+++ b/arch/mips/kernel/crash_dump.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/highmem.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/crash_dump.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 89950b7bf536..93b8e0b4332f 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -12,7 +12,7 @@
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 01a5ff4c41ff..ea09ed6a80a9 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -15,7 +15,6 @@
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
@@ -561,7 +560,7 @@ static void __init bootmem_init(void)
extern void show_kernel_relocation(const char *level);
offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
- free_bootmem(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);
+ memblock_free(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);
#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO)
/*
@@ -859,7 +858,7 @@ static void __init arch_mem_init(char **cmdline_p)
* Prevent memblock from allocating high memory.
* This cannot be done before max_low_pfn is detected, so up
* to this point is possible to only reserve physical memory
- * with memblock_reserve; memblock_virt_alloc* can be used
+ * with memblock_reserve; memblock_alloc* can be used
* only after this point
*/
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
@@ -917,7 +916,7 @@ static void __init resource_init(void)
if (end >= HIGHMEM_START)
end = HIGHMEM_START - 1;
- res = alloc_bootmem(sizeof(struct resource));
+ res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
res->start = start;
res->end = end;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 5feef28deac8..0f852e1b5891 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -28,7 +28,6 @@
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
@@ -2263,7 +2262,7 @@ void __init trap_init(void)
memblock_set_bottom_up(true);
ebase = (unsigned long)
- __alloc_bootmem(size, 1 << fls(size), 0);
+ memblock_alloc_from(size, 1 << fls(size), 0);
memblock_set_bottom_up(false);
/*
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 0bef238d2c0c..6176b9acba95 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -26,7 +26,7 @@
#include <linux/moduleloader.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/cacheflush.h>
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c
index f43629979a0e..5812e6145801 100644
--- a/arch/mips/kvm/commpage.c
+++ b/arch/mips/kvm/commpage.c
@@ -14,7 +14,7 @@
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c
index f8e772564d74..d77b61b3d6ee 100644
--- a/arch/mips/kvm/dyntrans.c
+++ b/arch/mips/kvm/dyntrans.c
@@ -16,7 +16,7 @@
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include "commpage.h"
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 4144bfaef137..ec9ed23bca7f 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -15,7 +15,7 @@
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c
index aa0a1a00faf6..7257e8b6f5a9 100644
--- a/arch/mips/kvm/interrupt.c
+++ b/arch/mips/kvm/interrupt.c
@@ -13,7 +13,7 @@
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index f7ea8e21656b..1fcc4d149054 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -18,7 +18,7 @@
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/fpu.h>
#include <asm/page.h>
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index d984bd5c2ec5..14d4c5e2b42f 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -8,7 +8,7 @@
#include <linux/export.h>
#include <linux/clk.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <asm/bootinfo.h>
diff --git a/arch/mips/lasat/prom.c b/arch/mips/lasat/prom.c
index 37b8fc5b9ac9..5ce1407de2d5 100644
--- a/arch/mips/lasat/prom.c
+++ b/arch/mips/lasat/prom.c
@@ -8,7 +8,7 @@
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/ioport.h>
#include <asm/bootinfo.h>
#include <asm/lasat/lasat.h>
diff --git a/arch/mips/loongson64/common/init.c b/arch/mips/loongson64/common/init.c
index 6ef17120722f..c073fbcb9805 100644
--- a/arch/mips/loongson64/common/init.c
+++ b/arch/mips/loongson64/common/init.c
@@ -8,7 +8,7 @@
* option) any later version.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/bootinfo.h>
#include <asm/traps.h>
#include <asm/smp-ops.h>
diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c
index c1e6ec52c614..622761878cd1 100644
--- a/arch/mips/loongson64/loongson-3/numa.c
+++ b/arch/mips/loongson64/loongson-3/numa.c
@@ -18,7 +18,6 @@
#include <linux/nodemask.h>
#include <linux/swap.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/highmem.h>
#include <asm/page.h>
@@ -272,7 +271,7 @@ void __init paging_init(void)
void __init mem_init(void)
{
high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
- free_all_bootmem();
+ memblock_free_all();
setup_zero_pages(); /* This comes from node 0 */
mem_init_print_info(NULL);
}
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 15cae0f11880..b521d8e2d359 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -22,7 +22,7 @@
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
@@ -243,7 +243,8 @@ void __init fixrange_init(unsigned long start, unsigned long end,
pmd = (pmd_t *)pud;
for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
if (pmd_none(*pmd)) {
- pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
+ PAGE_SIZE);
set_pmd(pmd, __pmd((unsigned long)pte));
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
@@ -462,7 +463,7 @@ void __init mem_init(void)
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
maar_init();
- free_all_bootmem();
+ memblock_free_all();
setup_zero_pages(); /* Setup zeroed pages. */
mem_init_free_highmem();
mem_init_print_info(NULL);
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index b19a3c506b1e..e2a33adc0f29 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -7,7 +7,7 @@
*/
#include <linux/init.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/highmem.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index a47556723b85..868921adef1d 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -12,7 +12,7 @@
* Steven J. Hill <sjhill@mips.com>
*/
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/string.h>
#include <asm/bootinfo.h>
diff --git a/arch/mips/netlogic/xlp/dt.c b/arch/mips/netlogic/xlp/dt.c
index b5ba83f4c646..c856f2a3ea42 100644
--- a/arch/mips/netlogic/xlp/dt.c
+++ b/arch/mips/netlogic/xlp/dt.c
@@ -33,7 +33,7 @@
*/
#include <linux/kernel.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
index 3c3b1e6abb53..687513880fbf 100644
--- a/arch/mips/pci/pci-legacy.c
+++ b/arch/mips/pci/pci-legacy.c
@@ -11,7 +11,7 @@
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/types.h>
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index c2e94cf5ecda..e68b44b27c0d 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -11,7 +11,7 @@
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/types.h>
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index 1ada8492733b..d544e7b07f7a 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -14,7 +14,7 @@
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/kernel.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
diff --git a/arch/mips/rb532/prom.c b/arch/mips/rb532/prom.c
index 6484e4a4597b..361a690facbf 100644
--- a/arch/mips/rb532/prom.c
+++ b/arch/mips/rb532/prom.c
@@ -29,7 +29,7 @@
#include <linux/export.h>
#include <linux/string.h>
#include <linux/console.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/blkdev.h>
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 6f7bef052b7f..d8b8444d6795 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -18,7 +18,6 @@
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/swap.h>
-#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/highmem.h>
#include <asm/page.h>
@@ -475,7 +474,7 @@ void __init paging_init(void)
void __init mem_init(void)
{
high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
- free_all_bootmem();
+ memblock_free_all();
setup_zero_pages(); /* This comes from node 0 */
mem_init_print_info(NULL);
}
diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c
index 092fb2a6ec4a..12a780f251e1 100644
--- a/arch/mips/sibyte/common/cfe.c
+++ b/arch/mips/sibyte/common/cfe.c
@@ -21,7 +21,7 @@
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/pm.h>
#include <linux/smp.h>
diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c
index 152ca71cc2d7..3b034b7178d6 100644
--- a/arch/mips/sibyte/swarm/setup.c
+++ b/arch/mips/sibyte/swarm/setup.c
@@ -23,7 +23,7 @@
#include <linux/spinlock.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/arch/mips/txx9/rbtx4938/prom.c b/arch/mips/txx9/rbtx4938/prom.c
index bcb469247e8c..2b36a2ee744c 100644
--- a/arch/mips/txx9/rbtx4938/prom.c
+++ b/arch/mips/txx9/rbtx4938/prom.c
@@ -11,7 +11,7 @@
*/
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/bootinfo.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/rbtx4938.h>
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 56992330026a..7a04adacb2f0 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -29,14 +29,12 @@ config NDS32
select HANDLE_DOMAIN_IRQ
select HAVE_ARCH_TRACEHOOK
select HAVE_DEBUG_KMEMLEAK
- select HAVE_MEMBLOCK
select HAVE_REGS_AND_STACK_ACCESS_API
select IRQ_DOMAIN
select LOCKDEP_SUPPORT
select MODULES_USE_ELF_RELA
select OF
select OF_EARLY_FLATTREE
- select NO_BOOTMEM
select NO_IOPORT_MAP
select RTC_LIB
select THREAD_INFO_IN_TASK
diff --git a/arch/nds32/include/asm/processor.h b/arch/nds32/include/asm/processor.h
index 9c83caf4269f..c2660f566bac 100644
--- a/arch/nds32/include/asm/processor.h
+++ b/arch/nds32/include/asm/processor.h
@@ -4,12 +4,6 @@
#ifndef __ASM_NDS32_PROCESSOR_H
#define __ASM_NDS32_PROCESSOR_H
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
#ifdef __KERNEL__
#include <asm/ptrace.h>
diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c
index 63a1a5ef5219..eacc79024879 100644
--- a/arch/nds32/kernel/setup.c
+++ b/arch/nds32/kernel/setup.c
@@ -2,9 +2,8 @@
// Copyright (C) 2005-2017 Andes Technology Corporation
#include <linux/cpu.h>
-#include <linux/bootmem.h>
-#include <linux/seq_file.h>
#include <linux/memblock.h>
+#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
diff --git a/arch/nds32/mm/highmem.c b/arch/nds32/mm/highmem.c
index e17cb8a69315..022779af6148 100644
--- a/arch/nds32/mm/highmem.c
+++ b/arch/nds32/mm/highmem.c
@@ -6,7 +6,7 @@
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
index c713d2ad55dc..131104bd2538 100644
--- a/arch/nds32/mm/init.c
+++ b/arch/nds32/mm/init.c
@@ -7,12 +7,11 @@
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
-#include <linux/memblock.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -81,7 +80,7 @@ static void __init map_ram(void)
}
/* Alloc one page for holding PTE's... */
- pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+ pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
memset(pte, 0, PAGE_SIZE);
set_pmd(pme, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
@@ -114,7 +113,7 @@ static void __init fixedrange_init(void)
pgd = swapper_pg_dir + pgd_index(vaddr);
pud = pud_offset(pgd, vaddr);
pmd = pmd_offset(pud, vaddr);
- fixmap_pmd_p = (pmd_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+ fixmap_pmd_p = (pmd_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
memset(fixmap_pmd_p, 0, PAGE_SIZE);
set_pmd(pmd, __pmd(__pa(fixmap_pmd_p) + _PAGE_KERNEL_TABLE));
@@ -127,7 +126,7 @@ static void __init fixedrange_init(void)
pgd = swapper_pg_dir + pgd_index(vaddr);
pud = pud_offset(pgd, vaddr);
pmd = pmd_offset(pud, vaddr);
- pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+ pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
memset(pte, 0, PAGE_SIZE);
set_pmd(pmd, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
pkmap_page_table = pte;
@@ -153,7 +152,7 @@ void __init paging_init(void)
fixedrange_init();
/* allocate space for empty_zero_page */
- zero_page = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+ zero_page = __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
memset(zero_page, 0, PAGE_SIZE);
zone_sizes_init();
@@ -192,7 +191,7 @@ void __init mem_init(void)
free_highmem();
/* this will put all low memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
pr_info("virtual kernel memory layout:\n"
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 2df0c57f2833..7e95506e957a 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -23,9 +23,7 @@ config NIOS2
select SPARSE_IRQ
select USB_ARCH_HAS_HCD if USB_SUPPORT
select CPU_NO_EFFICIENT_FFS
- select HAVE_MEMBLOCK
select ARCH_DISCARD_MEMBLOCK
- select NO_BOOTMEM
config GENERIC_CSUM
def_bool y
diff --git a/arch/nios2/include/asm/processor.h b/arch/nios2/include/asm/processor.h
index 4944e2e1d8b0..94bcb86f679f 100644
--- a/arch/nios2/include/asm/processor.h
+++ b/arch/nios2/include/asm/processor.h
@@ -38,12 +38,6 @@
#define KUSER_SIZE (PAGE_SIZE)
#ifndef __ASSEMBLY__
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l; })
-
# define TASK_SIZE 0x7FFF0000UL
# define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
index a6d4f7530247..232a36b511aa 100644
--- a/arch/nios2/kernel/prom.c
+++ b/arch/nios2/kernel/prom.c
@@ -25,7 +25,7 @@
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/io.h>
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index 2d0011ddd4d5..6bbd4ae2beb0 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -16,7 +16,6 @@
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/console.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
index c92fe4234009..16cea5776b87 100644
--- a/arch/nios2/mm/init.c
+++ b/arch/nios2/mm/init.c
@@ -23,7 +23,7 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pagemap.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
@@ -73,7 +73,7 @@ void __init mem_init(void)
high_memory = __va(end_mem);
/* this will put all memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
}
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index a655ae280637..285f7d05c8ed 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -12,7 +12,6 @@ config OPENRISC
select OF_EARLY_FLATTREE
select IRQ_DOMAIN
select HANDLE_DOMAIN_IRQ
- select HAVE_MEMBLOCK
select GPIOLIB
select HAVE_ARCH_TRACEHOOK
select SPARSE_IRQ
@@ -32,7 +31,6 @@ config OPENRISC
select HAVE_DEBUG_STACKOVERFLOW
select OR1K_PIC
select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1
- select NO_BOOTMEM
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_USE_QUEUED_RWLOCKS
select OMPIC if SMP
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
index af31a9fe736a..351d3aed7a06 100644
--- a/arch/openrisc/include/asm/processor.h
+++ b/arch/openrisc/include/asm/processor.h
@@ -30,11 +30,6 @@
| SPR_SR_DCE | SPR_SR_SM)
#define USER_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_ICE \
| SPR_SR_DCE | SPR_SR_IEE | SPR_SR_TEE)
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l; })
/*
* User space process size. This is hardcoded into a few places,
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index e17fcd83120f..c605bdad1746 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -30,13 +30,12 @@
#include <linux/delay.h>
#include <linux/console.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/serial.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/of.h>
-#include <linux/memblock.h>
#include <linux/device.h>
#include <asm/sections.h>
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index 6972d5d6f23f..d157310eb377 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -26,12 +26,11 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/blkdev.h> /* for initrd_* */
#include <linux/pagemap.h>
-#include <linux/memblock.h>
#include <asm/segment.h>
#include <asm/pgalloc.h>
@@ -106,7 +105,7 @@ static void __init map_ram(void)
}
/* Alloc one page for holding PTE's... */
- pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+ pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));
/* Fill the newly allocated page with PTE'S */
@@ -213,7 +212,7 @@ void __init mem_init(void)
memset((void *)empty_zero_page, 0, PAGE_SIZE);
/* this will put all low memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index 2175e4bfd9fc..c9697529b3f0 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -126,7 +126,7 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm,
if (likely(mem_init_done)) {
pte = (pte_t *) __get_free_page(GFP_KERNEL);
} else {
- pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+ pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
}
if (pte)
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index f1cd12afd943..92a339ee28b3 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -15,8 +15,6 @@ config PARISC
select RTC_CLASS
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
- select HAVE_MEMBLOCK
- select NO_BOOTMEM
select BUG
select BUILDTIME_EXTABLE_SORT
select HAVE_PERF_EVENTS
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 2bd5e695bdad..6e2a8176b0dd 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -20,17 +20,6 @@
#include <asm/percpu.h>
#endif /* __ASSEMBLY__ */
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#ifdef CONFIG_PA20
-#define current_ia(x) __asm__("mfia %0" : "=r"(x))
-#else /* mfia added in pa2.0 */
-#define current_ia(x) __asm__("blr 0,%0\n\tnop" : "=r"(x))
-#endif
-#define current_text_addr() ({ void *pc; current_ia(pc); pc; })
-
#define HAVE_ARCH_PICK_MMAP_LAYOUT
#define TASK_SIZE_OF(tsk) ((tsk)->thread.task_size)
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index f88a52b8531c..2d7cffcaa476 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/delay.h>
@@ -621,7 +620,7 @@ void __init mem_init(void)
high_memory = __va((max_pfn << PAGE_SHIFT));
set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
- free_all_bootmem();
+ memblock_free_all();
#ifdef CONFIG_PA11
if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e84943d24e5c..2d51b2bd4aa1 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -206,7 +206,6 @@ config PPC
select HAVE_KRETPROBES
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
@@ -231,7 +230,6 @@ config PPC
select MODULES_USE_ELF_RELA
select NEED_DMA_MAP_STATE if PPC64 || NOT_COHERENT_CACHE
select NEED_SG_DMA_LENGTH
- select NO_BOOTMEM
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 7d04d60a39c9..ee58526cb6c2 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -67,12 +67,6 @@ extern int _chrp_type;
#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
/* Macros for adjusting thread priority (hardware multi-threading) */
#define HMT_very_low() asm volatile("or 31,31,31 # very low priority")
#define HMT_low() asm volatile("or 1,1,1 # low priority")
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index f432054234a4..8be3721d9302 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -1008,9 +1008,7 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
/* Count and allocate space for cpu features */
of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
&nr_dt_cpu_features);
- dt_cpu_features = __va(
- memblock_alloc(sizeof(struct dt_cpu_feature)*
- nr_dt_cpu_features, PAGE_SIZE));
+ dt_cpu_features = __va(memblock_phys_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE));
cpufeatures_setup_start(isa);
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 0ee3e6d50f28..913bfca09c4f 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -198,7 +198,7 @@ void __init allocate_paca_ptrs(void)
paca_nr_cpu_ids = nr_cpu_ids;
paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
- paca_ptrs = __va(memblock_alloc(paca_ptrs_size, 0));
+ paca_ptrs = __va(memblock_phys_alloc(paca_ptrs_size, SMP_CACHE_BYTES));
memset(paca_ptrs, 0x88, paca_ptrs_size);
}
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 4da8ed576229..d3f04f2d8249 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -10,7 +10,7 @@
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/errno.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/list.h>
@@ -203,7 +203,8 @@ pci_create_OF_bus_map(void)
struct property* of_prop;
struct device_node *dn;
- of_prop = memblock_virt_alloc(sizeof(struct property) + 256, 0);
+ of_prop = memblock_alloc(sizeof(struct property) + 256,
+ SMP_CACHE_BYTES);
dn = of_find_node_by_path("/");
if (dn) {
memset(of_prop, -1, sizeof(struct property) + 256);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index c4d7078e5295..fe758cedb93f 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -126,7 +126,7 @@ static void __init move_device_tree(void)
if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
overlaps_crashkernel(start, size) ||
overlaps_initrd(start, size)) {
- p = __va(memblock_alloc(size, PAGE_SIZE));
+ p = __va(memblock_phys_alloc(size, PAGE_SIZE));
memcpy(p, initial_boot_params, size);
initial_boot_params = p;
DBG("Moved device tree to 0x%p\n", p);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 9ca9db707bcb..93ee3703b42f 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -33,7 +33,6 @@
#include <linux/serial_8250.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <linux/of_platform.h>
#include <linux/hugetlb.h>
#include <asm/debugfs.h>
@@ -460,8 +459,7 @@ void __init smp_setup_cpu_maps(void)
DBG("smp_setup_cpu_maps()\n");
- cpu_to_phys_id = __va(memblock_alloc(nr_cpu_ids * sizeof(u32),
- __alignof__(u32)));
+ cpu_to_phys_id = __va(memblock_phys_alloc(nr_cpu_ids * sizeof(u32), __alignof__(u32)));
memset(cpu_to_phys_id, 0, nr_cpu_ids * sizeof(u32));
for_each_node_by_type(dn, "cpu") {
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 8c507be12c3c..81909600013a 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -206,9 +206,9 @@ void __init irqstack_early_init(void)
* as the memblock is limited to lowmem by default */
for_each_possible_cpu(i) {
softirq_ctx[i] = (struct thread_info *)
- __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+ __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
hardirq_ctx[i] = (struct thread_info *)
- __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+ __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
}
}
@@ -227,12 +227,12 @@ void __init exc_lvl_early_init(void)
#endif
critirq_ctx[hw_cpu] = (struct thread_info *)
- __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+ __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
#ifdef CONFIG_BOOKE
dbgirq_ctx[hw_cpu] = (struct thread_info *)
- __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+ __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
mcheckirq_ctx[hw_cpu] = (struct thread_info *)
- __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+ __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
#endif
}
}
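Two renames meet in these powerpc hunks: the old physical-address memblock_alloc() is now memblock_phys_alloc(), so call sites keep their __va() wrapper, while the old memblock_virt_alloc() becomes plain memblock_alloc() and the implicit 0 alignment is spelled out as SMP_CACHE_BYTES. A hedged side-by-side sketch, not from the patch itself; the function name and sizes are illustrative:

#include <linux/init.h>
#include <linux/memblock.h>	/* memblock_alloc(), memblock_phys_alloc() */
#include <linux/cache.h>	/* SMP_CACHE_BYTES */
#include <linux/string.h>	/* memset() */
#include <asm/page.h>		/* __va(), PAGE_SIZE */

static void __init example_boot_allocs(void)
{
	phys_addr_t pa;
	void *buf;

	/* Physical-address API: returns a phys_addr_t; the caller maps
	 * it with __va() and clears it, as the hunks above do. */
	pa = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
	memset(__va(pa), 0, PAGE_SIZE);

	/* Virtual-address API: returns zeroed, directly usable memory;
	 * the alignment is now explicit instead of a bare 0. */
	buf = memblock_alloc(256, SMP_CACHE_BYTES);
	(void)buf;
}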
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index faf00222b324..2a51e4cc8246 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -29,10 +29,9 @@
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
-#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/nmi.h>
@@ -763,13 +762,15 @@ void __init emergency_stack_init(void)
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
- return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
- __pa(MAX_DMA_ADDRESS));
+ return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ early_cpu_to_node(cpu));
+
}
static void __init pcpu_fc_free(void *ptr, size_t size)
{
- free_bootmem(__pa(ptr), size);
+ memblock_free(__pa(ptr), size);
}
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
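The node-aware conversion in setup_64.c above is not a plain rename: __alloc_bootmem_node(NODE_DATA(nid), size, align, goal) maps onto memblock_alloc_try_nid() with an explicit accessible-range limit, and free_bootmem() maps onto memblock_free(), which still takes a physical range. A sketch of the pairing under those assumptions; the helper names are illustrative:

#include <linux/init.h>
#include <linux/memblock.h>
#include <asm/page.h>		/* __pa() */

static void * __init node_local_alloc(int nid, size_t size, size_t align,
				      phys_addr_t goal)
{
	/* Prefer memory on @nid at or above @goal, anywhere accessible;
	 * the returned virtual address is zeroed like the bootmem call. */
	return memblock_alloc_try_nid(size, align, goal,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static void __init node_local_free(void *ptr, size_t size)
{
	memblock_free(__pa(ptr), size);
}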
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c
index 06796dec01ea..dedf88a76f58 100644
--- a/arch/powerpc/lib/alloc.c
+++ b/arch/powerpc/lib/alloc.c
@@ -2,7 +2,7 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/string.h>
#include <asm/setup.h>
@@ -14,7 +14,7 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
if (slab_is_available())
p = kzalloc(size, mask);
else {
- p = memblock_virt_alloc(size, 0);
+ p = memblock_alloc(size, SMP_CACHE_BYTES);
}
return p;
}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a7226ed9cae6..8cf035e68378 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -15,7 +15,6 @@
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index dd949d6649a2..0a64fffabee1 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -27,12 +27,11 @@
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
-#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -349,7 +348,7 @@ void __init mem_init(void)
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
set_max_mapnr(max_pfn);
- free_all_bootmem();
+ memblock_free_all();
#ifdef CONFIG_HIGHMEM
{
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 4d80239ef83c..2faca46ad720 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -44,7 +44,7 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/slab.h>
@@ -461,10 +461,11 @@ void __init mmu_context_init(void)
/*
* Allocate the maps used by context management
*/
- context_map = memblock_virt_alloc(CTX_MAP_SIZE, 0);
- context_mm = memblock_virt_alloc(sizeof(void *) * (LAST_CONTEXT + 1), 0);
+ context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
+ context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1),
+ SMP_CACHE_BYTES);
#ifdef CONFIG_SMP
- stale_map[boot_cpuid] = memblock_virt_alloc(CTX_MAP_SIZE, 0);
+ stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
"powerpc/mmu/ctx:prepare",
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 693ae1c1acba..3a048e98a132 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -11,7 +11,7 @@
#define pr_fmt(fmt) "numa: " fmt
#include <linux/threads.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
@@ -19,7 +19,6 @@
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
-#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
@@ -788,7 +787,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
void *nd;
int tnid;
- nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+ nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
nd = __va(nd_pa);
/* report and initialize */
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 5877f5aa8f5d..bda3c6f1bd32 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -50,7 +50,7 @@ __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
if (slab_is_available()) {
pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
} else {
- pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+ pte = __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
if (pte)
clear_page(pte);
}
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 38a793bfca37..f6f575bae3bc 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -224,7 +224,7 @@ void __init MMU_init_hw(void)
* Find some memory for the hash table.
*/
if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
- Hash = __va(memblock_alloc(Hash_size, Hash_size));
+ Hash = __va(memblock_phys_alloc(Hash_size, Hash_size));
memset(Hash, 0, Hash_size);
_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index f06c83f321e6..f2971522fb4a 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -213,7 +213,7 @@ static int __init iob_init(struct device_node *dn)
pr_info("IOBMAP L2 allocated at: %p\n", iob_l2_base);
/* Allocate a spare page to map all invalid IOTLB pages. */
- tmp = memblock_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
+ tmp = memblock_phys_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
if (!tmp)
panic("IOBMAP: Cannot allocate spare page!");
/* Empty l1 is marked invalid */
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index 60b03a1703d1..ae54d7fe68f3 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -18,7 +18,7 @@
#include <linux/errno.h>
#include <linux/adb.h>
#include <linux/pmu.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <asm/sections.h>
@@ -513,7 +513,7 @@ static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr)
printk(KERN_ERR "nvram: no address\n");
return -EINVAL;
}
- nvram_image = memblock_virt_alloc(NVRAM_SIZE, 0);
+ nvram_image = memblock_alloc(NVRAM_SIZE, SMP_CACHE_BYTES);
nvram_data = ioremap(addr, NVRAM_SIZE*2);
nvram_naddrs = 1; /* Make sure we get the correct case */
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
index a29fdf8a2e56..84d038ed3882 100644
--- a/arch/powerpc/platforms/powernv/memtrace.c
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -70,6 +70,7 @@ static int change_memblock_state(struct memory_block *mem, void *arg)
return 0;
}
+/* called with device_hotplug_lock held */
static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
{
u64 end_pfn = start_pfn + nr_pages - 1;
@@ -110,6 +111,7 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
/* Trace memory needs to be aligned to the size */
end_pfn = round_down(end_pfn - nr_pages, nr_pages);
+ lock_device_hotplug();
for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) {
/*
@@ -118,15 +120,15 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
* we never try to remove memory that spans two iomem
* resources.
*/
- lock_device_hotplug();
end_pfn = base_pfn + nr_pages;
for (pfn = base_pfn; pfn < end_pfn; pfn += bytes>> PAGE_SHIFT) {
- remove_memory(nid, pfn << PAGE_SHIFT, bytes);
+ __remove_memory(nid, pfn << PAGE_SHIFT, bytes);
}
unlock_device_hotplug();
return base_pfn << PAGE_SHIFT;
}
}
+ unlock_device_hotplug();
return 0;
}
@@ -242,9 +244,11 @@ static int memtrace_online(void)
* we need to online the memory ourselves.
*/
if (!memhp_auto_online) {
+ lock_device_hotplug();
walk_memory_range(PFN_DOWN(ent->start),
PFN_UP(ent->start + ent->size - 1),
NULL, online_mem_block);
+ unlock_device_hotplug();
}
/*
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index a4641515956f..beed86f4224b 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -171,7 +171,7 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
/*
* Allocate a buffer to hold the MC recoverable ranges.
*/
- mc_recoverable_range =__va(memblock_alloc(size, __alignof__(u64)));
+ mc_recoverable_range =__va(memblock_phys_alloc(size, __alignof__(u64)));
memset(mc_recoverable_range, 0, size);
for (i = 0; i < mc_recoverable_range_len; i++) {
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index cde710297a4e..dd807446801e 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -17,11 +17,10 @@
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
-#include <linux/memblock.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <linux/sizes.h>
@@ -3770,7 +3769,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
phb_id = be64_to_cpup(prop64);
pr_debug(" PHB-ID : 0x%016llx\n", phb_id);
- phb = memblock_virt_alloc(sizeof(*phb), 0);
+ phb = memblock_alloc(sizeof(*phb), SMP_CACHE_BYTES);
/* Allocate PCI controller */
phb->hose = hose = pcibios_alloc_controller(np);
@@ -3816,7 +3815,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
else
phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE;
- phb->diag_data = memblock_virt_alloc(phb->diag_data_size, 0);
+ phb->diag_data = memblock_alloc(phb->diag_data_size, SMP_CACHE_BYTES);
/* Parse 32-bit and IO ranges (if any) */
pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
@@ -3875,7 +3874,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
}
pemap_off = size;
size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
- aux = memblock_virt_alloc(size, 0);
+ aux = memblock_alloc(size, SMP_CACHE_BYTES);
phb->ioda.pe_alloc = aux;
phb->ioda.m64_segmap = aux + m64map_off;
phb->ioda.m32_segmap = aux + m32map_off;
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 77a37520068d..658bfab3350b 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -24,7 +24,7 @@
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/export.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
@@ -126,7 +126,7 @@ static void __init prealloc(struct ps3_prealloc *p)
if (!p->size)
return;
- p->address = memblock_virt_alloc(p->size, p->align);
+ p->address = memblock_alloc(p->size, p->align);
printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size,
p->address);
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 2b796da822c2..2a983b5a52e1 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -300,7 +300,7 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz
nid = memory_add_physaddr_to_nid(base);
for (i = 0; i < sections_per_block; i++) {
- remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
+ __remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
base += MIN_MEMORY_BLOCK_SIZE;
}
@@ -389,7 +389,7 @@ static int dlpar_remove_lmb(struct drmem_lmb *lmb)
block_sz = pseries_memory_block_size();
nid = memory_add_physaddr_to_nid(lmb->base_addr);
- remove_memory(nid, lmb->base_addr, block_sz);
+ __remove_memory(nid, lmb->base_addr, block_sz);
/* Update memory regions for memory remove */
memblock_remove(lmb->base_addr, block_sz);
@@ -668,7 +668,7 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
nid = memory_add_physaddr_to_nid(lmb->base_addr);
/* Add the memory */
- rc = add_memory(nid, lmb->base_addr, block_sz);
+ rc = __add_memory(nid, lmb->base_addr, block_sz);
if (rc) {
invalidate_lmb_associativity_index(lmb);
return rc;
@@ -676,7 +676,7 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
rc = dlpar_online_lmb(lmb);
if (rc) {
- remove_memory(nid, lmb->base_addr, block_sz);
+ __remove_memory(nid, lmb->base_addr, block_sz);
invalidate_lmb_associativity_index(lmb);
} else {
lmb->flags |= DRCONF_MEM_ASSIGNED;
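The memtrace and pseries hunks above follow the new locking rule from this series: add_memory()/remove_memory() now take device_hotplug_lock themselves, so paths that already hold it (or need to hold it across a longer offline-and-remove sequence) switch to the __add_memory()/__remove_memory() variants. A rough sketch of the intended ordering, assuming CONFIG_MEMORY_HOTREMOVE; the function name is illustrative:

#include <linux/types.h>
#include <linux/device.h>		/* lock_device_hotplug() */
#include <linux/memory_hotplug.h>	/* __remove_memory() */

static void example_unplug_range(int nid, u64 start, u64 size)
{
	lock_device_hotplug();
	/* The double-underscore variant expects device_hotplug_lock to
	 * be held by the caller for the whole sequence. */
	__remove_memory(nid, start, size);
	unlock_device_hotplug();
}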
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 5ca3e22d0512..a5b40d1460f1 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -261,7 +261,7 @@ static void allocate_dart(void)
* that to work around what looks like a problem with the HT bridge
* prefetching into invalid pages and corrupting data
*/
- tmp = memblock_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
+ tmp = memblock_phys_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) &
DARTMAP_RPNMASK);
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index e64a411d1a00..d45450f6666a 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/bitmap.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/msi_bitmap.h>
#include <asm/setup.h>
@@ -128,7 +128,7 @@ int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
if (bmp->bitmap_from_slab)
bmp->bitmap = kzalloc(size, GFP_KERNEL);
else {
- bmp->bitmap = memblock_virt_alloc(size, 0);
+ bmp->bitmap = memblock_alloc(size, SMP_CACHE_BYTES);
/* the bitmap won't be freed from memblock allocator */
kmemleak_not_leak(bmp->bitmap);
}
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index fe451348ae57..d86842c21710 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -28,14 +28,12 @@ config RISCV
select GENERIC_STRNLEN_USER
select GENERIC_SMP_IDLE_THREAD
select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_DMA_CONTIGUOUS
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GENERIC_DMA_COHERENT
select HAVE_PERF_EVENTS
select IRQ_DOMAIN
- select NO_BOOTMEM
select RISCV_ISA_A if SMP
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 50de774d827a..0531f49af5c3 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -33,12 +33,6 @@
struct task_struct;
struct pt_regs;
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l; })
-
/* CPU-specific state of a task */
struct thread_struct {
/* Callee-saved registers */
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 58a522f9bcc3..1d9bfaff60bc 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -13,9 +13,8 @@
#include <linux/init.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
-#include <linux/initrd.h>
#include <linux/memblock.h>
+#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
@@ -55,7 +54,7 @@ void __init mem_init(void)
#endif /* CONFIG_FLATMEM */
high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
}
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8b25e1f45b27..5173366af8f3 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -163,7 +163,6 @@ config S390
select HAVE_LIVEPATCH
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MEMBLOCK_PHYS_MAP
select HAVE_MOD_ARCH_SPECIFIC
@@ -175,7 +174,6 @@ config S390
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
select MODULES_USE_ELF_RELA
- select NO_BOOTMEM
select OLD_SIGACTION
select OLD_SIGSUSPEND3
select SPARSE_IRQ
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 34768e6ef4fb..302795c47c06 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -73,12 +73,6 @@ static inline int test_cpu_flag_of(int flag, int cpu)
#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; })
-
static inline void get_cpu_id(struct cpuid *ptr)
{
asm volatile("stidp %0" : "=Q" (*ptr));
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 376f6b6dfb3c..97eae3871868 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -13,10 +13,9 @@
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/slab.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/elf.h>
#include <asm/asm-offsets.h>
-#include <linux/memblock.h>
#include <asm/os_info.h>
#include <asm/elf.h>
#include <asm/ipl.h>
@@ -61,7 +60,7 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu)
{
struct save_area *sa;
- sa = (void *) memblock_alloc(sizeof(*sa), 8);
+ sa = (void *) memblock_phys_alloc(sizeof(*sa), 8);
if (is_boot_cpu)
list_add(&sa->list, &dump_save_areas);
else
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index a2e952b66248..72dd23ef771b 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -34,7 +34,6 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
-#include <linux/bootmem.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
@@ -378,7 +377,7 @@ static void __init setup_lowcore(void)
* Setup lowcore for boot cpu
*/
BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
- lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc));
+ lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
lc->restart_psw.mask = PSW_KERNEL_BITS;
lc->restart_psw.addr = (unsigned long) restart_int_handler;
lc->external_new_psw.mask = PSW_KERNEL_BITS |
@@ -422,7 +421,7 @@ static void __init setup_lowcore(void)
* Allocate the global restart stack which is the same for
* all CPUs in cast *one* of them does a PSW restart.
*/
- restart_stack = memblock_virt_alloc(THREAD_SIZE, THREAD_SIZE);
+ restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
restart_stack += STACK_INIT_OFFSET;
/*
@@ -488,7 +487,7 @@ static void __init setup_resources(void)
bss_resource.end = (unsigned long) __bss_stop - 1;
for_each_memblock(memory, reg) {
- res = memblock_virt_alloc(sizeof(*res), 8);
+ res = memblock_alloc(sizeof(*res), 8);
res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
res->name = "System RAM";
@@ -502,7 +501,7 @@ static void __init setup_resources(void)
std_res->start > res->end)
continue;
if (std_res->end > res->end) {
- sub_res = memblock_virt_alloc(sizeof(*sub_res), 8);
+ sub_res = memblock_alloc(sizeof(*sub_res), 8);
*sub_res = *std_res;
sub_res->end = res->end;
std_res->start = res->end + 1;
@@ -967,7 +966,8 @@ static void __init setup_randomness(void)
{
struct sysinfo_3_2_2 *vmms;
- vmms = (struct sysinfo_3_2_2 *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
+ PAGE_SIZE);
if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
memblock_free((unsigned long) vmms, PAGE_SIZE);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1b3188f57b58..f82b3d3c36e2 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -20,7 +20,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
@@ -35,7 +35,6 @@
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
-#include <linux/memblock.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
@@ -761,7 +760,7 @@ void __init smp_detect_cpus(void)
u16 address;
/* Get CPU information */
- info = memblock_virt_alloc(sizeof(*info), 8);
+ info = memblock_alloc(sizeof(*info), 8);
smp_get_core_info(info, 1);
/* Find boot CPU type */
if (sclp.has_core_type) {
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index e8184a15578a..8992b04c0ade 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -8,7 +8,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/cpuset.h>
@@ -519,7 +519,7 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
nr_masks = max(nr_masks, 1);
for (i = 0; i < nr_masks; i++) {
- mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
+ mask->next = memblock_alloc(sizeof(*mask->next), 8);
mask = mask->next;
}
}
@@ -537,7 +537,7 @@ void __init topology_init_early(void)
}
if (!MACHINE_HAS_TOPOLOGY)
goto out;
- tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE);
+ tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
info = tl_info;
store_topology(info);
pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index ec31b48a42a5..ebe748a9f472 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -18,7 +18,7 @@
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 84111a43ea29..eba2def3414d 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -16,7 +16,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/ctype.h>
#include <linux/ioport.h>
#include <asm/diag.h>
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 92d7a153e72a..76d0708438e9 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -21,7 +21,7 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
@@ -29,7 +29,6 @@
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
-#include <linux/memblock.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
@@ -139,7 +138,7 @@ void __init mem_init(void)
cmma_init();
/* this will put all low memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
setup_zero_pages(); /* Setup zeroed pages. */
cmma_init_nodat();
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index db55561c5981..0472e27febdf 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -4,14 +4,13 @@
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
-#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
@@ -36,7 +35,7 @@ static void __ref *vmem_alloc_pages(unsigned int order)
if (slab_is_available())
return (void *)__get_free_pages(GFP_KERNEL, order);
- return (void *) memblock_alloc(size, size);
+ return (void *) memblock_phys_alloc(size, size);
}
void *vmem_crst_alloc(unsigned long val)
@@ -57,7 +56,7 @@ pte_t __ref *vmem_pte_alloc(void)
if (slab_is_available())
pte = (pte_t *) page_table_alloc(&init_mm);
else
- pte = (pte_t *) memblock_alloc(size, size);
+ pte = (pte_t *) memblock_phys_alloc(size, size);
if (!pte)
return NULL;
memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index 83b222c57609..bfba273c32c0 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -22,7 +22,6 @@
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <linux/node.h>
#include <linux/memory.h>
#include <linux/slab.h>
@@ -313,7 +312,7 @@ static void __ref create_core_to_node_map(void)
{
int i;
- emu_cores = memblock_virt_alloc(sizeof(*emu_cores), 8);
+ emu_cores = memblock_alloc(sizeof(*emu_cores), 8);
for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
emu_cores->to_node_id[i] = NODE_ID_FREE;
}
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index 5bd374491f94..ae0d9e889534 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/cpumask.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/node.h>
@@ -64,7 +63,7 @@ static __init pg_data_t *alloc_node_data(void)
{
pg_data_t *res;
- res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 8);
+ res = (pg_data_t *) memblock_phys_alloc(sizeof(pg_data_t), 8);
memset(res, 0, sizeof(pg_data_t));
return res;
}
diff --git a/arch/s390/numa/toptree.c b/arch/s390/numa/toptree.c
index 21d1e8a1546d..71a608cd4f61 100644
--- a/arch/s390/numa/toptree.c
+++ b/arch/s390/numa/toptree.c
@@ -8,7 +8,7 @@
*/
#include <linux/kernel.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/list_sort.h>
@@ -34,7 +34,7 @@ struct toptree __ref *toptree_alloc(int level, int id)
if (slab_is_available())
res = kzalloc(sizeof(*res), GFP_KERNEL);
else
- res = memblock_virt_alloc(sizeof(*res), 8);
+ res = memblock_alloc(sizeof(*res), 8);
if (!res)
return res;
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 475d786a65b0..f82a4da7adf3 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -9,9 +9,7 @@ config SUPERH
select CLKDEV_LOOKUP
select DMA_DIRECT_OPS
select HAVE_IDE if HAS_IOPORT_MAP
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
- select NO_BOOTMEM
select ARCH_DISCARD_MEMBLOCK
select HAVE_OPROFILE
select HAVE_GENERIC_DMA_COHERENT
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 95100d8a0b7b..0e0ecc0132e3 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -16,12 +16,6 @@
#include <asm/types.h>
#include <asm/hw_breakpoint.h>
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ void *pc; __asm__("mova 1f, %0\n.align 2\n1:":"=z" (pc)); pc; })
-
/* Core Processor Version Register */
#define CCN_PVR 0xff000030
#define CCN_CVR 0xff000040
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 777a16318aff..f3d7075648d0 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -19,21 +19,6 @@
#include <asm/types.h>
#include <cpu/registers.h>
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ \
-void *pc; \
-unsigned long long __dummy = 0; \
-__asm__("gettr tr0, %1\n\t" \
- "pta 4, tr0\n\t" \
- "gettr tr0, %0\n\t" \
- "ptabs %1, tr0\n\t" \
- :"=r" (pc), "=r" (__dummy) \
- : "1" (__dummy)); \
-pc; })
-
#endif
/*
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 7713c084d040..c8c13c777162 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -11,12 +11,11 @@
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
-#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
@@ -128,7 +127,7 @@ static pmd_t * __init one_md_table_init(pud_t *pud)
if (pud_none(*pud)) {
pmd_t *pmd;
- pmd = alloc_bootmem_pages(PAGE_SIZE);
+ pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
pud_populate(&init_mm, pud, pmd);
BUG_ON(pmd != pmd_offset(pud, 0));
}
@@ -141,7 +140,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
if (pmd_none(*pmd)) {
pte_t *pte;
- pte = alloc_bootmem_pages(PAGE_SIZE);
+ pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd, pte);
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
@@ -350,7 +349,7 @@ void __init mem_init(void)
high_memory = max_t(void *, high_memory,
__va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));
- free_all_bootmem();
+ memblock_free_all();
/* Set this up early, so we can take care of the zero page */
cpu_cache_init();
diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c
index 927a1294c465..07e744d75fa0 100644
--- a/arch/sh/mm/ioremap_fixed.c
+++ b/arch/sh/mm/ioremap_fixed.c
@@ -14,7 +14,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/io.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <asm/fixmap.h>
#include <asm/page.h>
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 7e2aa59fcc29..490b2c95c212 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -45,8 +45,6 @@ config SPARC
select LOCKDEP_SMALL if LOCKDEP
select NEED_DMA_MAP_STATE
select NEED_SG_DMA_LENGTH
- select HAVE_MEMBLOCK
- select NO_BOOTMEM
config SPARC32
def_bool !64BIT
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index 192493c257fa..3c4bc2189092 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -7,12 +7,6 @@
#ifndef __ASM_SPARC_PROCESSOR_H
#define __ASM_SPARC_PROCESSOR_H
-/*
- * Sparc32 implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ void *pc; __asm__("sethi %%hi(1f), %0; or %0, %%lo(1f), %0;\n1:" : "=r" (pc)); pc; })
-
#include <asm/psr.h>
#include <asm/ptrace.h>
#include <asm/head.h>
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index aac23d4a4ddd..5cf145f18f36 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -8,12 +8,6 @@
#ifndef __ASM_SPARC64_PROCESSOR_H
#define __ASM_SPARC64_PROCESSOR_H
-/*
- * Sparc64 implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ void *pc; __asm__("rd %%pc, %0" : "=r" (pc)); pc; })
-
#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 39a2503fa3e1..9a26b442f820 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -5,13 +5,12 @@
*/
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/memblock.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/refcount.h>
@@ -170,7 +169,7 @@ static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size
mdesc_size);
alloc_size = PAGE_ALIGN(handle_size);
- paddr = memblock_alloc(alloc_size, PAGE_SIZE);
+ paddr = memblock_phys_alloc(alloc_size, PAGE_SIZE);
hp = NULL;
if (paddr) {
@@ -190,7 +189,7 @@ static void __init mdesc_memblock_free(struct mdesc_handle *hp)
alloc_size = PAGE_ALIGN(hp->handle_size);
start = __pa(hp);
- free_bootmem_late(start, alloc_size);
+ memblock_free_late(start, alloc_size);
}
static struct mdesc_mem_ops memblock_mdesc_ops = {
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index b51cbb9e87dc..d41e2a749c5d 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -19,7 +19,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/prom.h>
#include <asm/oplib.h>
@@ -32,7 +32,7 @@ void * __init prom_early_alloc(unsigned long size)
{
void *ret;
- ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
+ ret = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL);
if (ret != NULL)
memset(ret, 0, size);
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index baeaeed64993..c37955d127fe 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -34,7 +34,7 @@
void * __init prom_early_alloc(unsigned long size)
{
- unsigned long paddr = memblock_alloc(size, SMP_CACHE_BYTES);
+ unsigned long paddr = memblock_phys_alloc(size, SMP_CACHE_BYTES);
void *ret;
if (!paddr) {
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 7944b3ca216a..cd2825cb8420 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -32,7 +32,7 @@
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/start_kernel.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -621,12 +621,10 @@ void __init alloc_irqstack_bootmem(void)
for_each_possible_cpu(i) {
node = cpu_to_node(i);
- softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
- THREAD_SIZE,
- THREAD_SIZE, 0);
- hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
- THREAD_SIZE,
- THREAD_SIZE, 0);
+ softirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
+ THREAD_SIZE, node);
+ hardirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
+ THREAD_SIZE, node);
}
}
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index d3ea1f3c06a0..4792e08ad36b 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -22,7 +22,7 @@
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
@@ -1588,26 +1588,26 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
void *ptr;
if (!node_online(node) || !NODE_DATA(node)) {
- ptr = __alloc_bootmem(size, align, goal);
+ ptr = memblock_alloc_from(size, align, goal);
pr_info("cpu %d has no node %d or node-local memory\n",
cpu, node);
pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
cpu, size, __pa(ptr));
} else {
- ptr = __alloc_bootmem_node(NODE_DATA(node),
- size, align, goal);
+ ptr = memblock_alloc_try_nid(size, align, goal,
+ MEMBLOCK_ALLOC_ACCESSIBLE, node);
pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
"%016lx\n", cpu, size, node, __pa(ptr));
}
return ptr;
#else
- return __alloc_bootmem(size, align, goal);
+ return memblock_alloc_from(size, align, goal);
#endif
}
static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
- free_bootmem(__pa(ptr), size);
+ memblock_free(__pa(ptr), size);
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
@@ -1627,7 +1627,7 @@ static void __init pcpu_populate_pte(unsigned long addr)
if (pgd_none(*pgd)) {
pud_t *new;
- new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
pgd_populate(&init_mm, pgd, new);
}
@@ -1635,7 +1635,7 @@ static void __init pcpu_populate_pte(unsigned long addr)
if (pud_none(*pud)) {
pmd_t *new;
- new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
pud_populate(&init_mm, pud, new);
}
@@ -1643,7 +1643,7 @@ static void __init pcpu_populate_pte(unsigned long addr)
if (!pmd_present(*pmd)) {
pte_t *new;
- new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd, new);
}
}
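The sparc hunks repeatedly translate __alloc_bootmem(size, align, goal) into memblock_alloc_from(size, align, min_addr), which likewise returns zeroed memory by virtual address but allocated no lower than min_addr. A small sketch of one such call, not taken from the patch; the function name is illustrative:

#include <linux/init.h>
#include <linux/memblock.h>	/* memblock_alloc_from() */
#include <asm/page.h>		/* PAGE_SIZE */

static void * __init example_early_page(void)
{
	/* Mirrors the pcpu_populate_pte() sites above: one zeroed page,
	 * page-aligned, allocated at or above PAGE_SIZE. */
	return memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
}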
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 92634d4e440c..d900952bfc5f 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -22,7 +22,6 @@
#include <linux/initrd.h>
#include <linux/init.h>
#include <linux/highmem.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
@@ -265,7 +264,7 @@ void __init mem_init(void)
i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
i += 1;
sparc_valid_addr_bitmap = (unsigned long *)
- __alloc_bootmem(i << 2, SMP_CACHE_BYTES, 0UL);
+ memblock_alloc_from(i << 2, SMP_CACHE_BYTES, 0UL);
if (sparc_valid_addr_bitmap == NULL) {
prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
@@ -277,7 +276,7 @@ void __init mem_init(void)
max_mapnr = last_valid_pfn - pfn_base;
high_memory = __va(max_low_pfn << PAGE_SHIFT);
- free_all_bootmem();
+ memblock_free_all();
for (i = 0; sp_banks[i].num_bytes != 0; i++) {
unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 39822f611c01..3c8aac21f426 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -11,7 +11,7 @@
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
@@ -25,7 +25,6 @@
#include <linux/sort.h>
#include <linux/ioport.h>
#include <linux/percpu.h>
-#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>
@@ -1092,7 +1091,8 @@ static void __init allocate_node_data(int nid)
#ifdef CONFIG_NEED_MULTIPLE_NODES
unsigned long paddr;
- paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
+ paddr = memblock_phys_alloc_try_nid(sizeof(struct pglist_data),
+ SMP_CACHE_BYTES, nid);
if (!paddr) {
prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
prom_halt();
@@ -1266,8 +1266,8 @@ static int __init grab_mlgroups(struct mdesc_handle *md)
if (!count)
return -ENOENT;
- paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
- SMP_CACHE_BYTES);
+ paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mlgroup),
+ SMP_CACHE_BYTES);
if (!paddr)
return -ENOMEM;
@@ -1307,8 +1307,8 @@ static int __init grab_mblocks(struct mdesc_handle *md)
if (!count)
return -ENOENT;
- paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
- SMP_CACHE_BYTES);
+ paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mblock),
+ SMP_CACHE_BYTES);
if (!paddr)
return -ENOMEM;
@@ -1810,7 +1810,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
if (pgd_none(*pgd)) {
pud_t *new;
- new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
+ PAGE_SIZE);
alloc_bytes += PAGE_SIZE;
pgd_populate(&init_mm, pgd, new);
}
@@ -1822,7 +1823,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
vstart = kernel_map_hugepud(vstart, vend, pud);
continue;
}
- new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
+ PAGE_SIZE);
alloc_bytes += PAGE_SIZE;
pud_populate(&init_mm, pud, new);
}
@@ -1835,7 +1837,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
vstart = kernel_map_hugepmd(vstart, vend, pmd);
continue;
}
- new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
+ PAGE_SIZE);
alloc_bytes += PAGE_SIZE;
pmd_populate_kernel(&init_mm, pmd, new);
}
@@ -2541,12 +2544,12 @@ void __init mem_init(void)
{
high_memory = __va(last_valid_pfn << PAGE_SHIFT);
- free_all_bootmem();
+ memblock_free_all();
/*
* Must be done after boot memory is put on freelist, because here we
* might set fields in deferred struct pages that have not yet been
- * initialized, and free_all_bootmem() initializes all the reserved
+ * initialized, and memblock_free_all() initializes all the reserved
* deferred pages for us.
*/
register_page_bootmem_info();
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index be9cb0065179..a6142c5abf61 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -11,7 +11,7 @@
#include <linux/seq_file.h>
#include <linux/spinlock.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>
@@ -303,13 +303,13 @@ static void __init srmmu_nocache_init(void)
bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
- srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
- SRMMU_NOCACHE_ALIGN_MAX, 0UL);
+ srmmu_nocache_pool = memblock_alloc_from(srmmu_nocache_size,
+ SRMMU_NOCACHE_ALIGN_MAX, 0UL);
memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
srmmu_nocache_bitmap =
- __alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
- SMP_CACHE_BYTES, 0UL);
+ memblock_alloc_from(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+ SMP_CACHE_BYTES, 0UL);
bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
@@ -467,7 +467,7 @@ static void __init sparc_context_init(int numctx)
unsigned long size;
size = numctx * sizeof(struct ctx_list);
- ctx_list_pool = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
+ ctx_list_pool = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL);
for (ctx = 0; ctx < numctx; ctx++) {
struct ctx_list *clist;
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 10c15b8853ae..6b9938919f0b 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -12,8 +12,6 @@ config UML
select HAVE_UID16
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_DEBUG_KMEMLEAK
- select HAVE_MEMBLOCK
- select NO_BOOTMEM
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select GENERIC_CLOCKEVENTS
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 3ef1b48e064a..624cb47cc9cd 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -6,7 +6,7 @@
* Licensed under the GPL.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/inetdevice.h>
@@ -650,7 +650,7 @@ static int __init eth_setup(char *str)
return 1;
}
- new = alloc_bootmem(sizeof(*new));
+ new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
INIT_LIST_HEAD(&new->list);
new->index = n;
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 50ee3bb5a63a..10d8d20eb9ec 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -9,7 +9,7 @@
*/
#include <linux/version.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/inetdevice.h>
@@ -1580,7 +1580,7 @@ static int __init vector_setup(char *str)
str, error);
return 1;
}
- new = alloc_bootmem(sizeof(*new));
+ new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
INIT_LIST_HEAD(&new->list);
new->unit = n;
new->arguments = str;
diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c
index 6f6e7896e53f..ce169ea87e61 100644
--- a/arch/um/kernel/initrd.c
+++ b/arch/um/kernel/initrd.c
@@ -4,7 +4,7 @@
*/
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/initrd.h>
#include <asm/types.h>
#include <init.h>
@@ -36,7 +36,7 @@ int __init read_initrd(void)
return 0;
}
- area = alloc_bootmem(size);
+ area = memblock_alloc(size, SMP_CACHE_BYTES);
if (load_initrd(initrd, area, size) == -1)
return 0;
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 3c0e470ea646..1067469ba2ea 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -5,7 +5,7 @@
#include <linux/stddef.h>
#include <linux/module.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
@@ -46,11 +46,11 @@ void __init mem_init(void)
*/
brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
- free_bootmem(__pa(brk_end), uml_reserved - brk_end);
+ memblock_free(__pa(brk_end), uml_reserved - brk_end);
uml_reserved = brk_end;
/* this will put all low memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
max_low_pfn = totalram_pages;
max_pfn = totalram_pages;
mem_init_print_info(NULL);
@@ -64,7 +64,8 @@ void __init mem_init(void)
static void __init one_page_table_init(pmd_t *pmd)
{
if (pmd_none(*pmd)) {
- pte_t *pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
+ PAGE_SIZE);
set_pmd(pmd, __pmd(_KERNPG_TABLE +
(unsigned long) __pa(pte)));
if (pte != pte_offset_kernel(pmd, 0))
@@ -75,7 +76,7 @@ static void __init one_page_table_init(pmd_t *pmd)
static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
- pmd_t *pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
if (pmd_table != pmd_offset(pud, 0))
BUG();
@@ -124,7 +125,7 @@ static void __init fixaddr_user_init( void)
return;
fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
- v = (unsigned long) alloc_bootmem_low_pages(size);
+ v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
memcpy((void *) v , (void *) FIXADDR_USER_START, size);
p = __pa(v);
for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
@@ -143,7 +144,8 @@ void __init paging_init(void)
unsigned long zones_size[MAX_NR_ZONES], vaddr;
int i;
- empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
+ empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
+ PAGE_SIZE);
for (i = 0; i < ARRAY_SIZE(zones_size); i++)
zones_size[i] = 0;
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index 296a91a04598..5bf56af4d5b9 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -4,7 +4,6 @@
*/
#include <linux/module.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 0c5111b206bd..a4c05159dca5 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -5,8 +5,6 @@ config UNICORE32
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select DMA_DIRECT_OPS
- select HAVE_MEMBLOCK
- select NO_BOOTMEM
select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
diff --git a/arch/unicore32/include/asm/processor.h b/arch/unicore32/include/asm/processor.h
index 4eaa42167667..b772ed1c0f25 100644
--- a/arch/unicore32/include/asm/processor.h
+++ b/arch/unicore32/include/asm/processor.h
@@ -13,12 +13,6 @@
#ifndef __UNICORE_PROCESSOR_H__
#define __UNICORE_PROCESSOR_H__
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l; })
-
#ifdef __KERNEL__
#include <asm/ptrace.h>
diff --git a/arch/unicore32/kernel/hibernate.c b/arch/unicore32/kernel/hibernate.c
index 9969ec374abb..29b71c68eb7c 100644
--- a/arch/unicore32/kernel/hibernate.c
+++ b/arch/unicore32/kernel/hibernate.c
@@ -13,7 +13,7 @@
#include <linux/gfp.h>
#include <linux/suspend.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/arch/unicore32/kernel/setup.c b/arch/unicore32/kernel/setup.c
index c2bffa5614a4..4b0cb68c355a 100644
--- a/arch/unicore32/kernel/setup.c
+++ b/arch/unicore32/kernel/setup.c
@@ -17,7 +17,7 @@
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
@@ -27,7 +27,6 @@
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
-#include <linux/memblock.h>
#include <linux/elf.h>
#include <linux/io.h>
@@ -207,7 +206,7 @@ request_standard_resources(struct meminfo *mi)
if (mi->bank[i].size == 0)
continue;
- res = alloc_bootmem_low(sizeof(*res));
+ res = memblock_alloc_low(sizeof(*res), SMP_CACHE_BYTES);
res->name = "System RAM";
res->start = mi->bank[i].start;
res->end = mi->bank[i].start + mi->bank[i].size - 1;
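
The old bootmem allocators aligned implicitly; memblock takes the alignment as an explicit second argument, which is why SMP_CACHE_BYTES (the previous implicit default) now appears at call sites with no stronger requirement. A hedged sketch with a hypothetical alloc_ram_resource() helper:

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/memblock.h>

static struct resource * __init alloc_ram_resource(u64 start, u64 size)
{
	/* SMP_CACHE_BYTES reproduces the implicit bootmem alignment */
	struct resource *res = memblock_alloc_low(sizeof(*res), SMP_CACHE_BYTES);

	res->name  = "System RAM";
	res->start = start;
	res->end   = start + size - 1;
	res->flags = IORESOURCE_MEM;	/* platform-specific flags go here */
	return res;
}
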
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
index 8f8699e62bd5..cf4eb9481fd6 100644
--- a/arch/unicore32/mm/init.c
+++ b/arch/unicore32/mm/init.c
@@ -11,13 +11,12 @@
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
-#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
@@ -238,7 +237,7 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
* free the section of the memmap array.
*/
if (pg < pgend)
- free_bootmem(pg, pgend - pg);
+ memblock_free(pg, pgend - pg);
}
/*
@@ -286,7 +285,7 @@ void __init mem_init(void)
free_unused_memmap(&meminfo);
/* this will put all unused low memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
printk(KERN_NOTICE "Virtual kernel memory layout:\n"
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c
index 0c94b7b4514d..040a8c279761 100644
--- a/arch/unicore32/mm/mmu.c
+++ b/arch/unicore32/mm/mmu.c
@@ -17,7 +17,6 @@
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
-#include <linux/bootmem.h>
#include <linux/io.h>
#include <asm/cputype.h>
@@ -144,7 +143,7 @@ static void __init build_mem_type_table(void)
static void __init *early_alloc(unsigned long sz)
{
- void *ptr = __va(memblock_alloc(sz, sz));
+ void *ptr = __va(memblock_phys_alloc(sz, sz));
memset(ptr, 0, sz);
return ptr;
}
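
Before this series the arch-visible memblock_alloc() returned a physical address; it is renamed memblock_phys_alloc() here, while the new memblock_alloc() (the old memblock_virt_alloc()) returns an already-mapped, zeroed pointer. A small sketch contrasting the two, with hypothetical helper names:

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/string.h>
#include <asm/page.h>

static void * __init early_buffer_phys(unsigned long sz)
{
	/* physical flavour: convert with __va() and clear it yourself */
	phys_addr_t pa = memblock_phys_alloc(sz, sz);
	void *ptr = __va(pa);

	memset(ptr, 0, sz);
	return ptr;
}

static void * __init early_buffer_virt(unsigned long sz)
{
	/* virtual flavour: already mapped and zeroed */
	return memblock_alloc(sz, sz);
}
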
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ffebfc3f43c1..c51c989c19c0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -169,7 +169,6 @@ config X86
select HAVE_KRETPROBES
select HAVE_KVM
select HAVE_LIVEPATCH if X86_64
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MIXED_BREAKPOINTS_REGS
select HAVE_MOD_ARCH_SPECIFIC
@@ -834,9 +833,6 @@ config JAILHOUSE_GUEST
endif #HYPERVISOR_GUEST
-config NO_BOOTMEM
- def_bool y
-
source "arch/x86/Kconfig.cpu"
config HPET_TIMER
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 5125fca472bb..003f2daa3b0f 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -21,6 +21,7 @@
#ifndef __ASSEMBLY__
#include <linux/string.h>
+#include <linux/kernel.h>
#include <asm/page.h>
#include <asm/ptrace.h>
@@ -132,7 +133,7 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
#endif
- newregs->ip = (unsigned long)current_text_addr();
+ newregs->ip = _THIS_IP_;
}
}
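
current_text_addr() is removed tree-wide in this merge; _THIS_IP_ from <linux/kernel.h> is the generic replacement and evaluates to an unsigned long holding the address of the instruction at the point of use. A usage sketch with a hypothetical note_callsite() function:

#include <linux/kernel.h>

static void note_callsite(void)
{
	unsigned long ip = _THIS_IP_;	/* an address inside note_callsite() */

	pr_debug("reached %pS\n", (void *)ip);
}
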
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 617805981cce..071b2a6fff85 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -42,18 +42,6 @@ struct vm86;
#define NET_IP_ALIGN 0
#define HBP_NUM 4
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-static inline void *current_text_addr(void)
-{
- void *pc;
-
- asm volatile("mov $1f, %0; 1:":"=r" (pc));
-
- return pc;
-}
/*
* These alignment constraints are for performance in the vSMP case,
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index e8fea7ffa306..92c76bf97ad8 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -32,7 +32,7 @@
#include <linux/dmi.h>
#include <linux/irq.h>
#include <linux/slab.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/efi-bgrt.h>
@@ -933,7 +933,8 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
* the resource tree during the lateinit timeframe.
*/
#define HPET_RESOURCE_NAME_SIZE 9
- hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
+ hpet_res = memblock_alloc(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE,
+ SMP_CACHE_BYTES);
hpet_res->name = (void *)&hpet_res[1];
hpet_res->flags = IORESOURCE_MEM;
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index f1915b744052..ca13851f0570 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -7,7 +7,6 @@
*/
#include <linux/acpi.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/dmi.h>
#include <linux/cpumask.h>
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index ab731ab09f06..32b2b7a41ef5 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -20,7 +20,7 @@
#include <linux/acpi_pmtmr.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/ftrace.h>
#include <linux/ioport.h>
#include <linux/export.h>
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ff0d14cd9e82..2953bbf05c08 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -47,7 +47,7 @@
#include <linux/kthread.h>
#include <linux/jiffies.h> /* time_after() */
#include <linux/slab.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/irqdomain.h>
#include <asm/io.h>
@@ -2578,7 +2578,7 @@ static struct resource * __init ioapic_setup_resources(void)
n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
n *= nr_ioapics;
- mem = alloc_bootmem(n);
+ mem = memblock_alloc(n, SMP_CACHE_BYTES);
res = (void *)mem;
mem += sizeof(struct resource) * nr_ioapics;
@@ -2621,7 +2621,8 @@ void __init io_apic_init_mappings(void)
#ifdef CONFIG_X86_32
fake_ioapic_page:
#endif
- ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
+ ioapic_phys = (unsigned long)memblock_alloc(PAGE_SIZE,
+ PAGE_SIZE);
ioapic_phys = __pa(ioapic_phys);
}
set_fixmap_nocache(idx, ioapic_phys);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 660d0b22e962..cbbd57ae06ee 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1,7 +1,7 @@
/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index d1f25c831447..50895c2f937d 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -9,11 +9,10 @@
* allocation code routines via a platform independent interface (memblock, etc.).
*/
#include <linux/crash_dump.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/firmware-map.h>
-#include <linux/memblock.h>
#include <linux/sort.h>
#include <asm/e820/api.h>
@@ -1094,7 +1093,8 @@ void __init e820__reserve_resources(void)
struct resource *res;
u64 end;
- res = alloc_bootmem(sizeof(*res) * e820_table->nr_entries);
+ res = memblock_alloc(sizeof(*res) * e820_table->nr_entries,
+ SMP_CACHE_BYTES);
e820_res = res;
for (i = 0; i < e820_table->nr_entries; i++) {
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index f1c5eb99d445..3482460d984d 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -11,7 +11,6 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 7ba73fe0d917..f4562fcec681 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -3,7 +3,7 @@
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/pci.h>
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 71c0b01d93b1..bd08b9e1c9e2 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -5,7 +5,7 @@
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/swiotlb.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/mem_encrypt.h>
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 637982efecd8..9b158b4716d2 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -20,7 +20,7 @@
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/gfp.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/nmi.h>
#include <asm/fixmap.h>
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 7005f89bf3b2..b74e7bfed6ab 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -30,7 +30,6 @@
#include <linux/sfi.h>
#include <linux/apm_bios.h>
#include <linux/initrd.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/console.h>
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index ea554f812ee1..e8796fcd7e5a 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -4,7 +4,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
@@ -106,20 +106,22 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
void *ptr;
if (!node_online(node) || !NODE_DATA(node)) {
- ptr = __alloc_bootmem_nopanic(size, align, goal);
+ ptr = memblock_alloc_from_nopanic(size, align, goal);
pr_info("cpu %d has no node %d or node-local memory\n",
cpu, node);
pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
cpu, size, __pa(ptr));
} else {
- ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
- size, align, goal);
+ ptr = memblock_alloc_try_nid_nopanic(size, align, goal,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ node);
+
pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
cpu, size, node, __pa(ptr));
}
return ptr;
#else
- return __alloc_bootmem_nopanic(size, align, goal);
+ return memblock_alloc_from_nopanic(size, align, goal);
#endif
}
@@ -133,7 +135,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
static void __init pcpu_fc_free(void *ptr, size_t size)
{
- free_bootmem(__pa(ptr), size);
+ memblock_free(__pa(ptr), size);
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
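
The per-cpu bootstrap keeps its two-level fallback; only the allocator names change. memblock_alloc_from_nopanic() allocates at or above a "goal" address and returns NULL instead of panicking, and memblock_alloc_try_nid_nopanic() additionally prefers a NUMA node. A condensed sketch of that fallback, using a hypothetical alloc_percpu_chunk():

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>

static void * __init alloc_percpu_chunk(int node, unsigned long size,
					unsigned long align, unsigned long goal)
{
	if (!node_online(node) || !NODE_DATA(node))
		/* no usable node: any memory at or above 'goal' will do */
		return memblock_alloc_from_nopanic(size, align, goal);

	/* prefer node-local memory, fall back to any accessible range */
	return memblock_alloc_try_nid_nopanic(size, align, goal,
					      MEMBLOCK_ALLOC_ACCESSIBLE, node);
}
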
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 5369d7fac797..a9134d1910b9 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -49,7 +49,7 @@
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/percpu.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/nmi.h>
#include <linux/tboot.h>
diff --git a/arch/x86/kernel/tce_64.c b/arch/x86/kernel/tce_64.c
index f386bad0984e..285aaa62d153 100644
--- a/arch/x86/kernel/tce_64.c
+++ b/arch/x86/kernel/tce_64.c
@@ -30,7 +30,7 @@
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/tce.h>
#include <asm/calgary.h>
#include <asm/proto.h>
@@ -173,7 +173,7 @@ void * __init alloc_tce_table(void)
size = table_size_to_number_of_entries(specified_table_size);
size *= TCE_ENTRY_SIZE;
- return __alloc_bootmem_low(size, size, 0);
+ return memblock_alloc_low(size, size);
}
void __init free_tce_table(void *tbl)
@@ -186,5 +186,5 @@ void __init free_tce_table(void *tbl)
size = table_size_to_number_of_entries(specified_table_size);
size *= TCE_ENTRY_SIZE;
- free_bootmem(__pa(tbl), size);
+ memblock_free(__pa(tbl), size);
}
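
Worth noting in the hunk above: memblock_alloc_low() hands back a virtual pointer, while memblock_free(), like free_bootmem() before it, takes a physical address, hence the __pa() at the free site. A minimal sketch with hypothetical helpers:

#include <linux/init.h>
#include <linux/memblock.h>
#include <asm/page.h>

static void * __init grab_early_table(unsigned long size)
{
	/* naturally aligned, low-memory allocation, returned as a pointer */
	return memblock_alloc_low(size, size);
}

static void __init drop_early_table(void *tbl, unsigned long size)
{
	/* memblock_free() wants the physical address */
	memblock_free(__pa(tbl), size);
}
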
diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c
index 048c761d97b0..058b2f36b3a6 100644
--- a/arch/x86/mm/amdtopology.c
+++ b/arch/x86/mm/amdtopology.c
@@ -12,7 +12,6 @@
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <asm/io.h>
#include <linux/pci_ids.h>
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b24eb4eb9984..71d4b9d4d43f 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -8,7 +8,7 @@
#include <linux/sched/task_stack.h> /* task_stack_*(), ... */
#include <linux/kdebug.h> /* oops_begin/end, ... */
#include <linux/extable.h> /* search_exception_tables */
-#include <linux/bootmem.h> /* max_low_pfn */
+#include <linux/memblock.h> /* max_low_pfn */
#include <linux/kprobes.h> /* NOKPROBE_SYMBOL, ... */
#include <linux/mmiotrace.h> /* kmmio_handler, ... */
#include <linux/perf_event.h> /* perf_sw_event */
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 6d18b70ed5a9..0d4bdcb84da5 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -1,7 +1,7 @@
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/swap.h> /* for totalram_pages */
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
void *kmap(struct page *page)
{
@@ -111,7 +111,7 @@ void __init set_highmem_pages_init(void)
/*
* Explicitly reset zone->managed_pages because set_highmem_pages_init()
- * is invoked before free_all_bootmem()
+ * is invoked before memblock_free_all()
*/
reset_all_zones_managed_pages();
for_each_zone(zone) {
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index faca978ebf9d..ef99f3892e1f 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -3,7 +3,6 @@
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h> /* for max_low_pfn */
#include <linux/swapfile.h>
#include <linux/swapops.h>
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 142c7d9f89cc..49ecf5ecf6d3 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -23,7 +23,6 @@
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
@@ -771,7 +770,7 @@ void __init mem_init(void)
#endif
/*
* With CONFIG_DEBUG_PAGEALLOC initialization of highmem pages has to
- * be done before free_all_bootmem(). Memblock use free low memory for
+ * be done before memblock_free_all(). Memblock uses free low memory for
* temporary data (see find_range_array()) and for this purpose can use
* pages that were already passed to the buddy allocator, hence marked as
* not accessible in the page tables when compiled with
@@ -781,7 +780,7 @@ void __init mem_init(void)
set_highmem_pages_init();
/* this will put all low memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
after_bootmem = 1;
x86_init.hyper.init_after_bootmem();
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index dd519f372169..5fab264948c2 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -20,7 +20,6 @@
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
@@ -197,7 +196,7 @@ static __ref void *spp_getpage(void)
if (after_bootmem)
ptr = (void *) get_zeroed_page(GFP_ATOMIC);
else
- ptr = alloc_bootmem_pages(PAGE_SIZE);
+ ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
panic("set_pte_phys: cannot allocate page data %s\n",
@@ -1188,14 +1187,14 @@ void __init mem_init(void)
/* clear_bss() already clear the empty_zero_page */
/* this will put all memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
after_bootmem = 1;
x86_init.hyper.init_after_bootmem();
/*
* Must be done after boot memory is put on freelist, because here we
* might set fields in deferred struct pages that have not yet been
- * initialized, and free_all_bootmem() initializes all the reserved
+ * initialized, and memblock_free_all() initializes all the reserved
* deferred pages for us.
*/
register_page_bootmem_info();
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 24e0920a9b25..5378d10f1d31 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -6,7 +6,7 @@
* (C) Copyright 1995 1996 Linus Torvalds
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index e3e77527f8df..04a9cf6b034f 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -5,10 +5,9 @@
/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
-#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
@@ -28,11 +27,11 @@ static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
static __init void *early_alloc(size_t size, int nid, bool panic)
{
if (panic)
- return memblock_virt_alloc_try_nid(size, size,
- __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ return memblock_alloc_try_nid(size, size,
+ __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
else
- return memblock_virt_alloc_try_nid_nopanic(size, size,
- __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ return memblock_alloc_try_nid_nopanic(size, size,
+ __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
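
memblock_alloc_try_nid() and its _nopanic twin take (size, align, min_addr, max_addr, nid), and BOOTMEM_ALLOC_ACCESSIBLE maps directly to MEMBLOCK_ALLOC_ACCESSIBLE for the "no particular upper bound" case. A sketch mirroring the helper above, with a hypothetical shadow_alloc() name:

#include <linux/init.h>
#include <linux/memblock.h>
#include <asm/dma.h>	/* MAX_DMA_ADDRESS */

static void * __init shadow_alloc(size_t size, int nid, bool must_succeed)
{
	if (must_succeed)	/* panics internally on failure */
		return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
					      MEMBLOCK_ALLOC_ACCESSIBLE, nid);

	/* may return NULL; the caller provides the fallback */
	return memblock_alloc_try_nid_nopanic(size, size, __pa(MAX_DMA_ADDRESS),
					      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
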
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 61db77b0eda9..3f452ffed7e9 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>
+#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index fa150855647c..1308f5408bf7 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -4,7 +4,6 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
@@ -196,7 +195,7 @@ static void __init alloc_node_data(int nid)
* Allocate node data. Try node-local memory and then any node.
* Never allocate in DMA zone.
*/
- nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
+ nd_pa = memblock_phys_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
if (!nd_pa) {
nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
MEMBLOCK_ALLOC_ACCESSIBLE);
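
memblock_alloc_nid() is renamed memblock_phys_alloc_nid() to make explicit that it returns a physical address; the fallback to __memblock_alloc_base() is left as it was. A sketch of the node-data allocation pattern, with a hypothetical alloc_node_data_phys() wrapper:

#include <linux/init.h>
#include <linux/memblock.h>

static phys_addr_t __init alloc_node_data_phys(u64 nd_size, int nid)
{
	/* try node-local memory first ... */
	phys_addr_t nd_pa = memblock_phys_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);

	/* ... then anything that is currently accessible */
	if (!nd_pa)
		nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
					      MEMBLOCK_ALLOC_ACCESSIBLE);
	return nd_pa;
}
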
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index e8a4a09e20f1..f2bd3d61e16b 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -22,7 +22,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/init.h>
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 066f3511d5f1..59d80160fa5a 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -3,7 +3,7 @@
* Generic VM initialization for x86-64 NUMA setups.
* Copyright 2002,2003 Andi Kleen, SuSE Labs.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include "numa_internal.h"
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index b54d52a2d00a..a80fdd7fb40f 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -6,7 +6,6 @@
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <asm/dma.h>
#include "numa_internal.h"
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index a25588ad75ef..08f8f76a4852 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -5,7 +5,7 @@
* Clears a test pte bit on random pages in the direct mapping,
* then reverts and compares page tables forwards and afterwards.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <linux/kernel.h>
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 62bb30b4bd2a..f799076e3d57 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -3,7 +3,7 @@
* Thanks to Ben LaHaise for precious feedback.
*/
#include <linux/highmem.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 3d0c83ef6aab..08013524fba1 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -8,7 +8,7 @@
*/
#include <linux/seq_file.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/debugfs.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
index 7f9acb68324c..bdc98150d4db 100644
--- a/arch/x86/mm/physaddr.c
+++ b/arch/x86/mm/physaddr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/mmdebug.h>
#include <linux/export.h>
#include <linux/mm.h>
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index ed4ac215305d..8cd66152cdb0 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -32,7 +32,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/errno.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/pat.h>
#include <asm/e820/api.h>
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 9061babfbc83..7ae939e353cd 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -36,9 +36,8 @@
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/export.h>
-#include <linux/bootmem.h>
-#include <linux/slab.h>
#include <linux/memblock.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/time.h>
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index e8da7f492970..cf0347f61b21 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -23,7 +23,7 @@
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/efi.h>
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 669babcaf245..95e77a667ba5 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -8,7 +8,6 @@
#include <linux/efi.h>
#include <linux/slab.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
@@ -333,7 +332,7 @@ void __init efi_reserve_boot_services(void)
/*
* Because the following memblock_reserve() is paired
- * with free_bootmem_late() for this region in
+ * with memblock_free_late() for this region in
* efi_free_boot_services(), we must be extremely
* careful not to reserve, and subsequently free,
* critical regions of memory (like the kernel image) or
@@ -364,7 +363,7 @@ void __init efi_reserve_boot_services(void)
* doesn't make sense as far as the firmware is
* concerned, but it does provide us with a way to tag
* those regions that must not be paired with
- * free_bootmem_late().
+ * memblock_free_late().
*/
md->attribute |= EFI_MEMORY_RUNTIME;
}
@@ -414,7 +413,7 @@ void __init efi_free_boot_services(void)
size -= rm_size;
}
- free_bootmem_late(start, size);
+ memblock_free_late(start, size);
}
if (!num_entries)
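
free_bootmem_late() becomes memblock_free_late(), which is meant for ranges released late in boot: it pushes the pages straight to the page allocator rather than back into memblock, so it stays usable after memblock_free_all(). A short sketch with a hypothetical wrapper:

#include <linux/init.h>
#include <linux/memblock.h>

static void __init release_boot_services_region(phys_addr_t start, phys_addr_t size)
{
	/* safe even once the buddy allocator is up */
	memblock_free_late(start, size);
}
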
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
index d6ee92986920..24d2175a9480 100644
--- a/arch/x86/platform/olpc/olpc_dt.c
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -17,7 +17,7 @@
*/
#include <linux/kernel.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_pdt.h>
@@ -141,7 +141,7 @@ void * __init prom_early_alloc(unsigned long size)
* fast enough on the platforms we care about while minimizing
* wasted bootmem) and hand off chunks of it to callers.
*/
- res = alloc_bootmem(chunk_size);
+ res = memblock_alloc(chunk_size, SMP_CACHE_BYTES);
BUG_ON(!res);
prom_early_allocated += chunk_size;
memset(res, 0, chunk_size);
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index 15695e30f982..be15bdcb20df 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -8,7 +8,7 @@
#include <linux/gfp.h>
#include <linux/suspend.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/arch/x86/um/asm/processor_32.h b/arch/x86/um/asm/processor_32.h
index c112de81c9e1..5fb1b8449adf 100644
--- a/arch/x86/um/asm/processor_32.h
+++ b/arch/x86/um/asm/processor_32.h
@@ -47,14 +47,6 @@ static inline void arch_copy_thread(struct arch_thread *from,
memcpy(&to->tls_array, &from->tls_array, sizeof(from->tls_array));
}
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter"). Stolen
- * from asm-i386/processor.h
- */
-#define current_text_addr() \
- ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
-
#define current_sp() ({ void *sp; __asm__("movl %%esp, %0" : "=r" (sp) : ); sp; })
#define current_bp() ({ unsigned long bp; __asm__("movl %%ebp, %0" : "=r" (bp) : ); bp; })
diff --git a/arch/x86/um/asm/processor_64.h b/arch/x86/um/asm/processor_64.h
index c3be85205a65..1ef9c21877bc 100644
--- a/arch/x86/um/asm/processor_64.h
+++ b/arch/x86/um/asm/processor_64.h
@@ -31,9 +31,6 @@ static inline void arch_copy_thread(struct arch_thread *from,
to->fs = from->fs;
}
-#define current_text_addr() \
- ({ void *pc; __asm__("movq $1f,%0\n1:":"=g" (pc)); pc; })
-
#define current_sp() ({ void *sp; __asm__("movq %%rsp, %0" : "=r" (sp) : ); sp; })
#define current_bp() ({ unsigned long bp; __asm__("movq %%rbp, %0" : "=r" (bp) : ); bp; })
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 67b2f31a1265..e996e8e744cb 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#endif
#include <linux/cpu.h>
#include <linux/kexec.h>
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index ec7a4209f310..2f6787fc7106 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -23,7 +23,7 @@
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
@@ -31,7 +31,6 @@
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/gfp.h>
-#include <linux/memblock.h>
#include <linux/edd.h>
#include <linux/frame.h>
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 70ea598a37d2..0d7b3ae4960b 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -864,7 +864,7 @@ static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
* The init_mm pagetable is really pinned as soon as its created, but
* that's before we have page structures to store the bits. So do all
* the book-keeping now once struct pages for allocated pages are
- * initialized. This happens only after free_all_bootmem() is called.
+ * initialized. This happens only after memblock_free_all() is called.
*/
static void __init xen_after_bootmem(void)
{
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index d6d74efd8912..b06731705529 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -67,7 +67,7 @@
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -182,7 +182,7 @@ static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
static void * __ref alloc_p2m_page(void)
{
if (unlikely(!slab_is_available()))
- return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
+ return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
return (void *)__get_free_page(GFP_KERNEL);
}
@@ -190,7 +190,7 @@ static void * __ref alloc_p2m_page(void)
static void __ref free_p2m_page(void *p)
{
if (unlikely(!slab_is_available())) {
- free_bootmem((unsigned long)p, PAGE_SIZE);
+ memblock_free((unsigned long)p, PAGE_SIZE);
return;
}
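
alloc_bootmem_align()/free_bootmem() become memblock_alloc()/memblock_free(); the slab_is_available() check keeps switching between the early allocator and the page allocator exactly as before. A sketch of that dual-path idiom with hypothetical helper names:

#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <asm/page.h>

static void * __ref get_early_or_late_page(void)
{
	if (unlikely(!slab_is_available()))
		return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	return (void *)__get_free_page(GFP_KERNEL);
}

static void __ref put_early_or_late_page(void *p)
{
	if (unlikely(!slab_is_available()))
		memblock_free(__pa(p), PAGE_SIZE);
	else
		free_page((unsigned long)p);
}
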
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index ea5d8d03e53b..60c141af222b 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -28,13 +28,11 @@ config XTENSA
select HAVE_FUTEX_CMPXCHG if !MMU
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
- select HAVE_MEMBLOCK
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select HAVE_STACKPROTECTOR
select IRQ_DOMAIN
select MODULES_USE_ELF_RELA
- select NO_BOOTMEM
select PERF_USE_VMALLOC
select VIRT_TO_BUS
help
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index e4ccb88b7996..be9bfd9aa865 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -152,14 +152,6 @@ struct thread_struct {
int align[0] __attribute__ ((aligned(16)));
};
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
-
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index 21f13e9aabe1..5ca440a74316 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -24,7 +24,7 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/errno.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/pci-bridge.h>
#include <asm/platform.h>
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 9220dcde7520..b27359e2a464 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -21,7 +21,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 34aead7dcb48..9750a48f491b 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -18,7 +18,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/swap.h>
@@ -152,7 +152,7 @@ void __init mem_init(void)
max_mapnr = max_pfn - ARCH_PFN_OFFSET;
high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
pr_info("virtual kernel memory layout:\n"
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
index 6b532b6bd785..6b95ca43aec0 100644
--- a/arch/xtensa/mm/kasan_init.c
+++ b/arch/xtensa/mm/kasan_init.c
@@ -8,11 +8,10 @@
* Copyright (C) 2017 Cadence Design Systems Inc.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
-#include <linux/memblock.h>
#include <asm/initialize_mmu.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
@@ -43,7 +42,7 @@ static void __init populate(void *start, void *end)
unsigned long vaddr = (unsigned long)start;
pgd_t *pgd = pgd_offset_k(vaddr);
pmd_t *pmd = pmd_offset(pgd, vaddr);
- pte_t *pte = memblock_virt_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+ pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
pr_debug("%s: %p - %p\n", __func__, start, end);
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 9d1ecfc53670..a4dcfd39bc5c 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -4,7 +4,7 @@
*
* Extracted from init.c
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/string.h>
@@ -31,7 +31,7 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n",
__func__, vaddr, n_pages);
- pte = alloc_bootmem_low_pages(n_pages * sizeof(pte_t));
+ pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
for (i = 0; i < n_pages; ++i)
pte_clear(NULL, 0, pte + i);
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index d027dddc41ca..d052712373b6 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -30,7 +30,7 @@
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/platform_device.h>
@@ -646,7 +646,7 @@ static int __init iss_net_setup(char *str)
return 1;
}
- new = alloc_bootmem(sizeof(*new));
+ new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
if (new == NULL) {
pr_err("Alloc_bootmem failed\n");
return 1;
diff --git a/arch/xtensa/platforms/iss/setup.c b/arch/xtensa/platforms/iss/setup.c
index 58709e89a8ed..c14cc673976c 100644
--- a/arch/xtensa/platforms/iss/setup.c
+++ b/arch/xtensa/platforms/iss/setup.c
@@ -16,7 +16,7 @@
* option) any later version.
*
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/block/blk-settings.c b/block/blk-settings.c
index ffd459969689..696c04c1ab6c 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -6,7 +6,7 @@
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
-#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
+#include <linux/memblock.h> /* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
diff --git a/block/bounce.c b/block/bounce.c
index ec0d99995f5f..cf49fe02f65c 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 6b0d3ef7309c..8fe0960ea572 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -228,7 +228,7 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
if (node < 0)
node = memory_add_physaddr_to_nid(info->start_addr);
- result = add_memory(node, info->start_addr, info->length);
+ result = __add_memory(node, info->start_addr, info->length);
/*
* If the memory block has been used by the kernel, add_memory()
@@ -282,7 +282,7 @@ static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
nid = memory_add_physaddr_to_nid(info->start_addr);
acpi_unbind_memory_blocks(info);
- remove_memory(nid, info->start_addr, info->length);
+ __remove_memory(nid, info->start_addr, info->length);
list_del(&info->list);
kfree(info);
}
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 85167603b9c9..274699463b4f 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -27,7 +27,6 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/acpi.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/numa.h>
#include <linux/nodemask.h>
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index a3d012b08fc5..61203eebf3a1 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -31,9 +31,8 @@
#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/acpi.h>
-#include <linux/bootmem.h>
-#include <linux/earlycpio.h>
#include <linux/memblock.h>
+#include <linux/earlycpio.h>
#include <linux/initrd.h>
#include "internal.h"
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 817320c7c4c1..0e5985682642 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -228,7 +228,6 @@ static bool pages_correctly_probed(unsigned long start_pfn)
/*
* MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
* OK to have direct references to sparsemem variables in here.
- * Must already be protected by mem_hotplug_begin().
*/
static int
memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
@@ -294,7 +293,6 @@ static int memory_subsys_online(struct device *dev)
if (mem->online_type < 0)
mem->online_type = MMOP_ONLINE_KEEP;
- /* Already under protection of mem_hotplug_begin() */
ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
/* clear online_type */
@@ -341,19 +339,11 @@ store_mem_state(struct device *dev,
goto err;
}
- /*
- * Memory hotplug needs to hold mem_hotplug_begin() for probe to find
- * the correct memory block to online before doing device_online(dev),
- * which will take dev->mutex. Take the lock early to prevent an
- * inversion, memory_subsys_online() callbacks will be implemented by
- * assuming it's already protected.
- */
- mem_hotplug_begin();
-
switch (online_type) {
case MMOP_ONLINE_KERNEL:
case MMOP_ONLINE_MOVABLE:
case MMOP_ONLINE_KEEP:
+ /* mem->online_type is protected by device_hotplug_lock */
mem->online_type = online_type;
ret = device_online(&mem->dev);
break;
@@ -364,7 +354,6 @@ store_mem_state(struct device *dev,
ret = -EINVAL; /* should never happen */
}
- mem_hotplug_done();
err:
unlock_device_hotplug();
@@ -519,15 +508,20 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
return -EINVAL;
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ goto out;
+
nid = memory_add_physaddr_to_nid(phys_addr);
- ret = add_memory(nid, phys_addr,
- MIN_MEMORY_BLOCK_SIZE * sections_per_block);
+ ret = __add_memory(nid, phys_addr,
+ MIN_MEMORY_BLOCK_SIZE * sections_per_block);
if (ret)
goto out;
ret = count;
out:
+ unlock_device_hotplug();
return ret;
}
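
The sysfs probe path now takes device_hotplug_lock itself and calls __add_memory(), the variant that assumes the lock is already held; plain add_memory() acquires the lock internally. A sketch of the new calling convention (error handling trimmed, hypothetical probe_memory_block() name):

#include <linux/device.h>
#include <linux/memory_hotplug.h>

static int probe_memory_block(int nid, u64 phys_addr, u64 block_size)
{
	int ret;

	ret = lock_device_hotplug_sysfs();	/* restartable lock for sysfs writers */
	if (ret)
		return ret;

	/* __add_memory() expects device_hotplug_lock to be held by the caller */
	ret = __add_memory(nid, phys_addr, block_size);

	unlock_device_hotplug();
	return ret;
}
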
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 23cf4427f425..41b91af95afb 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -16,7 +16,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index 7d22e1af2247..e205af814582 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -23,7 +23,7 @@
#include <linux/of_address.h>
#include <linux/list.h>
#include <linux/regmap.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/device.h>
#include "clock.h"
@@ -342,7 +342,7 @@ void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem)
{
struct clk_iomap *io;
- io = memblock_virt_alloc(sizeof(*io), 0);
+ io = memblock_alloc(sizeof(*io), SMP_CACHE_BYTES);
io->mem = mem;
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index f2483548cde9..099d83e4e910 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -5,7 +5,7 @@
#include <linux/ctype.h>
#include <linux/dmi.h>
#include <linux/efi.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/random.h>
#include <asm/dmi.h>
#include <asm/unaligned.h>
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index 60a95719ecb8..ac1654f74dc7 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -20,7 +20,7 @@
#define pr_fmt(fmt) "apple-properties: " fmt
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/platform_data/x86/apple.h>
@@ -235,7 +235,7 @@ static int __init map_properties(void)
*/
data->len = 0;
memunmap(data);
- free_bootmem_late(pa_data + sizeof(*data), data_len);
+ memblock_free_late(pa_data + sizeof(*data), data_len);
return ret;
}
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 5fc70520e04c..fa2904fb841f 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -15,7 +15,7 @@
static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
{
- return memblock_alloc(size, 0);
+ return memblock_phys_alloc(size, SMP_CACHE_BYTES);
}
static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index 2224f1dc074b..72d9ea18270b 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -18,7 +18,7 @@
* GNU General Public License for more details.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device.h>
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 5de3ed29282c..d168c87c7d30 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -19,7 +19,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/mm.h>
@@ -333,7 +333,8 @@ int __init firmware_map_add_early(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
- entry = memblock_virt_alloc(sizeof(struct firmware_map_entry), 0);
+ entry = memblock_alloc(sizeof(struct firmware_map_entry),
+ SMP_CACHE_BYTES);
if (WARN_ON(!entry))
return -ENOMEM;
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index f9f69f7111a9..44bd5b9166bb 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -11,7 +11,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 676c029494e4..0e780848f59b 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -13,7 +13,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index e8ae2e54151c..0a0b8e1f4236 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -23,7 +23,7 @@
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/dmapool.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/jiffies.h>
@@ -38,7 +38,6 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
-#include <linux/memblock.h>
#include <linux/sched/signal.h>
#include <asm/byteorder.h>
@@ -493,7 +492,7 @@ int __init smu_init (void)
goto fail_np;
}
- smu = alloc_bootmem(sizeof(struct smu_device));
+ smu = memblock_alloc(sizeof(struct smu_device), SMP_CACHE_BYTES);
spin_lock_init(&smu->lock);
INIT_LIST_HEAD(&smu->cmd_list);
@@ -569,7 +568,7 @@ fail_msg_node:
fail_db_node:
of_node_put(smu->db_node);
fail_bootmem:
- free_bootmem(__pa(smu), sizeof(struct smu_device));
+ memblock_free(__pa(smu), sizeof(struct smu_device));
smu = NULL;
fail_np:
of_node_put(np);
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index fc15ec58230a..0d33cf0842ad 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -25,7 +25,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/module.h>
#include <uapi/linux/magic.h>
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index a07e24970be4..11c5bad95226 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -33,7 +33,7 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index 38fa60ddaf2e..28510e33924f 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -38,7 +38,7 @@
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/io.h>
#include "arcdevice.h"
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 4e56aaf2b984..2c546013a980 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -34,7 +34,7 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 76c83c1ffeda..bb532aae0d92 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -11,7 +11,6 @@
#include <linux/crc32.h>
#include <linux/kernel.h>
#include <linux/initrd.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/of.h>
@@ -1115,7 +1114,6 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
return 1;
}
-#ifdef CONFIG_HAVE_MEMBLOCK
#ifndef MIN_MEMBLOCK_ADDR
#define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET)
#endif
@@ -1178,29 +1176,9 @@ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
return memblock_reserve(base, size);
}
-#else
-void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
-{
- WARN_ON(1);
-}
-
-int __init __weak early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size)
-{
- return -ENOSYS;
-}
-
-int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
- phys_addr_t size, bool nomap)
-{
- pr_err("Reserved memory not supported, ignoring range %pa - %pa%s\n",
- &base, &size, nomap ? " (nomap)" : "");
- return -ENOSYS;
-}
-#endif
-
static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
- return memblock_virt_alloc(size, align);
+ return memblock_alloc(size, align);
}
bool __init early_init_dt_verify(void *params)
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 895c83e0c7b6..1977ee0adcb1 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -20,13 +20,12 @@
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
+#include <linux/memblock.h>
#define MAX_RESERVED_REGIONS 32
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;
-#if defined(CONFIG_HAVE_MEMBLOCK)
-#include <linux/memblock.h>
int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
phys_addr_t *res_base)
@@ -37,6 +36,7 @@ int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
* panic()s on allocation failure.
*/
end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
+ align = !align ? SMP_CACHE_BYTES : align;
base = __memblock_alloc_base(size, align, end);
if (!base)
return -ENOMEM;
@@ -54,16 +54,6 @@ int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
return memblock_remove(base, size);
return 0;
}
-#else
-int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
- phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
- phys_addr_t *res_base)
-{
- pr_err("Reserved memory not supported, ignoring region 0x%llx%s\n",
- size, nomap ? " (nomap)" : "");
- return -ENOSYS;
-}
-#endif
/**
* res_mem_save_node() - save fdt node for second pass initialization
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index a3a6866765f2..49ae2aa744d6 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -5,7 +5,7 @@
#define pr_fmt(fmt) "### dt-test ### " fmt
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -2192,7 +2192,7 @@ static struct device_node *overlay_base_root;
static void * __init dt_alloc_memory(u64 size, u64 align)
{
- return memblock_virt_alloc(size, align);
+ return memblock_alloc(size, align);
}
/*
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 16a4e8528bbc..8f3a2eeb28dc 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -8,7 +8,7 @@
* Copyright IBM Corp. 2003, 2009
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 5b8af2782282..2b0c36c2c568 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -19,7 +19,7 @@
#include <linux/workqueue.h>
#include <linux/slab.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/compat.h>
#include <asm/ccwdev.h>
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 8af4948dae80..72dd2471ec1e 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -13,7 +13,7 @@
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 8f5c1d7f751a..97b6f197f007 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -9,7 +9,7 @@
#include <linux/kernel_stat.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
index 153b3f3cc795..a5136901dd8a 100644
--- a/drivers/sfi/sfi_core.c
+++ b/drivers/sfi/sfi_core.c
@@ -59,7 +59,7 @@
#define KMSG_COMPONENT "SFI"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index c16dd16afe6a..0fdda6f62953 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -1,6 +1,6 @@
menuconfig ION
bool "Ion Memory Manager"
- depends on HAVE_MEMBLOCK && HAS_DMA && MMU
+ depends on HAS_DMA && MMU
select GENERIC_ALLOCATOR
select DMA_SHARED_BUFFER
help
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 79ad30d34949..b929c7ae3a27 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -24,7 +24,7 @@
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <linux/fs_uart_pd.h>
#include <linux/of_address.h>
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
index 4eba17f3d293..56fc527015cb 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
@@ -19,7 +19,7 @@
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
index e3bff068dc3c..6a1cd03bfe39 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
@@ -19,7 +19,7 @@
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index 165653a5e45d..d2652dccc699 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -12,7 +12,7 @@
#include <linux/console.h>
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/io.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
@@ -94,7 +94,7 @@ static void * __init xdbc_get_page(dma_addr_t *dma_addr)
{
void *virt;
- virt = alloc_bootmem_pages_nopanic(PAGE_SIZE);
+ virt = memblock_alloc_nopanic(PAGE_SIZE, PAGE_SIZE);
if (!virt)
return NULL;
@@ -191,7 +191,7 @@ static void __init xdbc_free_ring(struct xdbc_ring *ring)
if (!seg)
return;
- free_bootmem(seg->dma, PAGE_SIZE);
+ memblock_free(seg->dma, PAGE_SIZE);
ring->segment = NULL;
}
@@ -675,10 +675,10 @@ int __init early_xdbc_setup_hardware(void)
xdbc_free_ring(&xdbc.in_ring);
if (xdbc.table_dma)
- free_bootmem(xdbc.table_dma, PAGE_SIZE);
+ memblock_free(xdbc.table_dma, PAGE_SIZE);
if (xdbc.out_dma)
- free_bootmem(xdbc.out_dma, PAGE_SIZE);
+ memblock_free(xdbc.out_dma, PAGE_SIZE);
xdbc.table_base = NULL;
xdbc.out_buf = NULL;
@@ -997,8 +997,8 @@ free_and_quit:
xdbc_free_ring(&xdbc.evt_ring);
xdbc_free_ring(&xdbc.out_ring);
xdbc_free_ring(&xdbc.in_ring);
- free_bootmem(xdbc.table_dma, PAGE_SIZE);
- free_bootmem(xdbc.out_dma, PAGE_SIZE);
+ memblock_free(xdbc.table_dma, PAGE_SIZE);
+ memblock_free(xdbc.out_dma, PAGE_SIZE);
writel(0, &xdbc.xdbc_reg->control);
early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index e12bb256036f..fdfc64f5acea 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -44,7 +44,7 @@
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
@@ -395,7 +395,10 @@ static enum bp_state reserve_additional_memory(void)
* callers drop the mutex before trying again.
*/
mutex_unlock(&balloon_mutex);
+ /* add_memory_resource() requires the device_hotplug lock */
+ lock_device_hotplug();
rc = add_memory_resource(nid, resource, memhp_auto_online);
+ unlock_device_hotplug();
mutex_lock(&balloon_mutex);
if (rc) {
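
add_memory_resource() now expects its caller to hold device_hotplug_lock, so the balloon driver brackets the call with lock_device_hotplug()/unlock_device_hotplug(). A sketch of that pattern, with a hypothetical hotplug_balloon_resource() wrapper and memhp_auto_online as used in the driver:

#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/memory_hotplug.h>

static int hotplug_balloon_resource(int nid, struct resource *resource)
{
	int rc;

	/* add_memory_resource() requires the device_hotplug lock */
	lock_device_hotplug();
	rc = add_memory_resource(nid, resource, memhp_auto_online);
	unlock_device_hotplug();

	return rc;
}
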
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index e6c1934734b7..93194f3e7540 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -28,7 +28,7 @@
#include <linux/irq.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 84575baceebc..f15f89df1f36 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -33,7 +33,7 @@
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index f5c1af4ce9ab..2a7f545bd0b5 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -35,7 +35,7 @@
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
@@ -217,7 +217,8 @@ retry:
* Get IO TLB memory from any location.
*/
if (early)
- xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
+ xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
+ PAGE_SIZE);
else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
@@ -247,7 +248,8 @@ retry:
xen_io_tlb_nslabs);
if (rc) {
if (early)
- free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
+ memblock_free(__pa(xen_io_tlb_start),
+ PAGE_ALIGN(bytes));
else {
free_pages((unsigned long)xen_io_tlb_start, order);
xen_io_tlb_start = NULL;
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 55988b8418ee..5165aa82bf7d 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -68,7 +68,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <linux/mman.h>
diff --git a/fs/dcache.c b/fs/dcache.c
index c2e443fb76ae..2593153471cf 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -26,7 +26,7 @@
#include <linux/export.h>
#include <linux/security.h>
#include <linux/seqlock.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/list_lru.h>
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 7f5f3699fc6c..c8366cb8eccd 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -369,7 +369,9 @@ static int fat_parse_short(struct super_block *sb,
}
memcpy(work, de->name, sizeof(work));
- /* see namei.c, msdos_format_name */
+ /* For an explanation of the special treatment of 0x05 in
+ * filenames, see msdos_format_name in namei_msdos.c
+ */
if (work[0] == 0x05)
work[0] = 0xE5;
@@ -1071,7 +1073,7 @@ int fat_remove_entries(struct inode *dir, struct fat_slot_info *sinfo)
}
}
- dir->i_mtime = dir->i_atime = current_time(dir);
+ fat_truncate_time(dir, NULL, S_ATIME|S_MTIME);
if (IS_DIRSYNC(dir))
(void)fat_sync_inode(dir);
else
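
The 0x05/0xE5 remapping that the new comment points at exists because 0xE5 in the first byte of an on-disk short name marks a deleted directory entry, so a name that really begins with 0xE5 is stored as 0x05 and converted back on read. A minimal userspace sketch of that escaping (the helper names are invented for the example; in the kernel the two directions live in fat_parse_short() and msdos_format_name()):

    #include <stdio.h>
    #include <string.h>

    #define DELETED_FLAG 0xe5   /* first name byte of a deleted directory entry */

    /* On-disk -> in-memory: a stored 0x05 stands for a real leading 0xe5. */
    static void fat_decode_short_name(unsigned char *name)
    {
        if (name[0] == 0x05)
            name[0] = DELETED_FLAG;
    }

    /* In-memory -> on-disk: escape a real leading 0xe5 so the entry is not
     * mistaken for a deleted one. */
    static void fat_encode_short_name(unsigned char *name)
    {
        if (name[0] == DELETED_FLAG)
            name[0] = 0x05;
    }

    int main(void)
    {
        unsigned char name[11];

        memset(name, ' ', sizeof(name));
        name[0] = 0xe5;                 /* the file name really starts with 0xe5 */
        fat_encode_short_name(name);    /* stored as 0x05 on disk */
        printf("stored : 0x%02x\n", name[0]);
        fat_decode_short_name(name);    /* read back as 0xe5 */
        printf("decoded: 0x%02x\n", name[0]);
        return 0;
    }
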
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 9d7d2d5da28b..4e1b2f6df5e6 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -416,6 +416,10 @@ extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 __time, __le16 __date, u8 time_cs);
extern void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 *time, __le16 *date, u8 *time_cs);
+extern int fat_truncate_time(struct inode *inode, struct timespec64 *now,
+ int flags);
+extern int fat_update_time(struct inode *inode, struct timespec64 *now,
+ int flags);
extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs);
int fat_cache_init(void);
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 4f3d72fb1e60..13935ee99e1e 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -227,7 +227,7 @@ static int fat_cont_expand(struct inode *inode, loff_t size)
if (err)
goto out;
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_CTIME|S_MTIME);
mark_inode_dirty(inode);
if (IS_SYNC(inode)) {
int err2;
@@ -330,7 +330,7 @@ static int fat_free(struct inode *inode, int skip)
MSDOS_I(inode)->i_logstart = 0;
}
MSDOS_I(inode)->i_attrs |= ATTR_ARCH;
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_CTIME|S_MTIME);
if (wait) {
err = fat_sync_inode(inode);
if (err) {
@@ -542,6 +542,18 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr)
up_write(&MSDOS_I(inode)->truncate_lock);
}
+ /*
+ * setattr_copy can't truncate these appropriately, so we'll
+ * copy them ourselves
+ */
+ if (attr->ia_valid & ATTR_ATIME)
+ fat_truncate_time(inode, &attr->ia_atime, S_ATIME);
+ if (attr->ia_valid & ATTR_CTIME)
+ fat_truncate_time(inode, &attr->ia_ctime, S_CTIME);
+ if (attr->ia_valid & ATTR_MTIME)
+ fat_truncate_time(inode, &attr->ia_mtime, S_MTIME);
+ attr->ia_valid &= ~(ATTR_ATIME|ATTR_CTIME|ATTR_MTIME);
+
setattr_copy(inode, attr);
mark_inode_dirty(inode);
out:
@@ -552,4 +564,5 @@ EXPORT_SYMBOL_GPL(fat_setattr);
const struct inode_operations fat_file_inode_operations = {
.setattr = fat_setattr,
.getattr = fat_getattr,
+ .update_time = fat_update_time,
};
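
The new fat_setattr() hunk applies the requested timestamps itself and then clears ATTR_ATIME/ATTR_CTIME/ATTR_MTIME so that setattr_copy() cannot overwrite them with untruncated values. A self-contained sketch of that "truncate, then hide the flags from the generic copy" pattern, using toy types, made-up flag values and a stand-in 2-second truncation (atime handling omitted for brevity):

    #include <stdio.h>
    #include <time.h>

    #define ATTR_ATIME (1 << 0)
    #define ATTR_MTIME (1 << 1)
    #define ATTR_CTIME (1 << 2)

    struct toy_inode { struct timespec atime, mtime, ctime; };
    struct toy_iattr {
        unsigned int ia_valid;
        struct timespec ia_atime, ia_mtime, ia_ctime;
    };

    /* Stand-in for fat_truncate_time(): round down to 2-second resolution. */
    static struct timespec trunc_2s(struct timespec ts)
    {
        ts.tv_sec &= ~1L;
        ts.tv_nsec = 0;
        return ts;
    }

    static void toy_setattr(struct toy_inode *inode, struct toy_iattr *attr)
    {
        /* Apply the timestamps ourselves, with filesystem-specific rounding... */
        if (attr->ia_valid & ATTR_MTIME)
            inode->mtime = trunc_2s(attr->ia_mtime);
        if (attr->ia_valid & ATTR_CTIME)
            inode->ctime = trunc_2s(attr->ia_ctime);
        /* ...then clear the bits so a later generic copy step cannot
         * overwrite them with the caller's un-truncated values. */
        attr->ia_valid &= ~(ATTR_ATIME | ATTR_MTIME | ATTR_CTIME);
    }

    int main(void)
    {
        struct toy_inode inode = { { 0, 0 }, { 0, 0 }, { 0, 0 } };
        struct toy_iattr attr = {
            .ia_valid = ATTR_MTIME,
            .ia_mtime = { .tv_sec = 1001, .tv_nsec = 7 },
        };

        toy_setattr(&inode, &attr);
        printf("mtime = %ld.%09ld, remaining flags = %#x\n",
               (long)inode.mtime.tv_sec, inode.mtime.tv_nsec, attr.ia_valid);
        return 0;
    }
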
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index d6b81e31f9f5..c0b5b5c3373b 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -244,7 +244,7 @@ static int fat_write_end(struct file *file, struct address_space *mapping,
if (err < len)
fat_write_failed(mapping, pos + len);
if (!(err < 0) && !(MSDOS_I(inode)->i_attrs & ATTR_ARCH)) {
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_CTIME|S_MTIME);
MSDOS_I(inode)->i_attrs |= ATTR_ARCH;
mark_inode_dirty(inode);
}
@@ -564,7 +564,7 @@ int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
de->cdate, de->ctime_cs);
fat_time_fat2unix(sbi, &inode->i_atime, 0, de->adate, 0);
} else
- inode->i_ctime = inode->i_atime = inode->i_mtime;
+ fat_truncate_time(inode, &inode->i_mtime, S_ATIME|S_CTIME);
return 0;
}
@@ -1626,6 +1626,11 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
sb->s_magic = MSDOS_SUPER_MAGIC;
sb->s_op = &fat_sops;
sb->s_export_op = &fat_export_ops;
+ /*
+ * fat timestamps are complex and truncated by fat itself (see
+ * fat_truncate_time()), so a 1 ns granularity here keeps current_time() cheap
+ */
+ sb->s_time_gran = 1;
mutex_init(&sbi->nfs_build_inode_lock);
ratelimit_state_init(&sbi->ratelimit, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
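
For context on s_time_gran: the VFS rounds current_time() down to the superblock's s_time_gran, but FAT's rules (2 s, 10 ms, date-only) are too irregular to express as one granularity, so the patch leaves the generic rounding at 1 ns and lets fat_truncate_time() do the real work. A rough userspace analogue of that generic granularity rounding, for illustration only:

    #include <stdio.h>
    #include <time.h>

    /* Round tv_nsec down to a multiple of gran_ns, similar in spirit to the
     * rounding the VFS applies based on sb->s_time_gran. */
    static struct timespec trunc_to_gran(struct timespec ts, long gran_ns)
    {
        if (gran_ns > 1)
            ts.tv_nsec -= ts.tv_nsec % gran_ns;
        return ts;
    }

    int main(void)
    {
        struct timespec ts = { .tv_sec = 42, .tv_nsec = 123456789 };
        struct timespec a = trunc_to_gran(ts, 1);        /* s_time_gran = 1: no-op */
        struct timespec b = trunc_to_gran(ts, 10000000); /* 10 ms, the vfat ctime step */

        printf("gran 1 ns : %ld.%09ld\n", (long)a.tv_sec, a.tv_nsec);
        printf("gran 10 ms: %ld.%09ld\n", (long)b.tv_sec, b.tv_nsec);
        return 0;
    }
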
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 573836dcaefc..fce0a76f3f1e 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -7,6 +7,7 @@
*/
#include "fat.h"
+#include <linux/iversion.h>
/*
* fat_fs_error reports a file system problem that might indicate fat data
@@ -185,6 +186,13 @@ static long days_in_year[] = {
0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0,
};
+static inline int fat_tz_offset(struct msdos_sb_info *sbi)
+{
+ return (sbi->options.tz_set ?
+ -sbi->options.time_offset :
+ sys_tz.tz_minuteswest) * SECS_PER_MIN;
+}
+
/* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */
void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 __time, __le16 __date, u8 time_cs)
@@ -210,10 +218,7 @@ void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec64 *ts,
+ days_in_year[month] + day
+ DAYS_DELTA) * SECS_PER_DAY;
- if (!sbi->options.tz_set)
- second += sys_tz.tz_minuteswest * SECS_PER_MIN;
- else
- second -= sbi->options.time_offset * SECS_PER_MIN;
+ second += fat_tz_offset(sbi);
if (time_cs) {
ts->tv_sec = second + (time_cs / 100);
@@ -229,9 +234,7 @@ void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 *time, __le16 *date, u8 *time_cs)
{
struct tm tm;
- time64_to_tm(ts->tv_sec,
- (sbi->options.tz_set ? sbi->options.time_offset :
- -sys_tz.tz_minuteswest) * SECS_PER_MIN, &tm);
+ time64_to_tm(ts->tv_sec, -fat_tz_offset(sbi), &tm);
/* FAT can only support year between 1980 to 2107 */
if (tm.tm_year < 1980 - 1900) {
@@ -263,6 +266,80 @@ void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec64 *ts,
}
EXPORT_SYMBOL_GPL(fat_time_unix2fat);
+static inline struct timespec64 fat_timespec64_trunc_2secs(struct timespec64 ts)
+{
+ return (struct timespec64){ ts.tv_sec & ~1ULL, 0 };
+}
+/*
+ * truncate the various times with appropriate granularity:
+ * root inode:
+ * all times always 0
+ * all other inodes:
+ * mtime - 2 seconds
+ * ctime
+ * msdos - 2 seconds
+ * vfat - 10 milliseconds
+ * atime - 24 hours (00:00:00 in local timezone)
+ */
+int fat_truncate_time(struct inode *inode, struct timespec64 *now, int flags)
+{
+ struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+ struct timespec64 ts;
+
+ if (inode->i_ino == MSDOS_ROOT_INO)
+ return 0;
+
+ if (now == NULL) {
+ now = &ts;
+ ts = current_time(inode);
+ }
+
+ if (flags & S_ATIME) {
+ /* to localtime */
+ time64_t seconds = now->tv_sec - fat_tz_offset(sbi);
+ s32 remainder;
+
+ div_s64_rem(seconds, SECS_PER_DAY, &remainder);
+ /* to day boundary, and back to unix time */
+ seconds = seconds + fat_tz_offset(sbi) - remainder;
+
+ inode->i_atime = (struct timespec64){ seconds, 0 };
+ }
+ if (flags & S_CTIME) {
+ if (sbi->options.isvfat)
+ inode->i_ctime = timespec64_trunc(*now, 10000000);
+ else
+ inode->i_ctime = fat_timespec64_trunc_2secs(*now);
+ }
+ if (flags & S_MTIME)
+ inode->i_mtime = fat_timespec64_trunc_2secs(*now);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fat_truncate_time);
+
+int fat_update_time(struct inode *inode, struct timespec64 *now, int flags)
+{
+ int iflags = I_DIRTY_TIME;
+ bool dirty = false;
+
+ if (inode->i_ino == MSDOS_ROOT_INO)
+ return 0;
+
+ fat_truncate_time(inode, now, flags);
+ if (flags & S_VERSION)
+ dirty = inode_maybe_inc_iversion(inode, false);
+ if ((flags & (S_ATIME | S_CTIME | S_MTIME)) &&
+ !(inode->i_sb->s_flags & SB_LAZYTIME))
+ dirty = true;
+
+ if (dirty)
+ iflags |= I_DIRTY_SYNC;
+ __mark_inode_dirty(inode, iflags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fat_update_time);
+
int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
{
int i, err = 0;
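
The truncation rules listed above fat_truncate_time() can be exercised in plain C. The sketch below mirrors the arithmetic of the hunk: mtime (and msdos ctime) drop to 2-second resolution, and atime is rounded down to midnight in the filesystem's local timezone, using the same sign convention as fat_tz_offset() (UTC = local time + offset). Names are invented and post-1980 timestamps are assumed, as on FAT:

    #include <stdio.h>
    #include <time.h>

    #define SECS_PER_DAY 86400LL

    /* mtime and msdos ctime: FAT stores seconds in 2 s units. */
    static struct timespec trunc_2secs(struct timespec ts)
    {
        ts.tv_sec &= ~1L;
        ts.tv_nsec = 0;
        return ts;
    }

    /* atime: FAT only stores a date, so round down to 00:00:00 in the
     * filesystem's timezone; tz_offset is added to local time to get UTC. */
    static struct timespec trunc_atime(struct timespec now, long long tz_offset)
    {
        long long local = now.tv_sec - tz_offset;
        long long remainder = local % SECS_PER_DAY; /* positive for post-1970 times */

        return (struct timespec){ .tv_sec = now.tv_sec - remainder };
    }

    int main(void)
    {
        struct timespec now = { .tv_sec = 1541000123, .tv_nsec = 456789 };
        struct timespec m = trunc_2secs(now);
        struct timespec a = trunc_atime(now, -3 * 3600); /* a zone 3 h east of UTC */

        printf("mtime: %lld\n", (long long)m.tv_sec);
        printf("atime: %lld\n", (long long)a.tv_sec);
        return 0;
    }
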
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index efb8c40c9d27..f2cd365a4e86 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -250,7 +250,7 @@ static int msdos_add_entry(struct inode *dir, const unsigned char *name,
if (err)
return err;
- dir->i_ctime = dir->i_mtime = *ts;
+ fat_truncate_time(dir, ts, S_CTIME|S_MTIME);
if (IS_DIRSYNC(dir))
(void)fat_sync_inode(dir);
else
@@ -294,7 +294,7 @@ static int msdos_create(struct inode *dir, struct dentry *dentry, umode_t mode,
err = PTR_ERR(inode);
goto out;
}
- inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+ fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
@@ -327,7 +327,7 @@ static int msdos_rmdir(struct inode *dir, struct dentry *dentry)
drop_nlink(dir);
clear_nlink(inode);
- inode->i_ctime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_CTIME);
fat_detach(inode);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -380,7 +380,7 @@ static int msdos_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
goto out;
}
set_nlink(inode, 2);
- inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+ fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
@@ -413,7 +413,7 @@ static int msdos_unlink(struct inode *dir, struct dentry *dentry)
if (err)
goto out;
clear_nlink(inode);
- inode->i_ctime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_CTIME);
fat_detach(inode);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -478,7 +478,7 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
mark_inode_dirty(old_inode);
inode_inc_iversion(old_dir);
- old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
+ fat_truncate_time(old_dir, NULL, S_CTIME|S_MTIME);
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
else
@@ -538,7 +538,7 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
if (err)
goto error_dotdot;
inode_inc_iversion(old_dir);
- old_dir->i_ctime = old_dir->i_mtime = ts;
+ fat_truncate_time(old_dir, &ts, S_CTIME|S_MTIME);
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
else
@@ -548,7 +548,7 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
drop_nlink(new_inode);
if (is_dir)
drop_nlink(new_inode);
- new_inode->i_ctime = ts;
+ fat_truncate_time(new_inode, &ts, S_CTIME);
}
out:
brelse(sinfo.bh);
@@ -637,6 +637,7 @@ static const struct inode_operations msdos_dir_inode_operations = {
.rename = msdos_rename,
.setattr = fat_setattr,
.getattr = fat_getattr,
+ .update_time = fat_update_time,
};
static void setup(struct super_block *sb)
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 82cd1e69cbdf..996c8c25e9c6 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -678,7 +678,7 @@ static int vfat_add_entry(struct inode *dir, const struct qstr *qname,
goto cleanup;
/* update timestamp */
- dir->i_ctime = dir->i_mtime = dir->i_atime = *ts;
+ fat_truncate_time(dir, ts, S_CTIME|S_MTIME);
if (IS_DIRSYNC(dir))
(void)fat_sync_inode(dir);
else
@@ -779,7 +779,7 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
goto out;
}
inode_inc_iversion(inode);
- inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+ fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
@@ -810,7 +810,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
drop_nlink(dir);
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_ATIME|S_MTIME);
fat_detach(inode);
vfat_d_version_set(dentry, inode_query_iversion(dir));
out:
@@ -836,7 +836,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
if (err)
goto out;
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_ATIME|S_MTIME);
fat_detach(inode);
vfat_d_version_set(dentry, inode_query_iversion(dir));
out:
@@ -876,7 +876,7 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
}
inode_inc_iversion(inode);
set_nlink(inode, 2);
- inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+ fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
@@ -969,7 +969,7 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
if (err)
goto error_dotdot;
inode_inc_iversion(old_dir);
- old_dir->i_ctime = old_dir->i_mtime = ts;
+ fat_truncate_time(old_dir, &ts, S_CTIME|S_MTIME);
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
else
@@ -979,7 +979,7 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
drop_nlink(new_inode);
if (is_dir)
drop_nlink(new_inode);
- new_inode->i_ctime = ts;
+ fat_truncate_time(new_inode, &ts, S_CTIME);
}
out:
brelse(sinfo.bh);
@@ -1032,6 +1032,7 @@ static const struct inode_operations vfat_dir_inode_operations = {
.rename = vfat_rename,
.setattr = fat_setattr,
.getattr = fat_getattr,
+ .update_time = fat_update_time,
};
static void setup(struct super_block *sb)
diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
index 9a8772465a90..896396554bcc 100644
--- a/fs/hfs/brec.c
+++ b/fs/hfs/brec.c
@@ -425,6 +425,10 @@ skip:
if (new_node) {
__be32 cnid;
+ if (!new_node->parent) {
+ hfs_btree_inc_height(tree);
+ new_node->parent = tree->root;
+ }
fd->bnode = hfs_bnode_find(tree, new_node->parent);
/* create index key and entry */
hfs_bnode_read_key(new_node, fd->search_key, 14);
@@ -441,6 +445,7 @@ skip:
/* restore search_key */
hfs_bnode_read_key(node, fd->search_key, 14);
}
+ new_node = NULL;
}
if (!rec && node->parent)
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 374b5688e29e..98b96ffb95ed 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -220,25 +220,17 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
return node;
}
-struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+/* Make sure @tree has at least @rsvd_nodes free nodes */
+int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
- struct hfs_bnode *node, *next_node;
- struct page **pagep;
- u32 nidx, idx;
- unsigned off;
- u16 off16;
- u16 len;
- u8 *data, byte, m;
- int i;
-
- while (!tree->free_nodes) {
- struct inode *inode = tree->inode;
- u32 count;
- int res;
+ struct inode *inode = tree->inode;
+ u32 count;
+ int res;
+ while (tree->free_nodes < rsvd_nodes) {
res = hfs_extend_file(inode);
if (res)
- return ERR_PTR(res);
+ return res;
HFS_I(inode)->phys_size = inode->i_size =
(loff_t)HFS_I(inode)->alloc_blocks *
HFS_SB(tree->sb)->alloc_blksz;
@@ -246,9 +238,26 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
tree->sb->s_blocksize_bits;
inode_set_bytes(inode, inode->i_size);
count = inode->i_size >> tree->node_size_shift;
- tree->free_nodes = count - tree->node_count;
+ tree->free_nodes += count - tree->node_count;
tree->node_count = count;
}
+ return 0;
+}
+
+struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+{
+ struct hfs_bnode *node, *next_node;
+ struct page **pagep;
+ u32 nidx, idx;
+ unsigned off;
+ u16 off16;
+ u16 len;
+ u8 *data, byte, m;
+ int i, res;
+
+ res = hfs_bmap_reserve(tree, 1);
+ if (res)
+ return ERR_PTR(res);
nidx = 0;
node = hfs_bnode_find(tree, nidx);
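
hfs_bmap_reserve() replaces "allocate one node at a time and hope" with "extend the backing file until at least @rsvd_nodes are free, before touching the tree", which is what lets callers report ENOSPC up front. A toy, compilable version of the loop; note the +=, which is also why the tree->free_nodes accounting changed from = to += (the tree may already have had free nodes):

    #include <stdio.h>

    /* Toy b-tree bookkeeping: total nodes, free nodes, size of backing file. */
    struct toy_btree {
        unsigned int node_count;
        unsigned int free_nodes;
        unsigned int file_nodes;
    };

    /* Stand-in for hfs_extend_file(): grow the backing file by a fixed chunk. */
    static int toy_extend_file(struct toy_btree *tree)
    {
        tree->file_nodes += 8;
        return 0;
    }

    /* Keep extending until at least rsvd_nodes are free, accumulating the
     * newly added nodes into free_nodes. */
    static int toy_bmap_reserve(struct toy_btree *tree, int rsvd_nodes)
    {
        while (tree->free_nodes < (unsigned int)rsvd_nodes) {
            int res = toy_extend_file(tree);

            if (res)
                return res;
            tree->free_nodes += tree->file_nodes - tree->node_count;
            tree->node_count = tree->file_nodes;
        }
        return 0;
    }

    int main(void)
    {
        struct toy_btree tree = { .node_count = 16, .free_nodes = 1, .file_nodes = 16 };

        /* Reserve worst-case space before starting a multi-step insert. */
        if (toy_bmap_reserve(&tree, 6) == 0)
            printf("free nodes after reserve: %u\n", tree.free_nodes);
        return 0;
    }
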
diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
index c8b252dbb26c..dcc2aab1b2c4 100644
--- a/fs/hfs/btree.h
+++ b/fs/hfs/btree.h
@@ -82,6 +82,7 @@ struct hfs_find_data {
extern struct hfs_btree *hfs_btree_open(struct super_block *, u32, btree_keycmp);
extern void hfs_btree_close(struct hfs_btree *);
extern void hfs_btree_write(struct hfs_btree *);
+extern int hfs_bmap_reserve(struct hfs_btree *, int);
extern struct hfs_bnode * hfs_bmap_alloc(struct hfs_btree *);
extern void hfs_bmap_free(struct hfs_bnode *node);
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index 8a66405b0f8b..d365bf0b8c77 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -97,6 +97,14 @@ int hfs_cat_create(u32 cnid, struct inode *dir, const struct qstr *str, struct i
if (err)
return err;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most once.
+ */
+ err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth);
+ if (err)
+ goto err2;
+
hfs_cat_build_key(sb, fd.search_key, cnid, NULL);
entry_size = hfs_cat_build_thread(sb, &entry, S_ISDIR(inode->i_mode) ?
HFS_CDR_THD : HFS_CDR_FTH,
@@ -295,6 +303,14 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
return err;
dst_fd = src_fd;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most once.
+ */
+ err = hfs_bmap_reserve(src_fd.tree, 2 * src_fd.tree->depth);
+ if (err)
+ goto out;
+
/* find the old dir entry and read the data */
hfs_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
err = hfs_brec_find(&src_fd);
diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
index 5d0182654580..263d5028d9d1 100644
--- a/fs/hfs/extent.c
+++ b/fs/hfs/extent.c
@@ -117,6 +117,10 @@ static int __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) {
if (res != -ENOENT)
return res;
+ /* Fail early and avoid ENOSPC during the btree operation */
+ res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
+ if (res)
+ return res;
hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec));
HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
} else {
@@ -300,7 +304,7 @@ int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type)
return 0;
blocks = 0;
- for (i = 0; i < 3; extent++, i++)
+ for (i = 0; i < 3; i++)
blocks += be16_to_cpu(extent[i].count);
res = hfs_free_extents(sb, extent, blocks, blocks);
@@ -341,7 +345,9 @@ int hfs_get_block(struct inode *inode, sector_t block,
ablock = (u32)block / HFS_SB(sb)->fs_div;
if (block >= HFS_I(inode)->fs_blocks) {
- if (block > HFS_I(inode)->fs_blocks || !create)
+ if (!create)
+ return 0;
+ if (block > HFS_I(inode)->fs_blocks)
return -EIO;
if (ablock >= HFS_I(inode)->alloc_blocks) {
res = hfs_extend_file(inode);
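
The reordered check in hfs_get_block() (and the matching hfsplus_get_block() change further down) makes a read past the last allocated block report a hole instead of -EIO; only creating a block beyond the next append position remains an error. A small sketch of the new control flow, with toy names:

    #include <stdio.h>
    #include <errno.h>
    #include <stdbool.h>

    /* block >= fs_blocks: a plain read maps nothing (hole), create may only
     * append the block immediately after the current end. */
    static int toy_get_block(unsigned long block, unsigned long fs_blocks, bool create)
    {
        if (block >= fs_blocks) {
            if (!create)
                return 0;        /* hole: leave the buffer unmapped */
            if (block > fs_blocks)
                return -EIO;     /* can only append one block at a time */
            /* block == fs_blocks: fall through and allocate it */
        }
        /* ... map or allocate the block here ... */
        return 0;
    }

    int main(void)
    {
        printf("read past EOF   : %d\n", toy_get_block(10, 4, false));
        printf("append next     : %d\n", toy_get_block(4, 4, true));
        printf("create with gap : %d\n", toy_get_block(10, 4, true));
        return 0;
    }
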
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index a2dfa1b2a89c..da243c84e93b 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -642,6 +642,8 @@ int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr)
truncate_setsize(inode, attr->ia_size);
hfs_file_truncate(inode);
+ inode->i_atime = inode->i_mtime = inode->i_ctime =
+ current_time(inode);
}
setattr_copy(inode, attr);
diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c
index 2bab6b3cdba4..e6d554476db4 100644
--- a/fs/hfsplus/attributes.c
+++ b/fs/hfsplus/attributes.c
@@ -217,6 +217,11 @@ int hfsplus_create_attr(struct inode *inode,
if (err)
goto failed_init_create_attr;
+ /* Fail early and avoid ENOSPC during the btree operation */
+ err = hfs_bmap_reserve(fd.tree, fd.tree->depth + 1);
+ if (err)
+ goto failed_create_attr;
+
if (name) {
err = hfsplus_attr_build_key(sb, fd.search_key,
inode->i_ino, name);
@@ -313,6 +318,11 @@ int hfsplus_delete_attr(struct inode *inode, const char *name)
if (err)
return err;
+ /* Fail early and avoid ENOSPC during the btree operation */
+ err = hfs_bmap_reserve(fd.tree, fd.tree->depth);
+ if (err)
+ goto out;
+
if (name) {
err = hfsplus_attr_build_key(sb, fd.search_key,
inode->i_ino, name);
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index ed8eacb34452..1918544a7871 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -429,6 +429,10 @@ skip:
if (new_node) {
__be32 cnid;
+ if (!new_node->parent) {
+ hfs_btree_inc_height(tree);
+ new_node->parent = tree->root;
+ }
fd->bnode = hfs_bnode_find(tree, new_node->parent);
/* create index key and entry */
hfs_bnode_read_key(new_node, fd->search_key, 14);
@@ -445,6 +449,7 @@ skip:
/* restore search_key */
hfs_bnode_read_key(node, fd->search_key, 14);
}
+ new_node = NULL;
}
if (!rec && node->parent)
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index de14b2b6881b..236efe51eca6 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -342,26 +342,21 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
return node;
}
-struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+/* Make sure @tree has at least @rsvd_nodes free nodes */
+int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
- struct hfs_bnode *node, *next_node;
- struct page **pagep;
- u32 nidx, idx;
- unsigned off;
- u16 off16;
- u16 len;
- u8 *data, byte, m;
- int i;
+ struct inode *inode = tree->inode;
+ struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
+ u32 count;
+ int res;
- while (!tree->free_nodes) {
- struct inode *inode = tree->inode;
- struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
- u32 count;
- int res;
+ if (rsvd_nodes <= 0)
+ return 0;
+ while (tree->free_nodes < rsvd_nodes) {
res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
if (res)
- return ERR_PTR(res);
+ return res;
hip->phys_size = inode->i_size =
(loff_t)hip->alloc_blocks <<
HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
@@ -369,9 +364,26 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
inode_set_bytes(inode, inode->i_size);
count = inode->i_size >> tree->node_size_shift;
- tree->free_nodes = count - tree->node_count;
+ tree->free_nodes += count - tree->node_count;
tree->node_count = count;
}
+ return 0;
+}
+
+struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+{
+ struct hfs_bnode *node, *next_node;
+ struct page **pagep;
+ u32 nidx, idx;
+ unsigned off;
+ u16 off16;
+ u16 len;
+ u8 *data, byte, m;
+ int i, res;
+
+ res = hfs_bmap_reserve(tree, 1);
+ if (res)
+ return ERR_PTR(res);
nidx = 0;
node = hfs_bnode_find(tree, nidx);
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index a196369ba779..35472cba750e 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -265,6 +265,14 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
if (err)
return err;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most once.
+ */
+ err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth);
+ if (err)
+ goto err2;
+
hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid);
entry_size = hfsplus_fill_cat_thread(sb, &entry,
S_ISDIR(inode->i_mode) ?
@@ -333,6 +341,14 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str)
if (err)
return err;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most once.
+ */
+ err = hfs_bmap_reserve(fd.tree, 2 * (int)fd.tree->depth - 2);
+ if (err)
+ goto out;
+
if (!str) {
int len;
@@ -433,6 +449,14 @@ int hfsplus_rename_cat(u32 cnid,
return err;
dst_fd = src_fd;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most twice.
+ */
+ err = hfs_bmap_reserve(src_fd.tree, 4 * (int)src_fd.tree->depth - 1);
+ if (err)
+ goto out;
+
/* find the old dir entry and read the data */
err = hfsplus_cat_build_key(sb, src_fd.search_key,
src_dir->i_ino, src_name);
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index 8e0f59767694..a930ddd15681 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -100,6 +100,10 @@ static int __hfsplus_ext_write_extent(struct inode *inode,
if (hip->extent_state & HFSPLUS_EXT_NEW) {
if (res != -ENOENT)
return res;
+ /* Fail early and avoid ENOSPC during the btree operation */
+ res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
+ if (res)
+ return res;
hfs_brec_insert(fd, hip->cached_extents,
sizeof(hfsplus_extent_rec));
hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
@@ -233,7 +237,9 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock,
ablock = iblock >> sbi->fs_shift;
if (iblock >= hip->fs_blocks) {
- if (iblock > hip->fs_blocks || !create)
+ if (!create)
+ return 0;
+ if (iblock > hip->fs_blocks)
return -EIO;
if (ablock >= hip->alloc_blocks) {
res = hfsplus_file_extend(inode, false);
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 8e039435958a..dd7ad9f13e3a 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -311,6 +311,7 @@ static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
#define hfs_btree_open hfsplus_btree_open
#define hfs_btree_close hfsplus_btree_close
#define hfs_btree_write hfsplus_btree_write
+#define hfs_bmap_reserve hfsplus_bmap_reserve
#define hfs_bmap_alloc hfsplus_bmap_alloc
#define hfs_bmap_free hfsplus_bmap_free
#define hfs_bnode_read hfsplus_bnode_read
@@ -395,6 +396,7 @@ u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size, u64 sectors,
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id);
void hfs_btree_close(struct hfs_btree *tree);
int hfs_btree_write(struct hfs_btree *tree);
+int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes);
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree);
void hfs_bmap_free(struct hfs_bnode *node);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 8e9427a42b81..d7ab9d8c4b67 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -261,6 +261,7 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
}
truncate_setsize(inode, attr->ia_size);
hfsplus_file_truncate(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
}
setattr_copy(inode, attr);
diff --git a/fs/inode.c b/fs/inode.c
index 9b808986d440..9e198f00b64c 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -10,7 +10,7 @@
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
diff --git a/fs/namespace.c b/fs/namespace.c
index d86830c86ce8..98d27da43304 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -23,7 +23,7 @@
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index d297fe4472a9..bbcc185062bb 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -22,7 +22,7 @@
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 792c78a49174..6c517b11acf8 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 91ae16fbd7d5..3fe90443c1bb 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -16,7 +16,7 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
@@ -423,7 +423,7 @@ static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
if (rc < 0) {
unlock_page(page);
put_page(page);
- return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+ return vmf_error(rc);
}
SetPageUptodate(page);
}
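
vmf_error() packages exactly the errno-to-fault-code mapping that the removed line open-coded: -ENOMEM becomes an out-of-memory fault, everything else SIGBUS. A stand-alone sketch with stand-in constants (the real VM_FAULT_* values are kernel-internal flags, not these):

    #include <stdio.h>
    #include <errno.h>

    #define TOY_FAULT_OOM    0x1
    #define TOY_FAULT_SIGBUS 0x2

    /* Same shape as the open-coded ternary this hunk replaces. */
    static int toy_vmf_error(int err)
    {
        return (err == -ENOMEM) ? TOY_FAULT_OOM : TOY_FAULT_SIGBUS;
    }

    int main(void)
    {
        printf("-ENOMEM -> %#x\n", toy_vmf_error(-ENOMEM));
        printf("-EFAULT -> %#x\n", toy_vmf_error(-EFAULT));
        return 0;
    }
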
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index 503086f7f7c1..0d19d191ae70 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -141,7 +141,6 @@ config PSTORE_RAM
tristate "Log panic/oops to a RAM buffer"
depends on PSTORE
depends on HAS_IOMEM
- depends on HAVE_MEMBLOCK
select REED_SOLOMON
select REED_SOLOMON_ENC8
select REED_SOLOMON_DEC8
diff --git a/fs/reiserfs/Makefile b/fs/reiserfs/Makefile
index a39a562c1c10..bd29c58ccbd8 100644
--- a/fs/reiserfs/Makefile
+++ b/fs/reiserfs/Makefile
@@ -26,14 +26,5 @@ ifeq ($(CONFIG_REISERFS_FS_POSIX_ACL),y)
reiserfs-objs += xattr_acl.o
endif
-# gcc -O2 (the kernel default) is overaggressive on ppc32 when many inline
-# functions are used. This causes the compiler to advance the stack
-# pointer out of the available stack space, corrupting kernel space,
-# and causing a panic. Since this behavior only affects ppc32, this ifeq
-# will work around it. If any other architecture displays this behavior,
-# add it here.
-ccflags-$(CONFIG_PPC32) := $(call cc-ifversion, -lt, 0400, -O1)
-
TAGS:
etags *.c
-
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 48cdfc81fe10..32d8986c26fb 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -185,6 +185,7 @@ struct reiserfs_dentry_buf {
struct dir_context ctx;
struct dentry *xadir;
int count;
+ int err;
struct dentry *dentries[8];
};
@@ -207,6 +208,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen,
dentry = lookup_one_len(name, dbuf->xadir, namelen);
if (IS_ERR(dentry)) {
+ dbuf->err = PTR_ERR(dentry);
return PTR_ERR(dentry);
} else if (d_really_is_negative(dentry)) {
/* A directory entry exists, but no file? */
@@ -215,6 +217,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen,
"not found for file %pd.\n",
dentry, dbuf->xadir);
dput(dentry);
+ dbuf->err = -EIO;
return -EIO;
}
@@ -262,6 +265,10 @@ static int reiserfs_for_each_xattr(struct inode *inode,
err = reiserfs_readdir_inode(d_inode(dir), &buf.ctx);
if (err)
break;
+ if (buf.err) {
+ err = buf.err;
+ break;
+ }
if (!buf.count)
break;
for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) {
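
The new dbuf->err field exists because the readdir machinery may not propagate the callback's return value to the caller, so fill_with_dentries() also stashes its error in the context struct and reiserfs_for_each_xattr() checks it after the walk. A compilable sketch of that idiom, with invented names:

    #include <stdio.h>
    #include <errno.h>

    /* Toy dir_context: the callback records its own error here because the
     * iterator below only reports that iteration stopped, not why. */
    struct toy_ctx {
        int count;
        int err;
    };

    static int fill_one(struct toy_ctx *ctx, const char *name)
    {
        if (name[0] == '!') {       /* pretend the name lookup failed */
            ctx->err = -EIO;
            return -EIO;
        }
        ctx->count++;
        return 0;
    }

    /* Stand-in for readdir: a nonzero callback return just stops the walk. */
    static int toy_iterate(struct toy_ctx *ctx, const char **names, int n)
    {
        for (int i = 0; i < n; i++)
            if (fill_one(ctx, names[i]))
                break;
        return 0;                   /* the callback's error is not returned */
    }

    int main(void)
    {
        const char *names[] = { "user.a", "!broken", "user.b" };
        struct toy_ctx ctx = { 0, 0 };
        int err = toy_iterate(&ctx, names, 3);

        if (!err && ctx.err)        /* the fix: also check the context error */
            err = ctx.err;
        printf("count=%d err=%d\n", ctx.count, err);
        return 0;
    }
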
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 1817a8415a5e..c2de013b2cf4 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -62,10 +62,6 @@ extern void setup_per_cpu_areas(void);
#define PER_CPU_ATTRIBUTES
#endif
-#ifndef PER_CPU_DEF_ATTRIBUTES
-#define PER_CPU_DEF_ATTRIBUTES
-#endif
-
#define raw_cpu_generic_read(pcp) \
({ \
*raw_cpu_ptr(&(pcp)); \
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index acf5e8df3504..f58e97446abc 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -28,8 +28,8 @@
* The available bitmap operations and their rough meaning in the
* case that the bitmap is a single unsigned long are thus:
*
- * Note that nbits should be always a compile time evaluable constant.
- * Otherwise many inlines will generate horrible code.
+ * The generated code is more efficient when nbits is known at
+ * compile-time and at most BITS_PER_LONG.
*
* ::
*
@@ -204,38 +204,31 @@ extern int bitmap_print_to_pagebuf(bool list, char *buf,
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
+/*
+ * The static inlines below do not handle constant nbits==0 correctly,
+ * so make such users (should any ever turn up) call the out-of-line
+ * versions.
+ */
#define small_const_nbits(nbits) \
- (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
+ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
- if (small_const_nbits(nbits))
- *dst = 0UL;
- else {
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memset(dst, 0, len);
- }
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0, len);
}
static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
- if (small_const_nbits(nbits))
- *dst = ~0UL;
- else {
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memset(dst, 0xff, len);
- }
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0xff, len);
}
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
unsigned int nbits)
{
- if (small_const_nbits(nbits))
- *dst = *src;
- else {
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memcpy(dst, src, len);
- }
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memcpy(dst, src, len);
}
/*
@@ -398,7 +391,7 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
}
static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
- unsigned int shift, int nbits)
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
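
With the single-word fast path dropped from bitmap_zero()/bitmap_fill()/bitmap_copy(), those helpers always go through memset()/memcpy(), where a constant nbits == 0 naturally becomes a zero-length, do-nothing operation; the remaining small_const_nbits() fast paths would instead touch a whole word, hence the added (nbits) > 0 condition. A compilable illustration of the memset form (bitmap_copy() is the analogous memcpy()):

    #include <stdio.h>
    #include <string.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
    #define BITS_TO_LONGS(nbits) (((nbits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    static void toy_bitmap_zero(unsigned long *dst, unsigned int nbits)
    {
        unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
        memset(dst, 0, len);        /* nbits == 0 => len == 0 => no-op */
    }

    static void toy_bitmap_fill(unsigned long *dst, unsigned int nbits)
    {
        unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
        memset(dst, 0xff, len);     /* rounds nbits up to whole longs */
    }

    int main(void)
    {
        unsigned long map[4] = { 0xdeadbeefUL, 0xdeadbeefUL, 0xdeadbeefUL, 0xdeadbeefUL };

        toy_bitmap_zero(map, 0);    /* leaves the buffer untouched */
        printf("after zero(0)  : %#lx\n", map[0]);
        toy_bitmap_fill(map, 100);
        printf("after fill(100): %#lx %#lx\n", map[0], map[1]);
        return 0;
    }
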
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
deleted file mode 100644
index 42515195d7d8..000000000000
--- a/include/linux/bootmem.h
+++ /dev/null
@@ -1,404 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
- */
-#ifndef _LINUX_BOOTMEM_H
-#define _LINUX_BOOTMEM_H
-
-#include <linux/mmzone.h>
-#include <linux/mm_types.h>
-#include <asm/dma.h>
-#include <asm/processor.h>
-
-/*
- * simple boot-time physical memory area allocator.
- */
-
-extern unsigned long max_low_pfn;
-extern unsigned long min_low_pfn;
-
-/*
- * highest page
- */
-extern unsigned long max_pfn;
-/*
- * highest possible page
- */
-extern unsigned long long max_possible_pfn;
-
-#ifndef CONFIG_NO_BOOTMEM
-/**
- * struct bootmem_data - per-node information used by the bootmem allocator
- * @node_min_pfn: the starting physical address of the node's memory
- * @node_low_pfn: the end physical address of the directly addressable memory
- * @node_bootmem_map: is a bitmap pointer - the bits represent all physical
- * memory pages (including holes) on the node.
- * @last_end_off: the offset within the page of the end of the last allocation;
- * if 0, the page used is full
- * @hint_idx: the PFN of the page used with the last allocation;
- * together with using this with the @last_end_offset field,
- * a test can be made to see if allocations can be merged
- * with the page used for the last allocation rather than
- * using up a full new page.
- * @list: list entry in the linked list ordered by the memory addresses
- */
-typedef struct bootmem_data {
- unsigned long node_min_pfn;
- unsigned long node_low_pfn;
- void *node_bootmem_map;
- unsigned long last_end_off;
- unsigned long hint_idx;
- struct list_head list;
-} bootmem_data_t;
-
-extern bootmem_data_t bootmem_node_data[];
-#endif
-
-extern unsigned long bootmem_bootmap_pages(unsigned long);
-
-extern unsigned long init_bootmem_node(pg_data_t *pgdat,
- unsigned long freepfn,
- unsigned long startpfn,
- unsigned long endpfn);
-extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
-
-extern unsigned long free_all_bootmem(void);
-extern void reset_node_managed_pages(pg_data_t *pgdat);
-extern void reset_all_zones_managed_pages(void);
-
-extern void free_bootmem_node(pg_data_t *pgdat,
- unsigned long addr,
- unsigned long size);
-extern void free_bootmem(unsigned long physaddr, unsigned long size);
-extern void free_bootmem_late(unsigned long physaddr, unsigned long size);
-
-/*
- * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
- * the architecture-specific code should honor this).
- *
- * If flags is BOOTMEM_DEFAULT, then the return value is always 0 (success).
- * If flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the memory
- * already was reserved.
- */
-#define BOOTMEM_DEFAULT 0
-#define BOOTMEM_EXCLUSIVE (1<<0)
-
-extern int reserve_bootmem(unsigned long addr,
- unsigned long size,
- int flags);
-extern int reserve_bootmem_node(pg_data_t *pgdat,
- unsigned long physaddr,
- unsigned long size,
- int flags);
-
-extern void *__alloc_bootmem(unsigned long size,
- unsigned long align,
- unsigned long goal);
-extern void *__alloc_bootmem_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-extern void *__alloc_bootmem_node(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-void *__alloc_bootmem_node_high(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal,
- unsigned long limit) __malloc;
-extern void *__alloc_bootmem_low(unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-void *__alloc_bootmem_low_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-
-#ifdef CONFIG_NO_BOOTMEM
-/* We are using top down, so it is safe to use 0 here */
-#define BOOTMEM_LOW_LIMIT 0
-#else
-#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS)
-#endif
-
-#ifndef ARCH_LOW_ADDRESS_LIMIT
-#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
-#endif
-
-#define alloc_bootmem(x) \
- __alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_align(x, align) \
- __alloc_bootmem(x, align, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_nopanic(x) \
- __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages(x) \
- __alloc_bootmem(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages_nopanic(x) \
- __alloc_bootmem_nopanic(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_node(pgdat, x) \
- __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_node_nopanic(pgdat, x) \
- __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages_node(pgdat, x) \
- __alloc_bootmem_node(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
- __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-
-#define alloc_bootmem_low(x) \
- __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
-#define alloc_bootmem_low_pages_nopanic(x) \
- __alloc_bootmem_low_nopanic(x, PAGE_SIZE, 0)
-#define alloc_bootmem_low_pages(x) \
- __alloc_bootmem_low(x, PAGE_SIZE, 0)
-#define alloc_bootmem_low_pages_node(pgdat, x) \
- __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-
-
-#if defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM)
-
-/* FIXME: use MEMBLOCK_ALLOC_* variants here */
-#define BOOTMEM_ALLOC_ACCESSIBLE 0
-#define BOOTMEM_ALLOC_ANYWHERE (~(phys_addr_t)0)
-
-/* FIXME: Move to memblock.h at a point where we remove nobootmem.c */
-void *memblock_virt_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
- phys_addr_t min_addr,
- phys_addr_t max_addr, int nid);
-void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size,
- phys_addr_t align, phys_addr_t min_addr,
- phys_addr_t max_addr, int nid);
-void *memblock_virt_alloc_try_nid(phys_addr_t size, phys_addr_t align,
- phys_addr_t min_addr, phys_addr_t max_addr, int nid);
-void __memblock_free_early(phys_addr_t base, phys_addr_t size);
-void __memblock_free_late(phys_addr_t base, phys_addr_t size);
-
-static inline void * __init memblock_virt_alloc(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_raw(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid_raw(size, align, BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, align,
- BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_low(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid(size, align,
- BOOTMEM_LOW_LIMIT,
- ARCH_LOW_ADDRESS_LIMIT,
- NUMA_NO_NODE);
-}
-static inline void * __init memblock_virt_alloc_low_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, align,
- BOOTMEM_LOW_LIMIT,
- ARCH_LOW_ADDRESS_LIMIT,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_from_nopanic(
- phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, align, min_addr,
- BOOTMEM_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_node(
- phys_addr_t size, int nid)
-{
- return memblock_virt_alloc_try_nid(size, 0, BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE, nid);
-}
-
-static inline void * __init memblock_virt_alloc_node_nopanic(
- phys_addr_t size, int nid)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, 0, BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE,
- nid);
-}
-
-static inline void __init memblock_free_early(
- phys_addr_t base, phys_addr_t size)
-{
- __memblock_free_early(base, size);
-}
-
-static inline void __init memblock_free_early_nid(
- phys_addr_t base, phys_addr_t size, int nid)
-{
- __memblock_free_early(base, size);
-}
-
-static inline void __init memblock_free_late(
- phys_addr_t base, phys_addr_t size)
-{
- __memblock_free_late(base, size);
-}
-
-#else
-
-#define BOOTMEM_ALLOC_ACCESSIBLE 0
-
-
-/* Fall back to all the existing bootmem APIs */
-static inline void * __init memblock_virt_alloc(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem(size, align, BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_raw(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_low(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem_low(size, align, 0);
-}
-
-static inline void * __init memblock_virt_alloc_low_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem_low_nopanic(size, align, 0);
-}
-
-static inline void * __init memblock_virt_alloc_from_nopanic(
- phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
-{
- return __alloc_bootmem_nopanic(size, align, min_addr);
-}
-
-static inline void * __init memblock_virt_alloc_node(
- phys_addr_t size, int nid)
-{
- return __alloc_bootmem_node(NODE_DATA(nid), size, SMP_CACHE_BYTES,
- BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_node_nopanic(
- phys_addr_t size, int nid)
-{
- return __alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
- SMP_CACHE_BYTES,
- BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_try_nid(phys_addr_t size,
- phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid)
-{
- return __alloc_bootmem_node_high(NODE_DATA(nid), size, align,
- min_addr);
-}
-
-static inline void * __init memblock_virt_alloc_try_nid_raw(
- phys_addr_t size, phys_addr_t align,
- phys_addr_t min_addr, phys_addr_t max_addr, int nid)
-{
- return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
- min_addr, max_addr);
-}
-
-static inline void * __init memblock_virt_alloc_try_nid_nopanic(
- phys_addr_t size, phys_addr_t align,
- phys_addr_t min_addr, phys_addr_t max_addr, int nid)
-{
- return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
- min_addr, max_addr);
-}
-
-static inline void __init memblock_free_early(
- phys_addr_t base, phys_addr_t size)
-{
- free_bootmem(base, size);
-}
-
-static inline void __init memblock_free_early_nid(
- phys_addr_t base, phys_addr_t size, int nid)
-{
- free_bootmem_node(NODE_DATA(nid), base, size);
-}
-
-static inline void __init memblock_free_late(
- phys_addr_t base, phys_addr_t size)
-{
- free_bootmem_late(base, size);
-}
-#endif /* defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) */
-
-extern void *alloc_large_system_hash(const char *tablename,
- unsigned long bucketsize,
- unsigned long numentries,
- int scale,
- int flags,
- unsigned int *_hash_shift,
- unsigned int *_hash_mask,
- unsigned long low_limit,
- unsigned long high_limit);
-
-#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
-#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
- * shift passed via *_hash_shift */
-#define HASH_ZERO 0x00000004 /* Zero allocated hash table */
-
-/* Only NUMA needs hash distribution. 64bit NUMA architectures have
- * sufficient vmalloc space.
- */
-#ifdef CONFIG_NUMA
-#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
-extern int hashdist; /* Distribute hashes across NUMA nodes? */
-#else
-#define hashdist (0)
-#endif
-
-
-#endif /* _LINUX_BOOTMEM_H */
diff --git a/include/linux/compat.h b/include/linux/compat.h
index d30e4dbd4be2..06e77473f175 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -488,8 +488,11 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
compat_sigset_t v;
switch (_NSIG_WORDS) {
case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
+ /* fall through */
case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
+ /* fall through */
case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
+ /* fall through */
case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
}
return copy_to_user(compat, &v, size) ? -EFAULT : 0;
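
The added comments only document behaviour the switch has always relied on: each case intentionally falls into the ones below it so the higher-numbered sigset words are split first, and the annotations keep -Wimplicit-fallthrough quiet. A stand-alone example of the same cascading pattern (word count and names invented):

    #include <stdio.h>
    #include <stdint.h>

    #define NWORDS 2    /* e.g. a 64-bit arch with 128 signal bits */

    /* Split 64-bit words into 32-bit halves, highest word first; every case
     * deliberately falls through to the ones below it. */
    static void split_words(const uint64_t *in, uint32_t *out)
    {
        switch (NWORDS) {
        case 2:
            out[3] = (uint32_t)(in[1] >> 32); out[2] = (uint32_t)in[1];
            /* fall through */
        case 1:
            out[1] = (uint32_t)(in[0] >> 32); out[0] = (uint32_t)in[0];
        }
    }

    int main(void)
    {
        uint64_t in[NWORDS] = { 0x1111222233334444ULL, 0x5555666677778888ULL };
        uint32_t out[2 * NWORDS] = { 0 };

        split_words(in, out);
        printf("%08x %08x %08x %08x\n", out[3], out[2], out[1], out[0]);
        return 0;
    }
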
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index dde947083d4e..c6fb869a81c0 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -11,7 +11,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * Authors: Jérôme Glisse <jglisse@redhat.com>
+ * Authors: Jérôme Glisse <jglisse@redhat.com>
*/
/*
* Heterogeneous Memory Management (HMM)
@@ -274,14 +274,29 @@ static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
struct hmm_mirror;
/*
- * enum hmm_update_type - type of update
+ * enum hmm_update_event - type of update
* @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
*/
-enum hmm_update_type {
+enum hmm_update_event {
HMM_UPDATE_INVALIDATE,
};
/*
+ * struct hmm_update - HMM update information for the callback
+ *
+ * @start: virtual start address of the range to update
+ * @end: virtual end address of the range to update
+ * @event: event triggering the update (what is happening)
+ * @blockable: can the callback block/sleep?
+ */
+struct hmm_update {
+ unsigned long start;
+ unsigned long end;
+ enum hmm_update_event event;
+ bool blockable;
+};
+
+/*
* struct hmm_mirror_ops - HMM mirror device operations callback
*
* @update: callback to update range on a device
@@ -300,9 +315,9 @@ struct hmm_mirror_ops {
/* sync_cpu_device_pagetables() - synchronize page tables
*
* @mirror: pointer to struct hmm_mirror
- * @update_type: type of update that occurred to the CPU page table
- * @start: virtual start address of the range to update
- * @end: virtual end address of the range to update
+ * @update: update information (see struct hmm_update)
+ * Returns: -EAGAIN if update.blockable is false and the callback needs
+ * to block, 0 otherwise.
*
* This callback ultimately originates from mmu_notifiers when the CPU
* page table is updated. The device driver must update its page table
@@ -313,10 +328,8 @@ struct hmm_mirror_ops {
* page tables are completely updated (TLBs flushed, etc); this is a
* synchronous call.
*/
- void (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
- enum hmm_update_type update_type,
- unsigned long start,
- unsigned long end);
+ int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
+ const struct hmm_update *update);
};
/*
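
The interface change here bundles the callback arguments into struct hmm_update and gives the callback a return value, so a mirror driver can refuse a non-blockable invalidation with -EAGAIN instead of being forced to sleep. A self-contained sketch of that shape (toy names; userspace errno used for the return value):

    #include <stdio.h>
    #include <errno.h>
    #include <stdbool.h>

    /* All the details of one invalidation bundled together, so adding a field
     * such as blockable does not change every callback signature. */
    struct toy_update {
        unsigned long start;
        unsigned long end;
        bool blockable;
    };

    /* A mirror callback: if it would have to sleep but the caller cannot
     * block, it bails out with -EAGAIN. */
    static int toy_sync_pagetables(const struct toy_update *update)
    {
        bool needs_to_sleep = true;     /* pretend a device lock is contended */

        if (needs_to_sleep && !update->blockable)
            return -EAGAIN;
        /* ... invalidate [start, end) in the device page table ... */
        return 0;
    }

    int main(void)
    {
        struct toy_update nonblock = { 0x1000, 0x2000, false };
        struct toy_update block = { 0x1000, 0x2000, true };

        printf("non-blocking caller: %d\n", toy_sync_pagetables(&nonblock));
        printf("blocking caller    : %d\n", toy_sync_pagetables(&block));
        return 0;
    }
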
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 2acdd046df2d..aee299a6aa76 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -2,7 +2,6 @@
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__
-#ifdef CONFIG_HAVE_MEMBLOCK
/*
* Logical memory blocks.
*
@@ -16,6 +15,19 @@
#include <linux/init.h>
#include <linux/mm.h>
+#include <asm/dma.h>
+
+extern unsigned long max_low_pfn;
+extern unsigned long min_low_pfn;
+
+/*
+ * highest page
+ */
+extern unsigned long max_pfn;
+/*
+ * highest possible page
+ */
+extern unsigned long long max_possible_pfn;
#define INIT_MEMBLOCK_REGIONS 128
#define INIT_PHYSMEM_REGIONS 4
@@ -120,6 +132,10 @@ int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
enum memblock_flags choose_memblock_flags(void);
+unsigned long memblock_free_all(void);
+void reset_node_managed_pages(pg_data_t *pgdat);
+void reset_all_zones_managed_pages(void);
+
/* Low level functions */
int memblock_add_range(struct memblock_type *type,
phys_addr_t base, phys_addr_t size,
@@ -301,10 +317,116 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
-phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+/* Flags for memblock allocation APIs */
+#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
+#define MEMBLOCK_ALLOC_ACCESSIBLE 0
+
+/* We are using top down, so it is safe to use 0 here */
+#define MEMBLOCK_LOW_LIMIT 0
+
+#ifndef ARCH_LOW_ADDRESS_LIMIT
+#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
+#endif
+
+phys_addr_t memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+
+phys_addr_t memblock_phys_alloc(phys_addr_t size, phys_addr_t align);
+
+void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid);
+void *memblock_alloc_try_nid_nopanic(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid);
+void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid);
+
+static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
+{
+ return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_raw(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_from(phys_addr_t size,
+ phys_addr_t align,
+ phys_addr_t min_addr)
+{
+ return memblock_alloc_try_nid(size, align, min_addr,
+ MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_nopanic(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_low(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+ ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
+}
+static inline void * __init memblock_alloc_low_nopanic(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
+ ARCH_LOW_ADDRESS_LIMIT,
+ NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_from_nopanic(phys_addr_t size,
+ phys_addr_t align,
+ phys_addr_t min_addr)
+{
+ return memblock_alloc_try_nid_nopanic(size, align, min_addr,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_node(phys_addr_t size,
+ phys_addr_t align, int nid)
+{
+ return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+}
+
+static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size,
+ int nid)
+{
+ return memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES,
+ MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+}
+
+static inline void __init memblock_free_early(phys_addr_t base,
+ phys_addr_t size)
+{
+ __memblock_free_early(base, size);
+}
+
+static inline void __init memblock_free_early_nid(phys_addr_t base,
+ phys_addr_t size, int nid)
+{
+ __memblock_free_early(base, size);
+}
-phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
+static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
+{
+ __memblock_free_late(base, size);
+}
/*
* Set the allocation direction to bottom-up or top-down.
@@ -324,10 +446,6 @@ static inline bool memblock_bottom_up(void)
return memblock.bottom_up;
}
-/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */
-#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
-#define MEMBLOCK_ALLOC_ACCESSIBLE 0
-
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end,
enum memblock_flags flags);
@@ -433,6 +551,31 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
i < memblock_type->cnt; \
i++, rgn = &memblock_type->regions[i])
+extern void *alloc_large_system_hash(const char *tablename,
+ unsigned long bucketsize,
+ unsigned long numentries,
+ int scale,
+ int flags,
+ unsigned int *_hash_shift,
+ unsigned int *_hash_mask,
+ unsigned long low_limit,
+ unsigned long high_limit);
+
+#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
+#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
+ * shift passed via *_hash_shift */
+#define HASH_ZERO 0x00000004 /* Zero allocated hash table */
+
+/* Only NUMA needs hash distribution. 64bit NUMA architectures have
+ * sufficient vmalloc space.
+ */
+#ifdef CONFIG_NUMA
+#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
+extern int hashdist; /* Distribute hashes across NUMA nodes? */
+#else
+#define hashdist (0)
+#endif
+
#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
@@ -440,12 +583,6 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif
-#else
-static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
-{
- return 0;
-}
-#endif /* CONFIG_HAVE_MEMBLOCK */
#endif /* __KERNEL__ */
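
All of the new memblock_alloc*() inlines are thin wrappers that pin default values (MEMBLOCK_LOW_LIMIT, MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE) and funnel into memblock_alloc_try_nid(), much as the deleted bootmem macros wrapped __alloc_bootmem(). A toy version of that wrapper-around-one-workhorse pattern, with calloc() standing in for the real range search and made-up default constants:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    /* Toy defaults playing the roles of MEMBLOCK_LOW_LIMIT,
     * MEMBLOCK_ALLOC_ACCESSIBLE and NUMA_NO_NODE. */
    #define TOY_MIN_ADDR 0
    #define TOY_MAX_ADDR UINT64_MAX
    #define TOY_NO_NODE  (-1)

    /* One workhorse that takes every knob explicitly... */
    static void *toy_alloc_try_nid(size_t size, size_t align,
                                   uint64_t min_addr, uint64_t max_addr, int nid)
    {
        (void)align; (void)min_addr; (void)max_addr; (void)nid;
        return calloc(1, size);     /* placeholder for the real range search */
    }

    /* ...and thin wrappers that only pin the common defaults. */
    static inline void *toy_alloc(size_t size, size_t align)
    {
        return toy_alloc_try_nid(size, align, TOY_MIN_ADDR, TOY_MAX_ADDR,
                                 TOY_NO_NODE);
    }

    static inline void *toy_alloc_node(size_t size, size_t align, int nid)
    {
        return toy_alloc_try_nid(size, align, TOY_MIN_ADDR, TOY_MAX_ADDR, nid);
    }

    int main(void)
    {
        void *p = toy_alloc(64, 64);
        void *q = toy_alloc_node(128, 64, 0);

        printf("p=%p q=%p\n", p, q);
        free(p);
        free(q);
        return 0;
    }
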
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 34a28227068d..ffd9cd10fcf3 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -301,6 +301,7 @@ extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern void remove_memory(int nid, u64 start, u64 size);
+extern void __remove_memory(int nid, u64 start, u64 size);
#else
static inline bool is_mem_section_removable(unsigned long pfn,
@@ -317,11 +318,13 @@ static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
}
static inline void remove_memory(int nid, u64 start, u64 size) {}
+static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
extern void __ref free_area_init_core_hotplug(int nid);
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *));
+extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource, bool online);
extern int arch_add_memory(int nid, u64 start, u64 size,
@@ -330,7 +333,6 @@ extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages, struct vmem_altmap *altmap);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
-extern void remove_memory(int nid, u64 start, u64 size);
extern int sparse_add_one_section(struct pglist_data *pgdat,
unsigned long start_pfn, struct vmem_altmap *altmap);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
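The new double-underscore variants are for callers that already hold device_hotplug_lock; plain add_memory()/remove_memory() now take and drop the lock themselves. A hedged sketch of a lock-holding caller (the probe helper and its range are invented, not part of this series):

#include <linux/device.h>
#include <linux/memory_hotplug.h>

static int example_probe_range(int nid, u64 start, u64 size)
{
	int rc;

	lock_device_hotplug();	/* serialise with userspace online/offline */
	rc = __add_memory(nid, start, size);
	unlock_device_hotplug();

	return rc;
}

/* Callers that do not hold the lock keep using add_memory(nid, start, size). */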
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1e52b8fd1685..fcf9cc9d535f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2163,7 +2163,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
struct mminit_pfnnid_cache *state);
#endif
-#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
+#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
void zero_resv_unavail(void);
#else
static inline void zero_resv_unavail(void) {}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9f0caccd5833..847705a6d0ec 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -633,9 +633,6 @@ typedef struct pglist_data {
struct page_ext *node_page_ext;
#endif
#endif
-#ifndef CONFIG_NO_BOOTMEM
- struct bootmem_data *bdata;
-#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
* Must be held any time you expect node_start_pfn, node_present_pages
@@ -869,7 +866,7 @@ static inline int is_highmem_idx(enum zone_type idx)
}
/**
- * is_highmem - helper function to quickly check if a struct zone is a
+ * is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
* to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
* @zone - pointer to struct zone variable
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 2d2096ba1cfe..1ce8e264a269 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -91,8 +91,7 @@
extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
extern __PCPU_ATTRS(sec) __typeof__(type) name; \
- __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
- __typeof__(type) name
+ __PCPU_ATTRS(sec) __weak __typeof__(type) name
#else
/*
* Normal declaration and definition macros.
@@ -101,8 +100,7 @@
extern __PCPU_ATTRS(sec) __typeof__(type) name
#define DEFINE_PER_CPU_SECTION(type, name, sec) \
- __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \
- __typeof__(type) name
+ __PCPU_ATTRS(sec) __typeof__(type) name
#endif
/*
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index af8a61be2d8d..9510c677ac70 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -51,8 +51,8 @@ extern void __rb_insert_augmented(struct rb_node *node,
*
* On insertion, the user must update the augmented information on the path
* leading to the inserted node, then call rb_link_node() as usual and
- * rb_augment_inserted() instead of the usual rb_insert_color() call.
- * If rb_augment_inserted() rebalances the rbtree, it will callback into
+ * rb_insert_augmented() instead of the usual rb_insert_color() call.
+ * If rb_insert_augmented() rebalances the rbtree, it will callback into
* a user provided function to update the augmented information on the
* affected subtrees.
*/
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 200ed96a05af..f428e86f4800 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -129,9 +129,11 @@ static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \
b3 = b->sig[3]; b2 = b->sig[2]; \
r->sig[3] = op(a3, b3); \
r->sig[2] = op(a2, b2); \
+ /* fall through */ \
case 2: \
a1 = a->sig[1]; b1 = b->sig[1]; \
r->sig[1] = op(a1, b1); \
+ /* fall through */ \
case 1: \
a0 = a->sig[0]; b0 = b->sig[0]; \
r->sig[0] = op(a0, b0); \
@@ -161,7 +163,9 @@ static inline void name(sigset_t *set) \
switch (_NSIG_WORDS) { \
case 4: set->sig[3] = op(set->sig[3]); \
set->sig[2] = op(set->sig[2]); \
+ /* fall through */ \
case 2: set->sig[1] = op(set->sig[1]); \
+ /* fall through */ \
case 1: set->sig[0] = op(set->sig[0]); \
break; \
default: \
@@ -182,6 +186,7 @@ static inline void sigemptyset(sigset_t *set)
memset(set, 0, sizeof(sigset_t));
break;
case 2: set->sig[1] = 0;
+ /* fall through */
case 1: set->sig[0] = 0;
break;
}
@@ -194,6 +199,7 @@ static inline void sigfillset(sigset_t *set)
memset(set, -1, sizeof(sigset_t));
break;
case 2: set->sig[1] = -1;
+ /* fall through */
case 1: set->sig[0] = -1;
break;
}
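The /* fall through */ comments added in these hunks (and in the zlib_inflate changes further down) mark deliberate case fall-through in the form that compilers building with -Wimplicit-fallthrough recognise. Reduced to a standalone, invented example:

static unsigned int decode_level_flags(unsigned int level)
{
	unsigned int flags = 0;

	switch (level) {
	case 2:
		flags |= 0x2;
		/* fall through */	/* level 2 implies level 1 */
	case 1:
		flags |= 0x1;
		break;
	default:
		break;
	}
	return flags;
}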
diff --git a/init/do_mounts.c b/init/do_mounts.c
index e1c9afa9d8c9..a754e3ba9831 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -167,6 +167,24 @@ done:
}
return res;
}
+
+/**
+ * match_dev_by_label - callback for finding a partition using its label
+ * @dev: device passed in by the caller
+ * @data: opaque pointer to the label to match
+ *
+ * Returns 1 if the device matches, and 0 otherwise.
+ */
+static int match_dev_by_label(struct device *dev, const void *data)
+{
+ const char *label = data;
+ struct hd_struct *part = dev_to_part(dev);
+
+ if (part->info && !strcmp(label, part->info->volname))
+ return 1;
+
+ return 0;
+}
#endif
/*
@@ -190,6 +208,8 @@ done:
* a partition with a known unique id.
* 8) <major>:<minor> major and minor number of the device separated by
* a colon.
+ * 9) PARTLABEL=<name> with name being the GPT partition label.
+ * MSDOS partitions do not support labels!
*
* If name doesn't have fall into the categories above, we return (0,0).
* block_class is used to check if something is a disk name. If the disk
@@ -211,6 +231,17 @@ dev_t name_to_dev_t(const char *name)
if (!res)
goto fail;
goto done;
+ } else if (strncmp(name, "PARTLABEL=", 10) == 0) {
+ struct device *dev;
+
+ dev = class_find_device(&block_class, NULL, name + 10,
+ &match_dev_by_label);
+ if (!dev)
+ goto fail;
+
+ res = dev->devt;
+ put_device(dev);
+ goto done;
}
#endif
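PARTLABEL= resolves a GPT partition label to a dev_t the same way the existing PARTUUID= form resolves a unique id; MSDOS partition tables carry no labels, so they never match. An illustrative lookup (the label itself is made up):

dev_t root;

root = name_to_dev_t("PARTLABEL=rootfs");
if (!root)
	;	/* no GPT partition with that label exists */

The same path is reached from the kernel command line with root=PARTLABEL=rootfs.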
diff --git a/init/main.c b/init/main.c
index 1c3f90264280..ee147103ba1b 100644
--- a/init/main.c
+++ b/init/main.c
@@ -25,7 +25,7 @@
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/initrd.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/console.h>
#include <linux/nmi.h>
@@ -375,10 +375,11 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { }
static void __init setup_command_line(char *command_line)
{
saved_command_line =
- memblock_virt_alloc(strlen(boot_command_line) + 1, 0);
+ memblock_alloc(strlen(boot_command_line) + 1, SMP_CACHE_BYTES);
initcall_command_line =
- memblock_virt_alloc(strlen(boot_command_line) + 1, 0);
- static_command_line = memblock_virt_alloc(strlen(command_line) + 1, 0);
+ memblock_alloc(strlen(boot_command_line) + 1, SMP_CACHE_BYTES);
+ static_command_line = memblock_alloc(strlen(command_line) + 1,
+ SMP_CACHE_BYTES);
strcpy(saved_command_line, boot_command_line);
strcpy(static_command_line, command_line);
}
@@ -773,8 +774,10 @@ static int __init initcall_blacklist(char *str)
str_entry = strsep(&str, ",");
if (str_entry) {
pr_debug("blacklisting initcall %s\n", str_entry);
- entry = alloc_bootmem(sizeof(*entry));
- entry->buf = alloc_bootmem(strlen(str_entry) + 1);
+ entry = memblock_alloc(sizeof(*entry),
+ SMP_CACHE_BYTES);
+ entry->buf = memblock_alloc(strlen(str_entry) + 1,
+ SMP_CACHE_BYTES);
strcpy(entry->buf, str_entry);
list_add(&entry->next, &blacklisted_initcalls);
}
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index 8ad93c29f511..49f9bf4ffc7f 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -88,17 +88,39 @@ static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
return proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
}
+static int proc_ipc_sem_dointvec(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret, semmni;
+ struct ipc_namespace *ns = current->nsproxy->ipc_ns;
+
+ semmni = ns->sem_ctls[3];
+ ret = proc_ipc_dointvec(table, write, buffer, lenp, ppos);
+
+ if (!ret)
+ ret = sem_check_semmni(current->nsproxy->ipc_ns);
+
+ /*
+ * Reset the semmni value if an error happens.
+ */
+ if (ret)
+ ns->sem_ctls[3] = semmni;
+ return ret;
+}
+
#else
#define proc_ipc_doulongvec_minmax NULL
#define proc_ipc_dointvec NULL
#define proc_ipc_dointvec_minmax NULL
#define proc_ipc_dointvec_minmax_orphans NULL
#define proc_ipc_auto_msgmni NULL
+#define proc_ipc_sem_dointvec NULL
#endif
static int zero;
static int one = 1;
static int int_max = INT_MAX;
+static int ipc_mni = IPCMNI;
static struct ctl_table ipc_kern_table[] = {
{
@@ -120,7 +142,9 @@ static struct ctl_table ipc_kern_table[] = {
.data = &init_ipc_ns.shm_ctlmni,
.maxlen = sizeof(init_ipc_ns.shm_ctlmni),
.mode = 0644,
- .proc_handler = proc_ipc_dointvec,
+ .proc_handler = proc_ipc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &ipc_mni,
},
{
.procname = "shm_rmid_forced",
@@ -147,7 +171,7 @@ static struct ctl_table ipc_kern_table[] = {
.mode = 0644,
.proc_handler = proc_ipc_dointvec_minmax,
.extra1 = &zero,
- .extra2 = &int_max,
+ .extra2 = &ipc_mni,
},
{
.procname = "auto_msgmni",
@@ -172,7 +196,7 @@ static struct ctl_table ipc_kern_table[] = {
.data = &init_ipc_ns.sem_ctls,
.maxlen = 4*sizeof(int),
.mode = 0644,
- .proc_handler = proc_ipc_dointvec,
+ .proc_handler = proc_ipc_sem_dointvec,
},
#ifdef CONFIG_CHECKPOINT_RESTORE
{
diff --git a/ipc/util.h b/ipc/util.h
index 1ee81bce25e9..d768fdbed515 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -217,6 +217,15 @@ int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
void (*free)(struct ipc_namespace *, struct kern_ipc_perm *));
+static inline int sem_check_semmni(struct ipc_namespace *ns) {
+ /*
+ * Check semmni range [0, IPCMNI]
+ * semmni is the last element of sem_ctls[4] array
+ */
+ return ((ns->sem_ctls[3] < 0) || (ns->sem_ctls[3] > IPCMNI))
+ ? -ERANGE : 0;
+}
+
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
struct compat_ipc_perm {
diff --git a/kernel/bounds.c b/kernel/bounds.c
index c373e887c066..9795d75b09b2 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -13,7 +13,7 @@
#include <linux/log2.h>
#include <linux/spinlock_types.h>
-void foo(void)
+int main(void)
{
/* The enum constants to put into include/generated/bounds.h */
DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
@@ -23,4 +23,6 @@ void foo(void)
#endif
DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
/* End of constants */
+
+ return 0;
}
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index f14c376937e5..22a12ab5a5e9 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -4,7 +4,7 @@
*
* DMA operations that map physical memory directly without using an IOMMU.
*/
-#include <linux/bootmem.h> /* for max_pfn */
+#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index ebecaf255ea2..5731daa09a32 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -40,7 +40,7 @@
#include <asm/dma.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/iommu-helper.h>
#define CREATE_TRACE_POINTS
@@ -204,10 +204,10 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
*/
- io_tlb_list = memblock_virt_alloc(
+ io_tlb_list = memblock_alloc(
PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
PAGE_SIZE);
- io_tlb_orig_addr = memblock_virt_alloc(
+ io_tlb_orig_addr = memblock_alloc(
PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
PAGE_SIZE);
for (i = 0; i < io_tlb_nslabs; i++) {
@@ -242,7 +242,7 @@ swiotlb_init(int verbose)
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
/* Get IO TLB memory from the low pages */
- vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
+ vstart = memblock_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
return;
diff --git a/kernel/fail_function.c b/kernel/fail_function.c
index bc80a4e268c0..17f75b545f66 100644
--- a/kernel/fail_function.c
+++ b/kernel/fail_function.c
@@ -173,8 +173,7 @@ static void fei_debugfs_remove_attr(struct fei_attr *attr)
struct dentry *dir;
dir = debugfs_lookup(attr->kp.symbol_name, fei_debugfs_dir);
- if (dir)
- debugfs_remove_recursive(dir);
+ debugfs_remove_recursive(dir);
}
static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
diff --git a/kernel/futex.c b/kernel/futex.c
index 3e2de8fc1891..f423f9b6577e 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -65,7 +65,7 @@
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/fault-inject.h>
#include <asm/futex.h>
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 0130e488ebfe..8f36c27c1794 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -4,7 +4,7 @@
#endif
#include <linux/hash.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/debug_locks.h>
/*
diff --git a/kernel/panic.c b/kernel/panic.c
index 8b2e002d52eb..f6d549a29a5c 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -136,7 +136,7 @@ void panic(const char *fmt, ...)
{
static char buf[1024];
va_list args;
- long i, i_next = 0;
+ long i, i_next = 0, len;
int state = 0;
int old_cpu, this_cpu;
bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;
@@ -173,8 +173,12 @@ void panic(const char *fmt, ...)
console_verbose();
bust_spinlocks(1);
va_start(args, fmt);
- vsnprintf(buf, sizeof(buf), fmt, args);
+ len = vscnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
+
+ if (len && buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
/*
@@ -631,7 +635,7 @@ device_initcall(register_warn_debugfs);
*/
__visible void __stack_chk_fail(void)
{
- panic("stack-protector: Kernel stack is corrupted in: %pB\n",
+ panic("stack-protector: Kernel stack is corrupted in: %pB",
__builtin_return_address(0));
}
EXPORT_SYMBOL(__stack_chk_fail);
diff --git a/kernel/pid.c b/kernel/pid.c
index cdf63e53a014..b2f6c506035d 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -31,7 +31,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 3d37c279c090..b0308a2c6000 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -23,7 +23,7 @@
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
@@ -963,7 +963,8 @@ void __init __register_nosave_region(unsigned long start_pfn,
BUG_ON(!region);
} else {
/* This allocation cannot fail */
- region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
+ region = memblock_alloc(sizeof(struct nosave_region),
+ SMP_CACHE_BYTES);
}
region->start_pfn = start_pfn;
region->end_pfn = end_pfn;
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index b77150ad1965..1b2a029360b7 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -31,7 +31,6 @@
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/security.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/crash_core.h>
@@ -1111,9 +1110,9 @@ void __init setup_log_buf(int early)
if (early) {
new_log_buf =
- memblock_virt_alloc(new_log_buf_len, LOG_ALIGN);
+ memblock_alloc(new_log_buf_len, LOG_ALIGN);
} else {
- new_log_buf = memblock_virt_alloc_nopanic(new_log_buf_len,
+ new_log_buf = memblock_alloc_nopanic(new_log_buf_len,
LOG_ALIGN);
}
diff --git a/kernel/profile.c b/kernel/profile.c
index 9aa2a4445b0d..9c08a2c7cb1d 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -16,7 +16,7 @@
#include <linux/export.h>
#include <linux/profile.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
diff --git a/kernel/signal.c b/kernel/signal.c
index 17565240b1c6..9a32bc2088c9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -892,7 +892,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
/*
* The first thread which returns from do_signal_stop()
* will take ->siglock, notice SIGNAL_CLD_MASK, and
- * notify its parent. See get_signal_to_deliver().
+ * notify its parent. See get_signal().
*/
signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
signal->group_stop_count = 0;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e0ba05e6f6bd..1af29b8224fd 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1292,7 +1292,7 @@ config DEBUG_KOBJECT
depends on DEBUG_KERNEL
help
If you say Y here, some extra kobject debugging messages will be sent
- to the syslog.
+ to the syslog.
config DEBUG_KOBJECT_RELEASE
bool "kobject release debugging"
@@ -1980,7 +1980,6 @@ endif # RUNTIME_TESTING_MENU
config MEMTEST
bool "Memtest"
- depends on HAVE_MEMBLOCK
---help---
This option adds a kernel parameter 'memtest', which allows memtest
to be set.
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 2fd07f6df0b8..eead55aa7170 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -13,6 +13,7 @@
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
@@ -36,11 +37,6 @@
* carefully filter out these unused bits from impacting their
* results.
*
- * These operations actually hold to a slightly stronger rule:
- * if you don't input any bitmaps to these ops that have some
- * unused bits set, then they won't output any set unused bits
- * in output bitmaps.
- *
* The byte ordering of bitmaps is more natural on little
* endian architectures. See the big-endian headers
* include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
@@ -466,20 +462,18 @@ EXPORT_SYMBOL(bitmap_parse_user);
* ranges if list is specified or hex digits grouped into comma-separated
* sets of 8 digits/set. Returns the number of characters written to buf.
*
- * It is assumed that @buf is a pointer into a PAGE_SIZE area and that
- * sufficient storage remains at @buf to accommodate the
- * bitmap_print_to_pagebuf() output.
+ * It is assumed that @buf is a pointer into a PAGE_SIZE, page-aligned
+ * area and that sufficient storage remains at @buf to accommodate the
+ * bitmap_print_to_pagebuf() output. Returns the number of characters
+ * actually printed to @buf, excluding terminating '\0'.
*/
int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
int nmaskbits)
{
- ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
- int n = 0;
+ ptrdiff_t len = PAGE_SIZE - offset_in_page(buf);
- if (len > 1)
- n = list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
- scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
- return n;
+ return list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
+ scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
}
EXPORT_SYMBOL(bitmap_print_to_pagebuf);
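The reworked helper derives the remaining space from offset_in_page(buf), which is only correct because, as the updated comment states, callers pass a pointer into a page-aligned PAGE_SIZE buffer — in practice a sysfs ->show() buffer. A sketch of such a caller (the device/attribute pairing is hypothetical):

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/device.h>

static ssize_t online_cpulist_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	/* "true" selects the ranged list format, e.g. "0-3,8-11\n" */
	return bitmap_print_to_pagebuf(true, buf,
				       cpumask_bits(cpu_online_mask),
				       nr_cpu_ids);
}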
diff --git a/lib/cpumask.c b/lib/cpumask.c
index beca6244671a..8d666ab84b5c 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -4,7 +4,7 @@
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
/**
* cpumask_next - get the next cpu in a cpumask
@@ -163,7 +163,7 @@ EXPORT_SYMBOL(zalloc_cpumask_var);
*/
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
- *mask = memblock_virt_alloc(cpumask_size(), 0);
+ *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
}
/**
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 661a1e807bd1..1006bf70bf74 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -175,7 +175,7 @@ int _kstrtoul(const char *s, unsigned int base, unsigned long *res)
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (unsigned long long)(unsigned long)tmp)
+ if (tmp != (unsigned long)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -191,7 +191,7 @@ int _kstrtol(const char *s, unsigned int base, long *res)
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (long long)(long)tmp)
+ if (tmp != (long)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -222,7 +222,7 @@ int kstrtouint(const char *s, unsigned int base, unsigned int *res)
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (unsigned long long)(unsigned int)tmp)
+ if (tmp != (unsigned int)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -253,7 +253,7 @@ int kstrtoint(const char *s, unsigned int base, int *res)
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (long long)(int)tmp)
+ if (tmp != (int)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -268,7 +268,7 @@ int kstrtou16(const char *s, unsigned int base, u16 *res)
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (unsigned long long)(u16)tmp)
+ if (tmp != (u16)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -283,7 +283,7 @@ int kstrtos16(const char *s, unsigned int base, s16 *res)
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (long long)(s16)tmp)
+ if (tmp != (s16)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -298,7 +298,7 @@ int kstrtou8(const char *s, unsigned int base, u8 *res)
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (unsigned long long)(u8)tmp)
+ if (tmp != (u8)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -313,7 +313,7 @@ int kstrtos8(const char *s, unsigned int base, s8 *res)
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (long long)(s8)tmp)
+ if (tmp != (s8)tmp)
return -ERANGE;
*res = tmp;
return 0;
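The dropped casts were redundant: the narrowing cast on the right-hand side is promoted straight back to the 64-bit type for the comparison, so "tmp != (u8)tmp" rejects exactly the same out-of-range values as before. Behaviour at the call site is unchanged, for example:

#include <linux/kernel.h>

u8 port;
int err;

err = kstrtou8("300", 10, &port);	/* 300 > 255: err == -ERANGE */
err = kstrtou8("42", 10, &port);	/* err == 0, port == 42 */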
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 141734d255e4..0c9d3ad17e0f 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -43,30 +43,36 @@
/*-*****************************
* Decompression functions
*******************************/
-/* LZ4_decompress_generic() :
- * This generic decompression function cover all use cases.
- * It shall be instantiated several times, using different sets of directives
- * Note that it is important this generic function is really inlined,
+
+#define DEBUGLOG(l, ...) {} /* disabled */
+
+#ifndef assert
+#define assert(condition) ((void)0)
+#endif
+
+/*
+ * LZ4_decompress_generic() :
+ * This generic decompression function covers all use cases.
+ * It shall be instantiated several times, using different sets of directives.
+ * Note that it is important for performance that this function really get inlined,
* in order to remove useless branches during compilation optimization.
*/
static FORCE_INLINE int LZ4_decompress_generic(
- const char * const source,
- char * const dest,
- int inputSize,
+ const char * const src,
+ char * const dst,
+ int srcSize,
/*
* If endOnInput == endOnInputSize,
- * this value is the max size of Output Buffer.
+ * this value is `dstCapacity`
*/
int outputSize,
/* endOnOutputSize, endOnInputSize */
- int endOnInput,
+ endCondition_directive endOnInput,
/* full, partial */
- int partialDecoding,
- /* only used if partialDecoding == partial */
- int targetOutputSize,
+ earlyEnd_directive partialDecoding,
/* noDict, withPrefix64k, usingExtDict */
- int dict,
- /* == dest when no prefix */
+ dict_directive dict,
+ /* always <= dst, == dst when no prefix */
const BYTE * const lowPrefix,
/* only if dict == usingExtDict */
const BYTE * const dictStart,
@@ -74,35 +80,43 @@ static FORCE_INLINE int LZ4_decompress_generic(
const size_t dictSize
)
{
- /* Local Variables */
- const BYTE *ip = (const BYTE *) source;
- const BYTE * const iend = ip + inputSize;
+ const BYTE *ip = (const BYTE *) src;
+ const BYTE * const iend = ip + srcSize;
- BYTE *op = (BYTE *) dest;
+ BYTE *op = (BYTE *) dst;
BYTE * const oend = op + outputSize;
BYTE *cpy;
- BYTE *oexit = op + targetOutputSize;
- const BYTE * const lowLimit = lowPrefix - dictSize;
const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
- static const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
- static const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 };
+ static const unsigned int inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
+ static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
const int safeDecode = (endOnInput == endOnInputSize);
const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));
+ /* Set up the "end" pointers for the shortcut. */
+ const BYTE *const shortiend = iend -
+ (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
+ const BYTE *const shortoend = oend -
+ (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
+
+ DEBUGLOG(5, "%s (srcSize:%i, dstSize:%i)", __func__,
+ srcSize, outputSize);
+
/* Special cases */
- /* targetOutputSize too high => decode everything */
- if ((partialDecoding) && (oexit > oend - MFLIMIT))
- oexit = oend - MFLIMIT;
+ assert(lowPrefix <= op);
+ assert(src != NULL);
/* Empty output buffer */
if ((endOnInput) && (unlikely(outputSize == 0)))
- return ((inputSize == 1) && (*ip == 0)) ? 0 : -1;
+ return ((srcSize == 1) && (*ip == 0)) ? 0 : -1;
if ((!endOnInput) && (unlikely(outputSize == 0)))
return (*ip == 0 ? 1 : -1);
+ if ((endOnInput) && unlikely(srcSize == 0))
+ return -1;
+
/* Main Loop : decode sequences */
while (1) {
size_t length;
@@ -111,12 +125,74 @@ static FORCE_INLINE int LZ4_decompress_generic(
/* get literal length */
unsigned int const token = *ip++;
-
length = token>>ML_BITS;
+ /* ip < iend before the increment */
+ assert(!endOnInput || ip <= iend);
+
+ /*
+ * A two-stage shortcut for the most common case:
+ * 1) If the literal length is 0..14, and there is enough
+ * space, enter the shortcut and copy 16 bytes on behalf
+ * of the literals (in the fast mode, only 8 bytes can be
+ * safely copied this way).
+ * 2) Further if the match length is 4..18, copy 18 bytes
+ * in a similar manner; but we ensure that there's enough
+ * space in the output for those 18 bytes earlier, upon
+ * entering the shortcut (in other words, there is a
+ * combined check for both stages).
+ */
+ if ((endOnInput ? length != RUN_MASK : length <= 8)
+ /*
+ * strictly "less than" on input, to re-enter
+ * the loop with at least one byte
+ */
+ && likely((endOnInput ? ip < shortiend : 1) &
+ (op <= shortoend))) {
+ /* Copy the literals */
+ memcpy(op, ip, endOnInput ? 16 : 8);
+ op += length; ip += length;
+
+ /*
+ * The second stage:
+ * prepare for match copying, decode full info.
+ * If it doesn't work out, the info won't be wasted.
+ */
+ length = token & ML_MASK; /* match length */
+ offset = LZ4_readLE16(ip);
+ ip += 2;
+ match = op - offset;
+ assert(match <= op); /* check overflow */
+
+ /* Do not deal with overlapping matches. */
+ if ((length != ML_MASK) &&
+ (offset >= 8) &&
+ (dict == withPrefix64k || match >= lowPrefix)) {
+ /* Copy the match. */
+ memcpy(op + 0, match + 0, 8);
+ memcpy(op + 8, match + 8, 8);
+ memcpy(op + 16, match + 16, 2);
+ op += length + MINMATCH;
+ /* Both stages worked, load the next token. */
+ continue;
+ }
+
+ /*
+ * The second stage didn't work out, but the info
+ * is ready. Propel it right to the point of match
+ * copying.
+ */
+ goto _copy_match;
+ }
+
+ /* decode literal length */
if (length == RUN_MASK) {
unsigned int s;
+ if (unlikely(endOnInput ? ip >= iend - RUN_MASK : 0)) {
+ /* overflow detection */
+ goto _output_error;
+ }
do {
s = *ip++;
length += s;
@@ -125,14 +201,14 @@ static FORCE_INLINE int LZ4_decompress_generic(
: 1) & (s == 255));
if ((safeDecode)
- && unlikely(
- (size_t)(op + length) < (size_t)(op))) {
+ && unlikely((uptrval)(op) +
+ length < (uptrval)(op))) {
/* overflow detection */
goto _output_error;
}
if ((safeDecode)
- && unlikely(
- (size_t)(ip + length) < (size_t)(ip))) {
+ && unlikely((uptrval)(ip) +
+ length < (uptrval)(ip))) {
/* overflow detection */
goto _output_error;
}
@@ -140,16 +216,19 @@ static FORCE_INLINE int LZ4_decompress_generic(
/* copy literals */
cpy = op + length;
- if (((endOnInput) && ((cpy > (partialDecoding ? oexit : oend - MFLIMIT))
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+
+ if (((endOnInput) && ((cpy > oend - MFLIMIT)
|| (ip + length > iend - (2 + 1 + LASTLITERALS))))
|| ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
if (partialDecoding) {
if (cpy > oend) {
/*
- * Error :
- * write attempt beyond end of output buffer
+ * Partial decoding :
+ * stop in the middle of literal segment
*/
- goto _output_error;
+ cpy = oend;
+ length = oend - op;
}
if ((endOnInput)
&& (ip + length > iend)) {
@@ -184,29 +263,43 @@ static FORCE_INLINE int LZ4_decompress_generic(
memcpy(op, ip, length);
ip += length;
op += length;
+
/* Necessarily EOF, due to parsing restrictions */
- break;
+ if (!partialDecoding || (cpy == oend))
+ break;
+ } else {
+ /* may overwrite up to WILDCOPYLENGTH beyond cpy */
+ LZ4_wildCopy(op, ip, cpy);
+ ip += length;
+ op = cpy;
}
- LZ4_wildCopy(op, ip, cpy);
- ip += length;
- op = cpy;
-
/* get offset */
offset = LZ4_readLE16(ip);
ip += 2;
match = op - offset;
- if ((checkOffset) && (unlikely(match < lowLimit))) {
+ /* get matchlength */
+ length = token & ML_MASK;
+
+_copy_match:
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) {
/* Error : offset outside buffers */
goto _output_error;
}
/* costs ~1%; silence an msan warning when offset == 0 */
- LZ4_write32(op, (U32)offset);
+ /*
+ * note : when partialDecoding, there is no guarantee that
+ * at least 4 bytes remain available in output buffer
+ */
+ if (!partialDecoding) {
+ assert(oend > op);
+ assert(oend - op >= 4);
+
+ LZ4_write32(op, (U32)offset);
+ }
- /* get matchlength */
- length = token & ML_MASK;
if (length == ML_MASK) {
unsigned int s;
@@ -221,7 +314,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
if ((safeDecode)
&& unlikely(
- (size_t)(op + length) < (size_t)op)) {
+ (uptrval)(op) + length < (uptrval)op)) {
/* overflow detection */
goto _output_error;
}
@@ -229,24 +322,26 @@ static FORCE_INLINE int LZ4_decompress_generic(
length += MINMATCH;
- /* check external dictionary */
+ /* match starting within external dictionary */
if ((dict == usingExtDict) && (match < lowPrefix)) {
if (unlikely(op + length > oend - LASTLITERALS)) {
/* doesn't respect parsing restriction */
- goto _output_error;
+ if (!partialDecoding)
+ goto _output_error;
+ length = min(length, (size_t)(oend - op));
}
if (length <= (size_t)(lowPrefix - match)) {
/*
- * match can be copied as a single segment
- * from external dictionary
+ * match fits entirely within external
+ * dictionary : just copy
*/
memmove(op, dictEnd - (lowPrefix - match),
length);
op += length;
} else {
/*
- * match encompass external
+ * match stretches into both external
* dictionary and current block
*/
size_t const copySize = (size_t)(lowPrefix - match);
@@ -254,7 +349,6 @@ static FORCE_INLINE int LZ4_decompress_generic(
memcpy(op, dictEnd - copySize, copySize);
op += copySize;
-
if (restSize > (size_t)(op - lowPrefix)) {
/* overlap copy */
BYTE * const endOfMatch = op + restSize;
@@ -267,23 +361,44 @@ static FORCE_INLINE int LZ4_decompress_generic(
op += restSize;
}
}
-
continue;
}
/* copy match within block */
cpy = op + length;
- if (unlikely(offset < 8)) {
- const int dec64 = dec64table[offset];
+ /*
+ * partialDecoding :
+ * may not respect endBlock parsing restrictions
+ */
+ assert(op <= oend);
+ if (partialDecoding &&
+ (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
+ size_t const mlen = min(length, (size_t)(oend - op));
+ const BYTE * const matchEnd = match + mlen;
+ BYTE * const copyEnd = op + mlen;
+
+ if (matchEnd > op) {
+ /* overlap copy */
+ while (op < copyEnd)
+ *op++ = *match++;
+ } else {
+ memcpy(op, match, mlen);
+ }
+ op = copyEnd;
+ if (op == oend)
+ break;
+ continue;
+ }
+ if (unlikely(offset < 8)) {
op[0] = match[0];
op[1] = match[1];
op[2] = match[2];
op[3] = match[3];
- match += dec32table[offset];
+ match += inc32table[offset];
memcpy(op + 4, match, 4);
- match -= dec64;
+ match -= dec64table[offset];
} else {
LZ4_copy8(op, match);
match += 8;
@@ -291,7 +406,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
op += 8;
- if (unlikely(cpy > oend - 12)) {
+ if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
if (cpy > oend - LASTLITERALS) {
@@ -307,60 +422,139 @@ static FORCE_INLINE int LZ4_decompress_generic(
match += oCopyLimit - op;
op = oCopyLimit;
}
-
while (op < cpy)
*op++ = *match++;
} else {
LZ4_copy8(op, match);
-
if (length > 16)
LZ4_wildCopy(op + 8, match + 8, cpy);
}
-
- op = cpy; /* correction */
+ op = cpy; /* wildcopy correction */
}
/* end of decoding */
if (endOnInput) {
/* Nb of output bytes decoded */
- return (int) (((char *)op) - dest);
+ return (int) (((char *)op) - dst);
} else {
/* Nb of input bytes read */
- return (int) (((const char *)ip) - source);
+ return (int) (((const char *)ip) - src);
}
/* Overflow error detected */
_output_error:
- return -1;
+ return (int) (-(((const char *)ip) - src)) - 1;
}
int LZ4_decompress_safe(const char *source, char *dest,
int compressedSize, int maxDecompressedSize)
{
- return LZ4_decompress_generic(source, dest, compressedSize,
- maxDecompressedSize, endOnInputSize, full, 0,
- noDict, (BYTE *)dest, NULL, 0);
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxDecompressedSize,
+ endOnInputSize, decode_full_block,
+ noDict, (BYTE *)dest, NULL, 0);
}
-int LZ4_decompress_safe_partial(const char *source, char *dest,
- int compressedSize, int targetOutputSize, int maxDecompressedSize)
+int LZ4_decompress_safe_partial(const char *src, char *dst,
+ int compressedSize, int targetOutputSize, int dstCapacity)
{
- return LZ4_decompress_generic(source, dest, compressedSize,
- maxDecompressedSize, endOnInputSize, partial,
- targetOutputSize, noDict, (BYTE *)dest, NULL, 0);
+ dstCapacity = min(targetOutputSize, dstCapacity);
+ return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
+ endOnInputSize, partial_decode,
+ noDict, (BYTE *)dst, NULL, 0);
}
int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
{
return LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, full, 0, withPrefix64k,
- (BYTE *)(dest - 64 * KB), NULL, 64 * KB);
+ endOnOutputSize, decode_full_block,
+ withPrefix64k,
+ (BYTE *)dest - 64 * KB, NULL, 0);
+}
+
+/* ===== Instantiate a few more decoding cases, used more than once. ===== */
+
+int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
+ int compressedSize, int maxOutputSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ withPrefix64k,
+ (BYTE *)dest - 64 * KB, NULL, 0);
+}
+
+static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
+ int compressedSize,
+ int maxOutputSize,
+ size_t prefixSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ noDict,
+ (BYTE *)dest - prefixSize, NULL, 0);
+}
+
+int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest,
+ (const BYTE *)dictStart, dictSize);
}
+static int LZ4_decompress_fast_extDict(const char *source, char *dest,
+ int originalSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ 0, originalSize,
+ endOnOutputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest,
+ (const BYTE *)dictStart, dictSize);
+}
+
+/*
+ * The "double dictionary" mode, for use with e.g. ring buffers: the first part
+ * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
+ * These routines are used only once, in LZ4_decompress_*_continue().
+ */
+static FORCE_INLINE
+int LZ4_decompress_safe_doubleDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ size_t prefixSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest - prefixSize,
+ (const BYTE *)dictStart, dictSize);
+}
+
+static FORCE_INLINE
+int LZ4_decompress_fast_doubleDict(const char *source, char *dest,
+ int originalSize, size_t prefixSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ 0, originalSize,
+ endOnOutputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest - prefixSize,
+ (const BYTE *)dictStart, dictSize);
+}
+
+/* ===== streaming decompression functions ===== */
+
int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
const char *dictionary, int dictSize)
{
- LZ4_streamDecode_t_internal *lz4sd = (LZ4_streamDecode_t_internal *) LZ4_streamDecode;
+ LZ4_streamDecode_t_internal *lz4sd =
+ &LZ4_streamDecode->internal_donotuse;
lz4sd->prefixSize = (size_t) dictSize;
lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize;
@@ -382,35 +576,51 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
const char *source, char *dest, int compressedSize, int maxOutputSize)
{
- LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
+ LZ4_streamDecode_t_internal *lz4sd =
+ &LZ4_streamDecode->internal_donotuse;
int result;
- if (lz4sd->prefixEnd == (BYTE *)dest) {
- result = LZ4_decompress_generic(source, dest,
- compressedSize,
- maxOutputSize,
- endOnInputSize, full, 0,
- usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize,
- lz4sd->externalDict,
- lz4sd->extDictSize);
-
+ if (lz4sd->prefixSize == 0) {
+ /* The first call, no dictionary yet. */
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_safe(source, dest,
+ compressedSize, maxOutputSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = result;
+ lz4sd->prefixEnd = (BYTE *)dest + result;
+ } else if (lz4sd->prefixEnd == (BYTE *)dest) {
+ /* They're rolling the current segment. */
+ if (lz4sd->prefixSize >= 64 * KB - 1)
+ result = LZ4_decompress_safe_withPrefix64k(source, dest,
+ compressedSize, maxOutputSize);
+ else if (lz4sd->extDictSize == 0)
+ result = LZ4_decompress_safe_withSmallPrefix(source,
+ dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize);
+ else
+ result = LZ4_decompress_safe_doubleDict(source, dest,
+ compressedSize, maxOutputSize,
+ lz4sd->prefixSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
-
lz4sd->prefixSize += result;
- lz4sd->prefixEnd += result;
+ lz4sd->prefixEnd += result;
} else {
+ /*
+ * The buffer wraps around, or they're
+ * switching to another buffer.
+ */
lz4sd->extDictSize = lz4sd->prefixSize;
lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_generic(source, dest,
+ result = LZ4_decompress_safe_forceExtDict(source, dest,
compressedSize, maxOutputSize,
- endOnInputSize, full, 0,
- usingExtDict, (BYTE *)dest,
lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
lz4sd->prefixSize = result;
- lz4sd->prefixEnd = (BYTE *)dest + result;
+ lz4sd->prefixEnd = (BYTE *)dest + result;
}
return result;
@@ -422,75 +632,66 @@ int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
int result;
- if (lz4sd->prefixEnd == (BYTE *)dest) {
- result = LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, full, 0,
- usingExtDict,
- lz4sd->prefixEnd - lz4sd->prefixSize,
- lz4sd->externalDict, lz4sd->extDictSize);
-
+ if (lz4sd->prefixSize == 0) {
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_fast(source, dest, originalSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = originalSize;
+ lz4sd->prefixEnd = (BYTE *)dest + originalSize;
+ } else if (lz4sd->prefixEnd == (BYTE *)dest) {
+ if (lz4sd->prefixSize >= 64 * KB - 1 ||
+ lz4sd->extDictSize == 0)
+ result = LZ4_decompress_fast(source, dest,
+ originalSize);
+ else
+ result = LZ4_decompress_fast_doubleDict(source, dest,
+ originalSize, lz4sd->prefixSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
-
lz4sd->prefixSize += originalSize;
- lz4sd->prefixEnd += originalSize;
+ lz4sd->prefixEnd += originalSize;
} else {
lz4sd->extDictSize = lz4sd->prefixSize;
lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, full, 0,
- usingExtDict, (BYTE *)dest,
- lz4sd->externalDict, lz4sd->extDictSize);
+ result = LZ4_decompress_fast_extDict(source, dest,
+ originalSize, lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
lz4sd->prefixSize = originalSize;
- lz4sd->prefixEnd = (BYTE *)dest + originalSize;
+ lz4sd->prefixEnd = (BYTE *)dest + originalSize;
}
-
return result;
}
-/*
- * Advanced decoding functions :
- * *_usingDict() :
- * These decoding functions work the same as "_continue" ones,
- * the dictionary must be explicitly provided within parameters
- */
-static FORCE_INLINE int LZ4_decompress_usingDict_generic(const char *source,
- char *dest, int compressedSize, int maxOutputSize, int safe,
- const char *dictStart, int dictSize)
+int LZ4_decompress_safe_usingDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ const char *dictStart, int dictSize)
{
if (dictSize == 0)
- return LZ4_decompress_generic(source, dest,
- compressedSize, maxOutputSize, safe, full, 0,
- noDict, (BYTE *)dest, NULL, 0);
- if (dictStart + dictSize == dest) {
- if (dictSize >= (int)(64 * KB - 1))
- return LZ4_decompress_generic(source, dest,
- compressedSize, maxOutputSize, safe, full, 0,
- withPrefix64k, (BYTE *)dest - 64 * KB, NULL, 0);
- return LZ4_decompress_generic(source, dest, compressedSize,
- maxOutputSize, safe, full, 0, noDict,
- (BYTE *)dest - dictSize, NULL, 0);
+ return LZ4_decompress_safe(source, dest,
+ compressedSize, maxOutputSize);
+ if (dictStart+dictSize == dest) {
+ if (dictSize >= 64 * KB - 1)
+ return LZ4_decompress_safe_withPrefix64k(source, dest,
+ compressedSize, maxOutputSize);
+ return LZ4_decompress_safe_withSmallPrefix(source, dest,
+ compressedSize, maxOutputSize, dictSize);
}
- return LZ4_decompress_generic(source, dest, compressedSize,
- maxOutputSize, safe, full, 0, usingExtDict,
- (BYTE *)dest, (const BYTE *)dictStart, dictSize);
-}
-
-int LZ4_decompress_safe_usingDict(const char *source, char *dest,
- int compressedSize, int maxOutputSize,
- const char *dictStart, int dictSize)
-{
- return LZ4_decompress_usingDict_generic(source, dest,
- compressedSize, maxOutputSize, 1, dictStart, dictSize);
+ return LZ4_decompress_safe_forceExtDict(source, dest,
+ compressedSize, maxOutputSize, dictStart, dictSize);
}
int LZ4_decompress_fast_usingDict(const char *source, char *dest,
- int originalSize, const char *dictStart, int dictSize)
+ int originalSize,
+ const char *dictStart, int dictSize)
{
- return LZ4_decompress_usingDict_generic(source, dest, 0,
- originalSize, 0, dictStart, dictSize);
+ if (dictSize == 0 || dictStart + dictSize == dest)
+ return LZ4_decompress_fast(source, dest, originalSize);
+
+ return LZ4_decompress_fast_extDict(source, dest, originalSize,
+ dictStart, dictSize);
}
#ifndef STATIC
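With partial decoding reworked, LZ4_decompress_safe_partial() may now stop in the middle of a literal or match sequence once dstCapacity bytes have been produced, and it returns the number of bytes actually written rather than an error; targetOutputSize is clamped to dstCapacity on entry. A usage sketch with placeholder buffers:

#include <linux/lz4.h>

/* comp_buf/comp_len are assumed to hold an LZ4 block of any size. */
char header[256];
int n;

n = LZ4_decompress_safe_partial(comp_buf, header, comp_len,
				sizeof(header), sizeof(header));
if (n < 0)
	return -EINVAL;		/* malformed compressed data */
/* on success, n (<= 256) bytes of decoded output are in header[] */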
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index 00a0b58a0871..1a7fa9d9170f 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -75,6 +75,11 @@ typedef uintptr_t uptrval;
#define WILDCOPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
+/*
+ * ensure it's possible to write 2 x wildcopyLength
+ * without overflowing output buffer
+ */
+#define MATCH_SAFEGUARD_DISTANCE ((2 * WILDCOPYLENGTH) - MINMATCH)
/* Increase this value ==> compression run slower on incompressible data */
#define LZ4_SKIPTRIGGER 6
@@ -222,6 +227,8 @@ typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
-typedef enum { full = 0, partial = 1 } earlyEnd_directive;
+typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
+
+#define LZ4_STATIC_ASSERT(c) BUILD_BUG_ON(!(c))
#endif
diff --git a/lib/parser.c b/lib/parser.c
index 3278958b472a..dd70e5e6c9e2 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -131,13 +131,10 @@ static int match_number(substring_t *s, int *result, int base)
char *buf;
int ret;
long val;
- size_t len = s->to - s->from;
- buf = kmalloc(len + 1, GFP_KERNEL);
+ buf = match_strdup(s);
if (!buf)
return -ENOMEM;
- memcpy(buf, s->from, len);
- buf[len] = '\0';
ret = 0;
val = simple_strtol(buf, &endp, base);
@@ -166,13 +163,10 @@ static int match_u64int(substring_t *s, u64 *result, int base)
char *buf;
int ret;
u64 val;
- size_t len = s->to - s->from;
- buf = kmalloc(len + 1, GFP_KERNEL);
+ buf = match_strdup(s);
if (!buf)
return -ENOMEM;
- memcpy(buf, s->from, len);
- buf[len] = '\0';
ret = kstrtoull(buf, base, &val);
if (!ret)
@@ -327,10 +321,6 @@ EXPORT_SYMBOL(match_strlcpy);
*/
char *match_strdup(const substring_t *s)
{
- size_t sz = s->to - s->from + 1;
- char *p = kmalloc(sz, GFP_KERNEL);
- if (p)
- match_strlcpy(p, s, sz);
- return p;
+ return kmemdup_nul(s->from, s->to - s->from, GFP_KERNEL);
}
EXPORT_SYMBOL(match_strdup);
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
index 6dd30615a201..d1c1e6388eaa 100644
--- a/lib/sg_pool.c
+++ b/lib/sg_pool.c
@@ -148,10 +148,9 @@ static __init int sg_pool_init(void)
cleanup_sdb:
for (i = 0; i < SG_MEMPOOL_NR; i++) {
struct sg_pool *sgp = sg_pools + i;
- if (sgp->pool)
- mempool_destroy(sgp->pool);
- if (sgp->slab)
- kmem_cache_destroy(sgp->slab);
+
+ mempool_destroy(sgp->pool);
+ kmem_cache_destroy(sgp->slab);
}
return -ENOMEM;
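Like the debugfs_remove_recursive() cleanup earlier in the series, this relies on the destructors being safe to call with NULL, so the conditional guards added no value:

mempool_destroy(NULL);		/* no-op */
kmem_cache_destroy(NULL);	/* no-op */
debugfs_remove_recursive(NULL);	/* no-op; also tolerates ERR_PTR() */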
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
index 58a733b10387..48f14cd58c77 100644
--- a/lib/zlib_inflate/inflate.c
+++ b/lib/zlib_inflate/inflate.c
@@ -382,6 +382,7 @@ int zlib_inflate(z_streamp strm, int flush)
strm->adler = state->check = REVERSE(hold);
INITBITS();
state->mode = DICT;
+ /* fall through */
case DICT:
if (state->havedict == 0) {
RESTORE();
@@ -389,8 +390,10 @@ int zlib_inflate(z_streamp strm, int flush)
}
strm->adler = state->check = zlib_adler32(0L, NULL, 0);
state->mode = TYPE;
+ /* fall through */
case TYPE:
if (flush == Z_BLOCK) goto inf_leave;
+ /* fall through */
case TYPEDO:
if (state->last) {
BYTEBITS();
@@ -428,6 +431,7 @@ int zlib_inflate(z_streamp strm, int flush)
state->length = (unsigned)hold & 0xffff;
INITBITS();
state->mode = COPY;
+ /* fall through */
case COPY:
copy = state->length;
if (copy) {
@@ -461,6 +465,7 @@ int zlib_inflate(z_streamp strm, int flush)
#endif
state->have = 0;
state->mode = LENLENS;
+ /* fall through */
case LENLENS:
while (state->have < state->ncode) {
NEEDBITS(3);
@@ -481,6 +486,7 @@ int zlib_inflate(z_streamp strm, int flush)
}
state->have = 0;
state->mode = CODELENS;
+ /* fall through */
case CODELENS:
while (state->have < state->nlen + state->ndist) {
for (;;) {
@@ -554,6 +560,7 @@ int zlib_inflate(z_streamp strm, int flush)
break;
}
state->mode = LEN;
+ /* fall through */
case LEN:
if (have >= 6 && left >= 258) {
RESTORE();
@@ -593,6 +600,7 @@ int zlib_inflate(z_streamp strm, int flush)
}
state->extra = (unsigned)(this.op) & 15;
state->mode = LENEXT;
+ /* fall through */
case LENEXT:
if (state->extra) {
NEEDBITS(state->extra);
@@ -600,6 +608,7 @@ int zlib_inflate(z_streamp strm, int flush)
DROPBITS(state->extra);
}
state->mode = DIST;
+ /* fall through */
case DIST:
for (;;) {
this = state->distcode[BITS(state->distbits)];
@@ -625,6 +634,7 @@ int zlib_inflate(z_streamp strm, int flush)
state->offset = (unsigned)this.val;
state->extra = (unsigned)(this.op) & 15;
state->mode = DISTEXT;
+ /* fall through */
case DISTEXT:
if (state->extra) {
NEEDBITS(state->extra);
@@ -644,6 +654,7 @@ int zlib_inflate(z_streamp strm, int flush)
break;
}
state->mode = MATCH;
+ /* fall through */
case MATCH:
if (left == 0) goto inf_leave;
copy = out - left;
@@ -694,6 +705,7 @@ int zlib_inflate(z_streamp strm, int flush)
INITBITS();
}
state->mode = DONE;
+ /* fall through */
case DONE:
ret = Z_STREAM_END;
goto inf_leave;
diff --git a/mm/Kconfig b/mm/Kconfig
index 02301a89089e..d85e39da47ae 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -127,9 +127,6 @@ config SPARSEMEM_VMEMMAP
pfn_to_page and page_to_pfn operations. This is the most
efficient option when sufficient kernel resources are available.
-config HAVE_MEMBLOCK
- bool
-
config HAVE_MEMBLOCK_NODE_MAP
bool
@@ -142,9 +139,6 @@ config HAVE_GENERIC_GUP
config ARCH_DISCARD_MEMBLOCK
bool
-config NO_BOOTMEM
- bool
-
config MEMORY_ISOLATION
bool
@@ -481,7 +475,7 @@ config FRONTSWAP
config CMA
bool "Contiguous Memory Allocator"
- depends on HAVE_MEMBLOCK && MMU
+ depends on MMU
select MIGRATION
select MEMORY_ISOLATION
help
@@ -634,7 +628,6 @@ config MAX_STACK_SIZE_MB
config DEFERRED_STRUCT_PAGE_INIT
bool "Defer initialisation of struct pages to kthreads"
default n
- depends on NO_BOOTMEM
depends on SPARSEMEM
depends on !NEED_PER_CPU_KM
depends on 64BIT
diff --git a/mm/Makefile b/mm/Makefile
index 6485d5745dd7..d210cc9d6f80 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -42,17 +42,11 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
debug.o $(mmu-y)
obj-y += init-mm.o
-
-ifdef CONFIG_NO_BOOTMEM
- obj-y += nobootmem.o
-else
- obj-y += bootmem.o
-endif
+obj-y += memblock.o
ifdef CONFIG_MMU
obj-$(CONFIG_ADVISE_SYSCALLS) += madvise.o
endif
-obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o swap_slots.o
obj-$(CONFIG_FRONTSWAP) += frontswap.o
diff --git a/mm/bootmem.c b/mm/bootmem.c
deleted file mode 100644
index 97db0e8e362b..000000000000
--- a/mm/bootmem.c
+++ /dev/null
@@ -1,811 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * bootmem - A boot-time physical memory allocator and configurator
- *
- * Copyright (C) 1999 Ingo Molnar
- * 1999 Kanoj Sarcar, SGI
- * 2008 Johannes Weiner
- *
- * Access to this subsystem has to be serialized externally (which is true
- * for the boot process anyway).
- */
-#include <linux/init.h>
-#include <linux/pfn.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/kmemleak.h>
-#include <linux/range.h>
-#include <linux/bug.h>
-#include <linux/io.h>
-#include <linux/bootmem.h>
-
-#include "internal.h"
-
-/**
- * DOC: bootmem overview
- *
- * Bootmem is a boot-time physical memory allocator and configurator.
- *
- * It is used early in the boot process before the page allocator is
- * set up.
- *
- * Bootmem is based on the most basic of allocators, a First Fit
- * allocator which uses a bitmap to represent memory. If a bit is 1,
- * the page is allocated and 0 if unallocated. To satisfy allocations
- * of sizes smaller than a page, the allocator records the Page Frame
- * Number (PFN) of the last allocation and the offset the allocation
- * ended at. Subsequent small allocations are merged together and
- * stored on the same page.
- *
- * The information used by the bootmem allocator is represented by
- * :c:type:`struct bootmem_data`. An array to hold up to %MAX_NUMNODES
- * such structures is statically allocated and then it is discarded
- * when the system initialization completes. Each entry in this array
- * corresponds to a node with memory. For UMA systems only entry 0 is
- * used.
- *
- * The bootmem allocator is initialized during early architecture
- * specific setup. Each architecture is required to supply a
- * :c:func:`setup_arch` function which, among other tasks, is
- * responsible for acquiring the necessary parameters to initialise
- * the boot memory allocator. These parameters define limits of usable
- * physical memory:
- *
- * * @min_low_pfn - the lowest PFN that is available in the system
- * * @max_low_pfn - the highest PFN that may be addressed by low
- * memory (%ZONE_NORMAL)
- * * @max_pfn - the last PFN available to the system.
- *
- * After those limits are determined, the :c:func:`init_bootmem` or
- * :c:func:`init_bootmem_node` function should be called to initialize
- * the bootmem allocator. The UMA case should use the `init_bootmem`
- * function. It will initialize ``contig_page_data`` structure that
- * represents the only memory node in the system. In the NUMA case the
- * `init_bootmem_node` function should be called to initialize the
- * bootmem allocator for each node.
- *
- * Once the allocator is set up, it is possible to use either single
- * node or NUMA variant of the allocation APIs.
- */
-
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data __refdata contig_page_data = {
- .bdata = &bootmem_node_data[0]
-};
-EXPORT_SYMBOL(contig_page_data);
-#endif
-
-unsigned long max_low_pfn;
-unsigned long min_low_pfn;
-unsigned long max_pfn;
-unsigned long long max_possible_pfn;
-
-bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
-
-static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);
-
-static int bootmem_debug;
-
-static int __init bootmem_debug_setup(char *buf)
-{
- bootmem_debug = 1;
- return 0;
-}
-early_param("bootmem_debug", bootmem_debug_setup);
-
-#define bdebug(fmt, args...) ({ \
- if (unlikely(bootmem_debug)) \
- pr_info("bootmem::%s " fmt, \
- __func__, ## args); \
-})
-
-static unsigned long __init bootmap_bytes(unsigned long pages)
-{
- unsigned long bytes = DIV_ROUND_UP(pages, BITS_PER_BYTE);
-
- return ALIGN(bytes, sizeof(long));
-}
-
-/**
- * bootmem_bootmap_pages - calculate bitmap size in pages
- * @pages: number of pages the bitmap has to represent
- *
- * Return: the number of pages needed to hold the bitmap.
- */
-unsigned long __init bootmem_bootmap_pages(unsigned long pages)
-{
- unsigned long bytes = bootmap_bytes(pages);
-
- return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
-}
-
-/*
- * link bdata in order
- */
-static void __init link_bootmem(bootmem_data_t *bdata)
-{
- bootmem_data_t *ent;
-
- list_for_each_entry(ent, &bdata_list, list) {
- if (bdata->node_min_pfn < ent->node_min_pfn) {
- list_add_tail(&bdata->list, &ent->list);
- return;
- }
- }
-
- list_add_tail(&bdata->list, &bdata_list);
-}
-
-/*
- * Called once to set up the allocator itself.
- */
-static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
- unsigned long mapstart, unsigned long start, unsigned long end)
-{
- unsigned long mapsize;
-
- mminit_validate_memmodel_limits(&start, &end);
- bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
- bdata->node_min_pfn = start;
- bdata->node_low_pfn = end;
- link_bootmem(bdata);
-
- /*
- * Initially all pages are reserved - setup_arch() has to
- * register free RAM areas explicitly.
- */
- mapsize = bootmap_bytes(end - start);
- memset(bdata->node_bootmem_map, 0xff, mapsize);
-
- bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
- bdata - bootmem_node_data, start, mapstart, end, mapsize);
-
- return mapsize;
-}
-
-/**
- * init_bootmem_node - register a node as boot memory
- * @pgdat: node to register
- * @freepfn: pfn where the bitmap for this node is to be placed
- * @startpfn: first pfn on the node
- * @endpfn: first pfn after the node
- *
- * Return: the number of bytes needed to hold the bitmap for this node.
- */
-unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
- unsigned long startpfn, unsigned long endpfn)
-{
- return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
-}
-
-/**
- * init_bootmem - register boot memory
- * @start: pfn where the bitmap is to be placed
- * @pages: number of available physical pages
- *
- * Return: the number of bytes needed to hold the bitmap.
- */
-unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
-{
- max_low_pfn = pages;
- min_low_pfn = start;
- return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
-}
-
-void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
-{
- unsigned long cursor, end;
-
- kmemleak_free_part_phys(physaddr, size);
-
- cursor = PFN_UP(physaddr);
- end = PFN_DOWN(physaddr + size);
-
- for (; cursor < end; cursor++) {
- __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
- totalram_pages++;
- }
-}
-
-static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
-{
- struct page *page;
- unsigned long *map, start, end, pages, cur, count = 0;
-
- if (!bdata->node_bootmem_map)
- return 0;
-
- map = bdata->node_bootmem_map;
- start = bdata->node_min_pfn;
- end = bdata->node_low_pfn;
-
- bdebug("nid=%td start=%lx end=%lx\n",
- bdata - bootmem_node_data, start, end);
-
- while (start < end) {
- unsigned long idx, vec;
- unsigned shift;
-
- idx = start - bdata->node_min_pfn;
- shift = idx & (BITS_PER_LONG - 1);
- /*
- * vec holds at most BITS_PER_LONG map bits,
- * bit 0 corresponds to start.
- */
- vec = ~map[idx / BITS_PER_LONG];
-
- if (shift) {
- vec >>= shift;
- if (end - start >= BITS_PER_LONG)
- vec |= ~map[idx / BITS_PER_LONG + 1] <<
- (BITS_PER_LONG - shift);
- }
- /*
- * If we have a properly aligned and fully unreserved
- * BITS_PER_LONG block of pages in front of us, free
- * it in one go.
- */
- if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
- int order = ilog2(BITS_PER_LONG);
-
- __free_pages_bootmem(pfn_to_page(start), start, order);
- count += BITS_PER_LONG;
- start += BITS_PER_LONG;
- } else {
- cur = start;
-
- start = ALIGN(start + 1, BITS_PER_LONG);
- while (vec && cur != start) {
- if (vec & 1) {
- page = pfn_to_page(cur);
- __free_pages_bootmem(page, cur, 0);
- count++;
- }
- vec >>= 1;
- ++cur;
- }
- }
- }
-
- cur = bdata->node_min_pfn;
- page = virt_to_page(bdata->node_bootmem_map);
- pages = bdata->node_low_pfn - bdata->node_min_pfn;
- pages = bootmem_bootmap_pages(pages);
- count += pages;
- while (pages--)
- __free_pages_bootmem(page++, cur++, 0);
- bdata->node_bootmem_map = NULL;
-
- bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);
-
- return count;
-}
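The loop being removed above frees a whole BITS_PER_LONG-aligned block with a single higher-order call when its bitmap word is completely clear, and falls back to page-by-page freeing otherwise. A user-space sketch of that scan, assuming the node starts at pfn 0 so bitmap index equals pfn (free_range() is a made-up stand-in for __free_pages_bootmem()):

#include <stdio.h>

#define BITS_PER_LONG 64                /* LP64 assumed */

/* stand-in for __free_pages_bootmem(): just report what would be freed */
static void free_range(unsigned long pfn, unsigned int order)
{
        printf("free pfn %lu, order %u (%lu pages)\n", pfn, order, 1UL << order);
}

/* walk 'map' (one bit per page, set bit = reserved) over pfns [start, end) */
static void release_free_pages(const unsigned long *map,
                               unsigned long start, unsigned long end)
{
        while (start < end) {
                unsigned long vec = ~map[start / BITS_PER_LONG]; /* set = free */

                if ((start % BITS_PER_LONG) == 0 && vec == ~0UL &&
                    start + BITS_PER_LONG <= end) {
                        free_range(start, 6);           /* ilog2(64) == 6 */
                        start += BITS_PER_LONG;
                } else {
                        if (vec & (1UL << (start % BITS_PER_LONG)))
                                free_range(start, 0);
                        start++;
                }
        }
}

int main(void)
{
        /* word 0 fully free; in word 1 only pfns 64 and 65 are free */
        unsigned long map[2] = { 0, ~0x3UL };

        release_free_pages(map, 0, 128);
        return 0;
}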
-
-static int reset_managed_pages_done __initdata;
-
-void reset_node_managed_pages(pg_data_t *pgdat)
-{
- struct zone *z;
-
- for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
- z->managed_pages = 0;
-}
-
-void __init reset_all_zones_managed_pages(void)
-{
- struct pglist_data *pgdat;
-
- if (reset_managed_pages_done)
- return;
-
- for_each_online_pgdat(pgdat)
- reset_node_managed_pages(pgdat);
-
- reset_managed_pages_done = 1;
-}
-
-unsigned long __init free_all_bootmem(void)
-{
- unsigned long total_pages = 0;
- bootmem_data_t *bdata;
-
- reset_all_zones_managed_pages();
-
- list_for_each_entry(bdata, &bdata_list, list)
- total_pages += free_all_bootmem_core(bdata);
-
- totalram_pages += total_pages;
-
- return total_pages;
-}
-
-static void __init __free(bootmem_data_t *bdata,
- unsigned long sidx, unsigned long eidx)
-{
- unsigned long idx;
-
- bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
- sidx + bdata->node_min_pfn,
- eidx + bdata->node_min_pfn);
-
- if (WARN_ON(bdata->node_bootmem_map == NULL))
- return;
-
- if (bdata->hint_idx > sidx)
- bdata->hint_idx = sidx;
-
- for (idx = sidx; idx < eidx; idx++)
- if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
- BUG();
-}
-
-static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
- unsigned long eidx, int flags)
-{
- unsigned long idx;
- int exclusive = flags & BOOTMEM_EXCLUSIVE;
-
- bdebug("nid=%td start=%lx end=%lx flags=%x\n",
- bdata - bootmem_node_data,
- sidx + bdata->node_min_pfn,
- eidx + bdata->node_min_pfn,
- flags);
-
- if (WARN_ON(bdata->node_bootmem_map == NULL))
- return 0;
-
- for (idx = sidx; idx < eidx; idx++)
- if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
- if (exclusive) {
- __free(bdata, sidx, idx);
- return -EBUSY;
- }
- bdebug("silent double reserve of PFN %lx\n",
- idx + bdata->node_min_pfn);
- }
- return 0;
-}
-
-static int __init mark_bootmem_node(bootmem_data_t *bdata,
- unsigned long start, unsigned long end,
- int reserve, int flags)
-{
- unsigned long sidx, eidx;
-
- bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
- bdata - bootmem_node_data, start, end, reserve, flags);
-
- BUG_ON(start < bdata->node_min_pfn);
- BUG_ON(end > bdata->node_low_pfn);
-
- sidx = start - bdata->node_min_pfn;
- eidx = end - bdata->node_min_pfn;
-
- if (reserve)
- return __reserve(bdata, sidx, eidx, flags);
- else
- __free(bdata, sidx, eidx);
- return 0;
-}
-
-static int __init mark_bootmem(unsigned long start, unsigned long end,
- int reserve, int flags)
-{
- unsigned long pos;
- bootmem_data_t *bdata;
-
- pos = start;
- list_for_each_entry(bdata, &bdata_list, list) {
- int err;
- unsigned long max;
-
- if (pos < bdata->node_min_pfn ||
- pos >= bdata->node_low_pfn) {
- BUG_ON(pos != start);
- continue;
- }
-
- max = min(bdata->node_low_pfn, end);
-
- err = mark_bootmem_node(bdata, pos, max, reserve, flags);
- if (reserve && err) {
- mark_bootmem(start, pos, 0, 0);
- return err;
- }
-
- if (max == end)
- return 0;
- pos = bdata->node_low_pfn;
- }
- BUG();
-}
-
-void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
- unsigned long size)
-{
- unsigned long start, end;
-
- kmemleak_free_part_phys(physaddr, size);
-
- start = PFN_UP(physaddr);
- end = PFN_DOWN(physaddr + size);
-
- mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
-}
-
-void __init free_bootmem(unsigned long physaddr, unsigned long size)
-{
- unsigned long start, end;
-
- kmemleak_free_part_phys(physaddr, size);
-
- start = PFN_UP(physaddr);
- end = PFN_DOWN(physaddr + size);
-
- mark_bootmem(start, end, 0, 0);
-}
-
-/**
- * reserve_bootmem_node - mark a page range as reserved
- * @pgdat: node the range resides on
- * @physaddr: starting address of the range
- * @size: size of the range in bytes
- * @flags: reservation flags (see linux/bootmem.h)
- *
- * Partial pages will be reserved.
- *
- * The range must reside completely on the specified node.
- *
- * Return: 0 on success, -errno on failure.
- */
-int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
- unsigned long size, int flags)
-{
- unsigned long start, end;
-
- start = PFN_DOWN(physaddr);
- end = PFN_UP(physaddr + size);
-
- return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
-}
-
-/**
- * reserve_bootmem - mark a page range as reserved
- * @addr: starting address of the range
- * @size: size of the range in bytes
- * @flags: reservation flags (see linux/bootmem.h)
- *
- * Partial pages will be reserved.
- *
- * The range must be contiguous but may span node boundaries.
- *
- * Return: 0 on success, -errno on failure.
- */
-int __init reserve_bootmem(unsigned long addr, unsigned long size,
- int flags)
-{
- unsigned long start, end;
-
- start = PFN_DOWN(addr);
- end = PFN_UP(addr + size);
-
- return mark_bootmem(start, end, 1, flags);
-}
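Worth noting about the two kernel-doc blocks above: reserve_bootmem() rounds outward (PFN_DOWN of the start, PFN_UP of the end) so partial pages end up reserved, while free_bootmem() rounds inward so partial pages stay reserved. A tiny numeric illustration (PAGE_SHIFT of 12 assumed):

#include <stdio.h>

#define PAGE_SHIFT  12
#define PAGE_SIZE   (1UL << PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
        unsigned long physaddr = 0x1080;        /* starts mid-page  */
        unsigned long size     = 0x2000;        /* two pages worth  */

        /* reserve_bootmem(): partial pages are reserved (round outward) */
        printf("reserve pfns [%lu, %lu)\n",
               PFN_DOWN(physaddr), PFN_UP(physaddr + size));

        /* free_bootmem(): partial pages are left reserved (round inward) */
        printf("free    pfns [%lu, %lu)\n",
               PFN_UP(physaddr), PFN_DOWN(physaddr + size));
        return 0;
}

This prints [1, 4) for the reservation but only [2, 3) for the free.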
-
-static unsigned long __init align_idx(struct bootmem_data *bdata,
- unsigned long idx, unsigned long step)
-{
- unsigned long base = bdata->node_min_pfn;
-
- /*
- * Align the index with respect to the node start so that the
- * combination of both satisfies the requested alignment.
- */
-
- return ALIGN(base + idx, step) - base;
-}
-
-static unsigned long __init align_off(struct bootmem_data *bdata,
- unsigned long off, unsigned long align)
-{
- unsigned long base = PFN_PHYS(bdata->node_min_pfn);
-
- /* Same as align_idx for byte offsets */
-
- return ALIGN(base + off, align) - base;
-}
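align_idx() above aligns the physical pfn (node_min_pfn + idx) rather than the raw bitmap index, so the chosen page is really aligned even when the node does not begin on an aligned pfn. A minimal demonstration of the difference (numbers made up):

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

/* same idea as align_idx(): align base+idx, then convert back to an index */
static unsigned long align_idx(unsigned long base, unsigned long idx,
                               unsigned long step)
{
        return ALIGN(base + idx, step) - base;
}

int main(void)
{
        unsigned long node_min_pfn = 5;         /* node starts on an odd pfn */
        unsigned long idx = align_idx(node_min_pfn, 1, 4);

        /* idx 3 -> pfn 8, which is 4-pfn aligned; the naive ALIGN(1, 4) = 4
         * would have picked pfn 9, which is not */
        printf("idx=%lu -> pfn=%lu\n", idx, node_min_pfn + idx);
        return 0;
}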
-
-static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
- unsigned long size, unsigned long align,
- unsigned long goal, unsigned long limit)
-{
- unsigned long fallback = 0;
- unsigned long min, max, start, sidx, midx, step;
-
- bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
- bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
- align, goal, limit);
-
- BUG_ON(!size);
- BUG_ON(align & (align - 1));
- BUG_ON(limit && goal + size > limit);
-
- if (!bdata->node_bootmem_map)
- return NULL;
-
- min = bdata->node_min_pfn;
- max = bdata->node_low_pfn;
-
- goal >>= PAGE_SHIFT;
- limit >>= PAGE_SHIFT;
-
- if (limit && max > limit)
- max = limit;
- if (max <= min)
- return NULL;
-
- step = max(align >> PAGE_SHIFT, 1UL);
-
- if (goal && min < goal && goal < max)
- start = ALIGN(goal, step);
- else
- start = ALIGN(min, step);
-
- sidx = start - bdata->node_min_pfn;
- midx = max - bdata->node_min_pfn;
-
- if (bdata->hint_idx > sidx) {
- /*
- * Handle the valid case of sidx being zero and still
- * catch the fallback below.
- */
- fallback = sidx + 1;
- sidx = align_idx(bdata, bdata->hint_idx, step);
- }
-
- while (1) {
- int merge;
- void *region;
- unsigned long eidx, i, start_off, end_off;
-find_block:
- sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
- sidx = align_idx(bdata, sidx, step);
- eidx = sidx + PFN_UP(size);
-
- if (sidx >= midx || eidx > midx)
- break;
-
- for (i = sidx; i < eidx; i++)
- if (test_bit(i, bdata->node_bootmem_map)) {
- sidx = align_idx(bdata, i, step);
- if (sidx == i)
- sidx += step;
- goto find_block;
- }
-
- if (bdata->last_end_off & (PAGE_SIZE - 1) &&
- PFN_DOWN(bdata->last_end_off) + 1 == sidx)
- start_off = align_off(bdata, bdata->last_end_off, align);
- else
- start_off = PFN_PHYS(sidx);
-
- merge = PFN_DOWN(start_off) < sidx;
- end_off = start_off + size;
-
- bdata->last_end_off = end_off;
- bdata->hint_idx = PFN_UP(end_off);
-
- /*
- * Reserve the area now:
- */
- if (__reserve(bdata, PFN_DOWN(start_off) + merge,
- PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
- BUG();
-
- region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
- start_off);
- memset(region, 0, size);
- /*
- * The min_count is set to 0 so that bootmem allocated blocks
- * are never reported as leaks.
- */
- kmemleak_alloc(region, size, 0, 0);
- return region;
- }
-
- if (fallback) {
- sidx = align_idx(bdata, fallback - 1, step);
- fallback = 0;
- goto find_block;
- }
-
- return NULL;
-}
-
-static void * __init alloc_bootmem_core(unsigned long size,
- unsigned long align,
- unsigned long goal,
- unsigned long limit)
-{
- bootmem_data_t *bdata;
- void *region;
-
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc(size, GFP_NOWAIT);
-
- list_for_each_entry(bdata, &bdata_list, list) {
- if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
- continue;
- if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
- break;
-
- region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
- if (region)
- return region;
- }
-
- return NULL;
-}
-
-static void * __init ___alloc_bootmem_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal,
- unsigned long limit)
-{
- void *ptr;
-
-restart:
- ptr = alloc_bootmem_core(size, align, goal, limit);
- if (ptr)
- return ptr;
- if (goal) {
- goal = 0;
- goto restart;
- }
-
- return NULL;
-}
-
-void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
- unsigned long goal)
-{
- unsigned long limit = 0;
-
- return ___alloc_bootmem_nopanic(size, align, goal, limit);
-}
-
-static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
- unsigned long goal, unsigned long limit)
-{
- void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);
-
- if (mem)
- return mem;
- /*
- * Whoops, we cannot satisfy the allocation request.
- */
- pr_alert("bootmem alloc of %lu bytes failed!\n", size);
- panic("Out of memory");
- return NULL;
-}
-
-void * __init __alloc_bootmem(unsigned long size, unsigned long align,
- unsigned long goal)
-{
- unsigned long limit = 0;
-
- return ___alloc_bootmem(size, align, goal, limit);
-}
-
-void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
- unsigned long size, unsigned long align,
- unsigned long goal, unsigned long limit)
-{
- void *ptr;
-
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-again:
-
- /* do not panic in alloc_bootmem_bdata() */
- if (limit && goal + size > limit)
- limit = 0;
-
- ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
- if (ptr)
- return ptr;
-
- ptr = alloc_bootmem_core(size, align, goal, limit);
- if (ptr)
- return ptr;
-
- if (goal) {
- goal = 0;
- goto again;
- }
-
- return NULL;
-}
-
-void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
- return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
-}
-
-void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal,
- unsigned long limit)
-{
- void *ptr;
-
- ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
- if (ptr)
- return ptr;
-
- pr_alert("bootmem alloc of %lu bytes failed!\n", size);
- panic("Out of memory");
- return NULL;
-}
-
-void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
- return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
-}
-
-void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
-#ifdef MAX_DMA32_PFN
- unsigned long end_pfn;
-
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
- /* update goal according ...MAX_DMA32_PFN */
- end_pfn = pgdat_end_pfn(pgdat);
-
- if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
- (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
- void *ptr;
- unsigned long new_goal;
-
- new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
- ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
- new_goal, 0);
- if (ptr)
- return ptr;
- }
-#endif
-
- return __alloc_bootmem_node(pgdat, size, align, goal);
-
-}
-
-void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
- unsigned long goal)
-{
- return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
-}
-
-void * __init __alloc_bootmem_low_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal)
-{
- return ___alloc_bootmem_nopanic(size, align, goal,
- ARCH_LOW_ADDRESS_LIMIT);
-}
-
-void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
- return ___alloc_bootmem_node(pgdat, size, align,
- goal, ARCH_LOW_ADDRESS_LIMIT);
-}
diff --git a/mm/gup.c b/mm/gup.c
index 841d7ef53591..f76e77a2d34b 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1817,8 +1817,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* interrupts disabled by get_futex_key.
*
* With interrupts disabled, we block page table pages from being
- * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
- * for more details.
+ * freed from under us. See struct mmu_table_batch comments in
+ * include/asm-generic/tlb.h for more details.
*
* We do not adopt an rcu_read_lock(.) here as we also want to
* block IPIs that come from THPs splitting.
diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
index debf11388a60..5b42d3d4b60a 100644
--- a/mm/gup_benchmark.c
+++ b/mm/gup_benchmark.c
@@ -27,6 +27,9 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
int nr;
struct page **pages;
+ if (gup->size > ULONG_MAX)
+ return -EINVAL;
+
nr_pages = gup->size / PAGE_SIZE;
pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);
if (!pages)
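The added check guards the u64-to-unsigned-long narrowing in the size/PAGE_SIZE division: on a 32-bit kernel a request larger than ULONG_MAX would otherwise be silently truncated before nr_pages is computed, while on 64-bit the test can never fire. A hedged stand-alone illustration, assuming gup->size is a __u64:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define PAGE_SIZE 4096UL

struct fake_gup_req { uint64_t size; };         /* assumed __u64 field */

static long nr_pages_checked(const struct fake_gup_req *gup)
{
        if (gup->size > ULONG_MAX)      /* can only trigger when long is 32-bit */
                return -1;              /* -EINVAL in the kernel                */
        return (unsigned long)gup->size / PAGE_SIZE;
}

int main(void)
{
        struct fake_gup_req req = { .size = 0x100000000ULL + PAGE_SIZE };

        /* On an LP64 build this request is fine; on a 32-bit build it is now
         * rejected instead of being silently truncated to a single page. */
        printf("nr_pages = %ld\n", nr_pages_checked(&req));
        return 0;
}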
diff --git a/mm/hmm.c b/mm/hmm.c
index 774d684fa2b4..90c34f3d1243 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -11,7 +11,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * Authors: Jérôme Glisse <jglisse@redhat.com>
+ * Authors: Jérôme Glisse <jglisse@redhat.com>
*/
/*
* Refer to include/linux/hmm.h for information about heterogeneous memory
@@ -43,7 +43,6 @@ static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
*
* @mm: mm struct this HMM struct is bound to
* @lock: lock protecting ranges list
- * @sequence: we track updates to the CPU page table with a sequence number
* @ranges: list of range being snapshotted
* @mirrors: list of mirrors for this mm
* @mmu_notifier: mmu notifier to track updates to CPU page table
@@ -52,7 +51,6 @@ static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
struct hmm {
struct mm_struct *mm;
spinlock_t lock;
- atomic_t sequence;
struct list_head ranges;
struct list_head mirrors;
struct mmu_notifier mmu_notifier;
@@ -85,22 +83,11 @@ static struct hmm *hmm_register(struct mm_struct *mm)
return NULL;
INIT_LIST_HEAD(&hmm->mirrors);
init_rwsem(&hmm->mirrors_sem);
- atomic_set(&hmm->sequence, 0);
hmm->mmu_notifier.ops = NULL;
INIT_LIST_HEAD(&hmm->ranges);
spin_lock_init(&hmm->lock);
hmm->mm = mm;
- /*
- * We should only get here if hold the mmap_sem in write mode ie on
- * registration of first mirror through hmm_mirror_register()
- */
- hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
- if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
- kfree(hmm);
- return NULL;
- }
-
spin_lock(&mm->page_table_lock);
if (!mm->hmm)
mm->hmm = hmm;
@@ -108,12 +95,27 @@ static struct hmm *hmm_register(struct mm_struct *mm)
cleanup = true;
spin_unlock(&mm->page_table_lock);
- if (cleanup) {
- mmu_notifier_unregister(&hmm->mmu_notifier, mm);
- kfree(hmm);
- }
+ if (cleanup)
+ goto error;
+
+ /*
+	 * We should only get here if we hold the mmap_sem in write mode, i.e. on
+	 * registration of the first mirror through hmm_mirror_register()
+ */
+ hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
+ if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
+ goto error_mm;
return mm->hmm;
+
+error_mm:
+ spin_lock(&mm->page_table_lock);
+ if (mm->hmm == hmm)
+ mm->hmm = NULL;
+ spin_unlock(&mm->page_table_lock);
+error:
+ kfree(hmm);
+ return NULL;
}
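The restructured hmm_register() above is a standard goto-unwind: publish mm->hmm first, and if the later notifier registration fails, unpublish it again before freeing. A compact model of that shape with invented stand-ins (not the HMM code itself):

#include <stdio.h>
#include <stdlib.h>

struct ctx { int published; };

static int publish(struct ctx *c)           { c->published = 1; return 0; }
static void unpublish(struct ctx *c)        { c->published = 0; }
static int register_notifier(struct ctx *c) { return c ? -1 : 0; } /* pretend failure */

static struct ctx *ctx_create(void)
{
        struct ctx *c = calloc(1, sizeof(*c));

        if (!c)
                return NULL;
        if (publish(c))
                goto error;             /* nothing published yet: just free */
        if (register_notifier(c))
                goto error_unpublish;   /* undo the publish before freeing  */
        return c;

error_unpublish:
        unpublish(c);
error:
        free(c);
        return NULL;
}

int main(void)
{
        printf("ctx_create() -> %p\n", (void *)ctx_create());
        return 0;
}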
void hmm_mm_destroy(struct mm_struct *mm)
@@ -121,10 +123,8 @@ void hmm_mm_destroy(struct mm_struct *mm)
kfree(mm->hmm);
}
-static void hmm_invalidate_range(struct hmm *hmm,
- enum hmm_update_type action,
- unsigned long start,
- unsigned long end)
+static int hmm_invalidate_range(struct hmm *hmm, bool device,
+ const struct hmm_update *update)
{
struct hmm_mirror *mirror;
struct hmm_range *range;
@@ -133,22 +133,33 @@ static void hmm_invalidate_range(struct hmm *hmm,
list_for_each_entry(range, &hmm->ranges, list) {
unsigned long addr, idx, npages;
- if (end < range->start || start >= range->end)
+ if (update->end < range->start || update->start >= range->end)
continue;
range->valid = false;
- addr = max(start, range->start);
+ addr = max(update->start, range->start);
idx = (addr - range->start) >> PAGE_SHIFT;
- npages = (min(range->end, end) - addr) >> PAGE_SHIFT;
+ npages = (min(range->end, update->end) - addr) >> PAGE_SHIFT;
memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
}
spin_unlock(&hmm->lock);
+ if (!device)
+ return 0;
+
down_read(&hmm->mirrors_sem);
- list_for_each_entry(mirror, &hmm->mirrors, list)
- mirror->ops->sync_cpu_device_pagetables(mirror, action,
- start, end);
+ list_for_each_entry(mirror, &hmm->mirrors, list) {
+ int ret;
+
+ ret = mirror->ops->sync_cpu_device_pagetables(mirror, update);
+ if (!update->blockable && ret == -EAGAIN) {
+ up_read(&hmm->mirrors_sem);
+ return -EAGAIN;
+ }
+ }
up_read(&hmm->mirrors_sem);
+
+ return 0;
}
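With the update now carrying a blockable flag, the mirror walk above has to bail out with -EAGAIN as soon as one driver callback would need to block, instead of sleeping inside a non-blockable invalidation. A rough user-space model of that contract (structures and callbacks invented for illustration):

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

struct update { unsigned long start, end; bool blockable; };

struct mirror {
        /* driver callback; may return -EAGAIN if it would have to block */
        int (*sync)(struct mirror *m, const struct update *u);
};

static int invalidate(struct mirror **mirrors, int n, const struct update *u)
{
        for (int i = 0; i < n; i++) {
                int ret = mirrors[i]->sync(mirrors[i], u);

                /* non-blockable caller: give up as soon as a driver would block */
                if (!u->blockable && ret == -EAGAIN)
                        return -EAGAIN;
        }
        return 0;
}

static int busy_sync(struct mirror *m, const struct update *u)
{
        return u->blockable ? 0 : -EAGAIN;      /* pretend we would block */
}

int main(void)
{
        struct mirror m = { .sync = busy_sync };
        struct mirror *list[] = { &m };
        struct update u = { .start = 0, .end = 4096, .blockable = false };

        printf("invalidate() = %d\n", invalidate(list, 1, &u));
        return 0;
}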
static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -178,18 +189,21 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
}
static int hmm_invalidate_range_start(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end,
- bool blockable)
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end,
+ bool blockable)
{
+ struct hmm_update update;
struct hmm *hmm = mm->hmm;
VM_BUG_ON(!hmm);
- atomic_inc(&hmm->sequence);
-
- return 0;
+ update.start = start;
+ update.end = end;
+ update.event = HMM_UPDATE_INVALIDATE;
+ update.blockable = blockable;
+ return hmm_invalidate_range(hmm, true, &update);
}
static void hmm_invalidate_range_end(struct mmu_notifier *mn,
@@ -197,11 +211,16 @@ static void hmm_invalidate_range_end(struct mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
+ struct hmm_update update;
struct hmm *hmm = mm->hmm;
VM_BUG_ON(!hmm);
- hmm_invalidate_range(mm->hmm, HMM_UPDATE_INVALIDATE, start, end);
+ update.start = start;
+ update.end = end;
+ update.event = HMM_UPDATE_INVALIDATE;
+ update.blockable = true;
+ hmm_invalidate_range(hmm, false, &update);
}
static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
@@ -278,12 +297,13 @@ void hmm_mirror_unregister(struct hmm_mirror *mirror)
if (!should_unregister || mm == NULL)
return;
+ mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);
+
spin_lock(&mm->page_table_lock);
if (mm->hmm == hmm)
mm->hmm = NULL;
spin_unlock(&mm->page_table_lock);
- mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);
kfree(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);
@@ -571,22 +591,42 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
{
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
+ struct vm_area_struct *vma = walk->vma;
uint64_t *pfns = range->pfns;
unsigned long addr = start, i;
pte_t *ptep;
+ pmd_t pmd;
- i = (addr - range->start) >> PAGE_SHIFT;
again:
- if (pmd_none(*pmdp))
+ pmd = READ_ONCE(*pmdp);
+ if (pmd_none(pmd))
return hmm_vma_walk_hole(start, end, walk);
- if (pmd_huge(*pmdp) && (range->vma->vm_flags & VM_HUGETLB))
+ if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
return hmm_pfns_bad(start, end, walk);
- if (pmd_devmap(*pmdp) || pmd_trans_huge(*pmdp)) {
- pmd_t pmd;
+ if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
+ bool fault, write_fault;
+ unsigned long npages;
+ uint64_t *pfns;
+
+ i = (addr - range->start) >> PAGE_SHIFT;
+ npages = (end - addr) >> PAGE_SHIFT;
+ pfns = &range->pfns[i];
+ hmm_range_need_fault(hmm_vma_walk, pfns, npages,
+ 0, &fault, &write_fault);
+ if (fault || write_fault) {
+ hmm_vma_walk->last = addr;
+ pmd_migration_entry_wait(vma->vm_mm, pmdp);
+ return -EAGAIN;
+ }
+ return 0;
+ } else if (!pmd_present(pmd))
+ return hmm_pfns_bad(start, end, walk);
+
+ if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
/*
* No need to take pmd_lock here, even if some other threads
* is splitting the huge pmd we will get that event through
@@ -601,13 +641,21 @@ again:
if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
goto again;
+ i = (addr - range->start) >> PAGE_SHIFT;
return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
}
- if (pmd_bad(*pmdp))
+ /*
+	 * We have handled all the valid cases above, i.e. none, migration,
+	 * huge or transparent huge. At this point the pmd is either a valid
+	 * entry pointing to a pte directory or a bad pmd that will not
+	 * recover.
+ */
+ if (pmd_bad(pmd))
return hmm_pfns_bad(start, end, walk);
ptep = pte_offset_map(pmdp, addr);
+ i = (addr - range->start) >> PAGE_SHIFT;
for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
int r;
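The hunk above snapshots the pmd once with READ_ONCE() and classifies only that snapshot (none, migration entry, not present, devmap/huge, bad, or a plain pte table), so the decision cannot be torn by a concurrent update of *pmdp. The idea in miniature (a volatile variable standing in for the shared page-table slot):

#include <stdio.h>

/* shared slot that another thread could be rewriting underneath us */
static volatile unsigned long pmd_slot;

static void walk_once(void)
{
        unsigned long pmd = pmd_slot;   /* one snapshot, like READ_ONCE(*pmdp) */

        /* every test below looks at the snapshot, never at pmd_slot again */
        if (pmd == 0)
                printf("none: treat the range as a hole\n");
        else if (pmd & 0x1)
                printf("present: walk the pte table under it\n");
        else
                printf("not present: migration entry or bad pmd\n");
}

int main(void)
{
        pmd_slot = 0x1;
        walk_once();
        return 0;
}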
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7b5c0ad9a6bd..c007fb5fb8d5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -15,7 +15,7 @@
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/mmdebug.h>
@@ -2100,9 +2100,9 @@ int __alloc_bootmem_huge_page(struct hstate *h)
for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
void *addr;
- addr = memblock_virt_alloc_try_nid_raw(
+ addr = memblock_alloc_try_nid_raw(
huge_page_size(h), huge_page_size(h),
- 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
+ 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
if (addr) {
/*
* Use the beginning of the huge page to store the
diff --git a/mm/internal.h b/mm/internal.h
index 87256ae1bef8..291eb2b6d1d8 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -161,7 +161,7 @@ static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
}
extern int __isolate_free_page(struct page *page, unsigned int order);
-extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
+extern void memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c
index 7a2a2f13f86f..c7550eb65922 100644
--- a/mm/kasan/kasan_init.c
+++ b/mm/kasan/kasan_init.c
@@ -10,11 +10,10 @@
*
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
-#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>
@@ -83,8 +82,8 @@ static inline bool kasan_zero_page_entry(pte_t pte)
static __init void *early_alloc(size_t size, int node)
{
- return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
- BOOTMEM_ALLOC_ACCESSIBLE, node);
+ return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
+ MEMBLOCK_ALLOC_ACCESSIBLE, node);
}
static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 4f7e4b5a2f08..877de4fa0720 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -92,7 +92,7 @@
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
diff --git a/mm/memblock.c b/mm/memblock.c
index a85315083b5a..7df468c8ebc8 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -20,7 +20,6 @@
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <asm/sections.h>
#include <linux/io.h>
@@ -82,6 +81,16 @@
 * initialization completes.
*/
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+struct pglist_data __refdata contig_page_data;
+EXPORT_SYMBOL(contig_page_data);
+#endif
+
+unsigned long max_low_pfn;
+unsigned long min_low_pfn;
+unsigned long max_pfn;
+unsigned long long max_possible_pfn;
+
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
@@ -1238,8 +1247,11 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
{
phys_addr_t found;
- if (!align)
+ if (!align) {
+ /* Can't use WARNs this early in boot on powerpc */
+ dump_stack();
align = SMP_CACHE_BYTES;
+ }
found = memblock_find_in_range_node(size, align, start, end, nid,
flags);
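A zero align used to be promoted to SMP_CACHE_BYTES silently; the hunk above keeps the promotion but dumps a stack so the offending caller can be found (a plain WARN is avoided because, per the comment, this can run too early in boot on powerpc). The same default-plus-diagnostic pattern in user space (glibc's backtrace() standing in for the kernel's dump_stack(); cache-line size assumed):

#include <stdio.h>
#include <execinfo.h>

#define SMP_CACHE_BYTES 64UL            /* assumed typical cache line size */

static void dump_stack(void)
{
        void *frames[16];
        int n = backtrace(frames, 16);

        backtrace_symbols_fd(frames, n, 2);     /* symbolized trace to stderr */
}

static unsigned long fixup_align(unsigned long align)
{
        if (!align) {
                dump_stack();            /* flag the sloppy caller ...        */
                align = SMP_CACHE_BYTES; /* ... but still use a sane default  */
        }
        return align;
}

int main(void)
{
        printf("align = %lu\n", fixup_align(0));
        return 0;
}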
@@ -1269,7 +1281,7 @@ phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}
-phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
+phys_addr_t __init memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
enum memblock_flags flags = choose_memblock_flags();
phys_addr_t ret;
@@ -1304,23 +1316,22 @@ phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys
return alloc;
}
-phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
+phys_addr_t __init memblock_phys_alloc(phys_addr_t size, phys_addr_t align)
{
return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
-phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
+phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
- phys_addr_t res = memblock_alloc_nid(size, align, nid);
+ phys_addr_t res = memblock_phys_alloc_nid(size, align, nid);
if (res)
return res;
return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
-#if defined(CONFIG_NO_BOOTMEM)
/**
- * memblock_virt_alloc_internal - allocate boot memory block
+ * memblock_alloc_internal - allocate boot memory block
* @size: size of memory block to be allocated in bytes
* @align: alignment of the region and block's size
* @min_addr: the lower bound of the memory region to allocate (phys address)
@@ -1333,9 +1344,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
* hold the requested memory.
*
* The allocation is performed from memory region limited by
- * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
- *
- * The memory block is aligned on %SMP_CACHE_BYTES if @align == 0.
+ * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE.
*
* The phys address of allocated boot memory block is converted to virtual and
* allocated memory is reset to 0.
@@ -1346,7 +1355,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
* Return:
* Virtual address of allocated memory block on success, NULL on failure.
*/
-static void * __init memblock_virt_alloc_internal(
+static void * __init memblock_alloc_internal(
phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid)
@@ -1361,13 +1370,15 @@ static void * __init memblock_virt_alloc_internal(
/*
* Detect any accidental use of these APIs after slab is ready, as at
* this moment memblock may be deinitialized already and its
- * internal data may be destroyed (after execution of free_all_bootmem)
+ * internal data may be destroyed (after execution of memblock_free_all)
*/
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, nid);
- if (!align)
+ if (!align) {
+ dump_stack();
align = SMP_CACHE_BYTES;
+ }
if (max_addr > memblock.current_limit)
max_addr = memblock.current_limit;
@@ -1413,14 +1424,14 @@ done:
}
/**
- * memblock_virt_alloc_try_nid_raw - allocate boot memory block without zeroing
+ * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
* memory and without panicking
* @size: size of memory block to be allocated in bytes
* @align: alignment of the region and block's size
* @min_addr: the lower bound of the memory region from where the allocation
* is preferred (phys address)
* @max_addr: the upper bound of the memory region from where the allocation
- * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
+ * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
* allocate only from memory limited by memblock.current_limit value
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
@@ -1431,7 +1442,7 @@ done:
* Return:
* Virtual address of allocated memory block on success, NULL on failure.
*/
-void * __init memblock_virt_alloc_try_nid_raw(
+void * __init memblock_alloc_try_nid_raw(
phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid)
@@ -1442,7 +1453,7 @@ void * __init memblock_virt_alloc_try_nid_raw(
__func__, (u64)size, (u64)align, nid, &min_addr,
&max_addr, (void *)_RET_IP_);
- ptr = memblock_virt_alloc_internal(size, align,
+ ptr = memblock_alloc_internal(size, align,
min_addr, max_addr, nid);
if (ptr && size > 0)
page_init_poison(ptr, size);
@@ -1451,13 +1462,13 @@ void * __init memblock_virt_alloc_try_nid_raw(
}
/**
- * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
+ * memblock_alloc_try_nid_nopanic - allocate boot memory block
* @size: size of memory block to be allocated in bytes
* @align: alignment of the region and block's size
* @min_addr: the lower bound of the memory region from where the allocation
* is preferred (phys address)
* @max_addr: the upper bound of the memory region from where the allocation
- * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
+ * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
* allocate only from memory limited by memblock.current_limit value
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
@@ -1467,7 +1478,7 @@ void * __init memblock_virt_alloc_try_nid_raw(
* Return:
* Virtual address of allocated memory block on success, NULL on failure.
*/
-void * __init memblock_virt_alloc_try_nid_nopanic(
+void * __init memblock_alloc_try_nid_nopanic(
phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid)
@@ -1478,7 +1489,7 @@ void * __init memblock_virt_alloc_try_nid_nopanic(
__func__, (u64)size, (u64)align, nid, &min_addr,
&max_addr, (void *)_RET_IP_);
- ptr = memblock_virt_alloc_internal(size, align,
+ ptr = memblock_alloc_internal(size, align,
min_addr, max_addr, nid);
if (ptr)
memset(ptr, 0, size);
@@ -1486,24 +1497,24 @@ void * __init memblock_virt_alloc_try_nid_nopanic(
}
/**
- * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
+ * memblock_alloc_try_nid - allocate boot memory block with panicking
* @size: size of memory block to be allocated in bytes
* @align: alignment of the region and block's size
* @min_addr: the lower bound of the memory region from where the allocation
* is preferred (phys address)
* @max_addr: the upper bound of the memory region from where the allocation
- * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
+ * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
* allocate only from memory limited by memblock.current_limit value
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
- * Public panicking version of memblock_virt_alloc_try_nid_nopanic()
+ * Public panicking version of memblock_alloc_try_nid_nopanic()
* which provides debug information (including caller info), if enabled,
* and panics if the request can not be satisfied.
*
* Return:
* Virtual address of allocated memory block on success, NULL on failure.
*/
-void * __init memblock_virt_alloc_try_nid(
+void * __init memblock_alloc_try_nid(
phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid)
@@ -1513,7 +1524,7 @@ void * __init memblock_virt_alloc_try_nid(
memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
__func__, (u64)size, (u64)align, nid, &min_addr,
&max_addr, (void *)_RET_IP_);
- ptr = memblock_virt_alloc_internal(size, align,
+ ptr = memblock_alloc_internal(size, align,
min_addr, max_addr, nid);
if (ptr) {
memset(ptr, 0, size);
@@ -1524,14 +1535,13 @@ void * __init memblock_virt_alloc_try_nid(
__func__, (u64)size, (u64)align, nid, &min_addr, &max_addr);
return NULL;
}
-#endif
/**
* __memblock_free_early - free boot memory block
* @base: phys starting address of the boot memory block
* @size: size of the boot memory block in bytes
*
- * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
+ * Free boot memory block previously allocated by memblock_alloc_xx() API.
* The freeing memory will not be released to the buddy allocator.
*/
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
@@ -1565,7 +1575,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
end = PFN_DOWN(base + size);
for (; cursor < end; cursor++) {
- __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
+ memblock_free_pages(pfn_to_page(cursor), cursor, 0);
totalram_pages++;
}
}
@@ -1879,6 +1889,100 @@ static int __init early_memblock(char *p)
}
early_param("memblock", early_memblock);
+static void __init __free_pages_memory(unsigned long start, unsigned long end)
+{
+ int order;
+
+ while (start < end) {
+ order = min(MAX_ORDER - 1UL, __ffs(start));
+
+ while (start + (1UL << order) > end)
+ order--;
+
+ memblock_free_pages(pfn_to_page(start), start, order);
+
+ start += (1UL << order);
+ }
+}
+
+static unsigned long __init __free_memory_core(phys_addr_t start,
+ phys_addr_t end)
+{
+ unsigned long start_pfn = PFN_UP(start);
+ unsigned long end_pfn = min_t(unsigned long,
+ PFN_DOWN(end), max_low_pfn);
+
+ if (start_pfn >= end_pfn)
+ return 0;
+
+ __free_pages_memory(start_pfn, end_pfn);
+
+ return end_pfn - start_pfn;
+}
+
+static unsigned long __init free_low_memory_core_early(void)
+{
+ unsigned long count = 0;
+ phys_addr_t start, end;
+ u64 i;
+
+ memblock_clear_hotplug(0, -1);
+
+ for_each_reserved_mem_region(i, &start, &end)
+ reserve_bootmem_region(start, end);
+
+ /*
+ * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
+	 * because in some cases, e.g. when node 0 has no RAM installed, the
+	 * low RAM will be on node 1
+ */
+ for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
+ NULL)
+ count += __free_memory_core(start, end);
+
+ return count;
+}
+
+static int reset_managed_pages_done __initdata;
+
+void reset_node_managed_pages(pg_data_t *pgdat)
+{
+ struct zone *z;
+
+ for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
+ z->managed_pages = 0;
+}
+
+void __init reset_all_zones_managed_pages(void)
+{
+ struct pglist_data *pgdat;
+
+ if (reset_managed_pages_done)
+ return;
+
+ for_each_online_pgdat(pgdat)
+ reset_node_managed_pages(pgdat);
+
+ reset_managed_pages_done = 1;
+}
+
+/**
+ * memblock_free_all - release free pages to the buddy allocator
+ *
+ * Return: the number of pages actually released.
+ */
+unsigned long __init memblock_free_all(void)
+{
+ unsigned long pages;
+
+ reset_all_zones_managed_pages();
+
+ pages = free_low_memory_core_early();
+ totalram_pages += pages;
+
+ return pages;
+}
+
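In __free_pages_memory() above, each step frees the largest buddy block that is both within MAX_ORDER - 1 and compatible with the current pfn's alignment (__ffs(start)), shrinking the order until the block stays below end. A worked example of the order selection (MAX_ORDER of 11 assumed, as in the default configuration):

#include <stdio.h>

#define MAX_ORDER 11UL          /* assumed default: buddy blocks up to 2^10 pages */

static void free_pages_memory(unsigned long start, unsigned long end)
{
        while (start < end) {
                /* largest order the pfn's alignment allows, capped at MAX_ORDER - 1 */
                unsigned long order = MAX_ORDER - 1;

                if ((unsigned long)__builtin_ctzl(start) < order)
                        order = __builtin_ctzl(start);

                /* shrink until the block no longer runs past 'end' */
                while (start + (1UL << order) > end)
                        order--;

                printf("free pfn %lu, order %lu (%lu pages)\n",
                       start, order, 1UL << order);
                start += 1UL << order;
        }
}

int main(void)
{
        /* pfns [3, 70): freed as 1 + 4 + 8 + 16 + 32 + 4 + 2 aligned pages */
        free_pages_memory(3, 70);
        return 0;
}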
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)
static int memblock_debug_show(struct seq_file *m, void *private)
diff --git a/mm/memory.c b/mm/memory.c
index 072139579d89..4ad2d293ddc2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1537,10 +1537,15 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
* in may not match the PFN we have mapped if the
* mapped PFN is a writeable COW page. In the mkwrite
* case we are creating a writable PTE for a shared
- * mapping and we expect the PFNs to match.
+ * mapping and we expect the PFNs to match. If they
+ * don't match, we are likely racing with block
+ * allocation and mapping invalidation so just skip the
+ * update.
*/
- if (WARN_ON_ONCE(pte_pfn(*pte) != pfn_t_to_pfn(pfn)))
+ if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
+ WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
goto out_unlock;
+ }
entry = *pte;
goto out_mkwrite;
} else
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 7e6509a53d79..61972da38d93 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -33,7 +33,6 @@
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <linux/compaction.h>
#include <asm/tlbflush.h>
@@ -839,7 +838,6 @@ static struct zone * __meminit move_pfn_range(int online_type, int nid,
return zone;
}
-/* Must be protected by mem_hotplug_begin() or a device_lock */
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
unsigned long flags;
@@ -851,6 +849,8 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
struct memory_notify arg;
struct memory_block *mem;
+ mem_hotplug_begin();
+
/*
* We can't use pfn_to_nid() because nid might be stored in struct page
* which is not yet initialized. Instead, we find nid from memory block.
@@ -915,6 +915,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
if (onlined_pages)
memory_notify(MEM_ONLINE, &arg);
+ mem_hotplug_done();
return 0;
failed_addition:
@@ -922,6 +923,7 @@ failed_addition:
(unsigned long long) pfn << PAGE_SHIFT,
(((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
memory_notify(MEM_CANCEL_ONLINE, &arg);
+ mem_hotplug_done();
return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
@@ -1069,7 +1071,12 @@ static int online_memory_block(struct memory_block *mem, void *arg)
return device_online(&mem->dev);
}
-/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
+/*
+ * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
+ * and online/offline operations (triggered e.g. by sysfs).
+ *
+ * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
+ */
int __ref add_memory_resource(int nid, struct resource *res, bool online)
{
u64 start, size;
@@ -1121,26 +1128,26 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online)
/* create new memmap entry */
firmware_map_add_hotplug(start, start + size, "System RAM");
+ /* device_online() will take the lock when calling online_pages() */
+ mem_hotplug_done();
+
/* online pages if requested */
if (online)
walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
NULL, online_memory_block);
- goto out;
-
+ return ret;
error:
/* rollback pgdat allocation and others */
if (new_node)
rollback_node_hotadd(nid);
memblock_remove(start, size);
-
-out:
mem_hotplug_done();
return ret;
}
-EXPORT_SYMBOL_GPL(add_memory_resource);
-int __ref add_memory(int nid, u64 start, u64 size)
+/* requires device_hotplug_lock, see add_memory_resource() */
+int __ref __add_memory(int nid, u64 start, u64 size)
{
struct resource *res;
int ret;
@@ -1154,6 +1161,17 @@ int __ref add_memory(int nid, u64 start, u64 size)
release_memory_resource(res);
return ret;
}
+
+int add_memory(int nid, u64 start, u64 size)
+{
+ int rc;
+
+ lock_device_hotplug();
+ rc = __add_memory(nid, start, size);
+ unlock_device_hotplug();
+
+ return rc;
+}
EXPORT_SYMBOL_GPL(add_memory);
#ifdef CONFIG_MEMORY_HOTREMOVE
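The add_memory()/__add_memory() split above follows the usual locked/unlocked API pairing: the double-underscore variant expects the caller to already hold device_hotplug_lock, the public one takes and drops it around the real work. A generic sketch of that convention (a pthread mutex standing in for the device hotplug lock):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

/* "__" variant: caller must already hold hotplug_lock */
static int __add_memory(int nid, unsigned long long start, unsigned long long size)
{
        printf("adding %llu bytes at %#llx to node %d\n", size, start, nid);
        return 0;
}

/* public variant: wraps the real work in lock/unlock */
static int add_memory(int nid, unsigned long long start, unsigned long long size)
{
        int rc;

        pthread_mutex_lock(&hotplug_lock);
        rc = __add_memory(nid, start, size);
        pthread_mutex_unlock(&hotplug_lock);
        return rc;
}

int main(void)
{
        return add_memory(0, 0x100000000ULL, 0x8000000ULL);
}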
@@ -1540,10 +1558,16 @@ static int __ref __offline_pages(unsigned long start_pfn,
return -EINVAL;
if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
return -EINVAL;
+
+ mem_hotplug_begin();
+
/* This makes hotplug much easier...and readable.
	   we assume this for now. */
- if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
+ if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
+ &valid_end)) {
+ mem_hotplug_done();
return -EINVAL;
+ }
zone = page_zone(pfn_to_page(valid_start));
node = zone_to_nid(zone);
@@ -1552,8 +1576,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
/* set above range as isolated */
ret = start_isolate_page_range(start_pfn, end_pfn,
MIGRATE_MOVABLE, true);
- if (ret)
+ if (ret) {
+ mem_hotplug_done();
return ret;
+ }
arg.start_pfn = start_pfn;
arg.nr_pages = nr_pages;
@@ -1624,6 +1650,7 @@ repeat:
writeback_set_ratelimit();
memory_notify(MEM_OFFLINE, &arg);
+ mem_hotplug_done();
return 0;
failed_removal:
@@ -1633,10 +1660,10 @@ failed_removal:
memory_notify(MEM_CANCEL_OFFLINE, &arg);
/* pushback to free area */
undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+ mem_hotplug_done();
return ret;
}
-/* Must be protected by mem_hotplug_begin() or a device_lock */
int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
return __offline_pages(start_pfn, start_pfn + nr_pages);
@@ -1807,7 +1834,7 @@ EXPORT_SYMBOL(try_offline_node);
* and online/offline operations before this call, as required by
* try_offline_node().
*/
-void __ref remove_memory(int nid, u64 start, u64 size)
+void __ref __remove_memory(int nid, u64 start, u64 size)
{
int ret;
@@ -1836,5 +1863,12 @@ void __ref remove_memory(int nid, u64 start, u64 size)
mem_hotplug_done();
}
+
+void remove_memory(int nid, u64 start, u64 size)
+{
+ lock_device_hotplug();
+ __remove_memory(nid, start, size);
+ unlock_device_hotplug();
+}
EXPORT_SYMBOL_GPL(remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
deleted file mode 100644
index 439af3b765a7..000000000000
--- a/mm/nobootmem.c
+++ /dev/null
@@ -1,445 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * bootmem - A boot-time physical memory allocator and configurator
- *
- * Copyright (C) 1999 Ingo Molnar
- * 1999 Kanoj Sarcar, SGI
- * 2008 Johannes Weiner
- *
- * Access to this subsystem has to be serialized externally (which is true
- * for the boot process anyway).
- */
-#include <linux/init.h>
-#include <linux/pfn.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/kmemleak.h>
-#include <linux/range.h>
-#include <linux/memblock.h>
-#include <linux/bootmem.h>
-
-#include <asm/bug.h>
-#include <asm/io.h>
-
-#include "internal.h"
-
-#ifndef CONFIG_HAVE_MEMBLOCK
-#error CONFIG_HAVE_MEMBLOCK not defined
-#endif
-
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data __refdata contig_page_data;
-EXPORT_SYMBOL(contig_page_data);
-#endif
-
-unsigned long max_low_pfn;
-unsigned long min_low_pfn;
-unsigned long max_pfn;
-unsigned long long max_possible_pfn;
-
-static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
- u64 goal, u64 limit)
-{
- void *ptr;
- u64 addr;
- enum memblock_flags flags = choose_memblock_flags();
-
- if (limit > memblock.current_limit)
- limit = memblock.current_limit;
-
-again:
- addr = memblock_find_in_range_node(size, align, goal, limit, nid,
- flags);
- if (!addr && (flags & MEMBLOCK_MIRROR)) {
- flags &= ~MEMBLOCK_MIRROR;
- pr_warn("Could not allocate %pap bytes of mirrored memory\n",
- &size);
- goto again;
- }
- if (!addr)
- return NULL;
-
- if (memblock_reserve(addr, size))
- return NULL;
-
- ptr = phys_to_virt(addr);
- memset(ptr, 0, size);
- /*
- * The min_count is set to 0 so that bootmem allocated blocks
- * are never reported as leaks.
- */
- kmemleak_alloc(ptr, size, 0, 0);
- return ptr;
-}
-
-/**
- * free_bootmem_late - free bootmem pages directly to page allocator
- * @addr: starting address of the range
- * @size: size of the range in bytes
- *
- * This is only useful when the bootmem allocator has already been torn
- * down, but we are still initializing the system. Pages are given directly
- * to the page allocator, no bootmem metadata is updated because it is gone.
- */
-void __init free_bootmem_late(unsigned long addr, unsigned long size)
-{
- unsigned long cursor, end;
-
- kmemleak_free_part_phys(addr, size);
-
- cursor = PFN_UP(addr);
- end = PFN_DOWN(addr + size);
-
- for (; cursor < end; cursor++) {
- __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
- totalram_pages++;
- }
-}
-
-static void __init __free_pages_memory(unsigned long start, unsigned long end)
-{
- int order;
-
- while (start < end) {
- order = min(MAX_ORDER - 1UL, __ffs(start));
-
- while (start + (1UL << order) > end)
- order--;
-
- __free_pages_bootmem(pfn_to_page(start), start, order);
-
- start += (1UL << order);
- }
-}
-
-static unsigned long __init __free_memory_core(phys_addr_t start,
- phys_addr_t end)
-{
- unsigned long start_pfn = PFN_UP(start);
- unsigned long end_pfn = min_t(unsigned long,
- PFN_DOWN(end), max_low_pfn);
-
- if (start_pfn >= end_pfn)
- return 0;
-
- __free_pages_memory(start_pfn, end_pfn);
-
- return end_pfn - start_pfn;
-}
-
-static unsigned long __init free_low_memory_core_early(void)
-{
- unsigned long count = 0;
- phys_addr_t start, end;
- u64 i;
-
- memblock_clear_hotplug(0, -1);
-
- for_each_reserved_mem_region(i, &start, &end)
- reserve_bootmem_region(start, end);
-
- /*
- * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
- * because in some case like Node0 doesn't have RAM installed
- * low ram will be on Node1
- */
- for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
- NULL)
- count += __free_memory_core(start, end);
-
- return count;
-}
-
-static int reset_managed_pages_done __initdata;
-
-void reset_node_managed_pages(pg_data_t *pgdat)
-{
- struct zone *z;
-
- for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
- z->managed_pages = 0;
-}
-
-void __init reset_all_zones_managed_pages(void)
-{
- struct pglist_data *pgdat;
-
- if (reset_managed_pages_done)
- return;
-
- for_each_online_pgdat(pgdat)
- reset_node_managed_pages(pgdat);
-
- reset_managed_pages_done = 1;
-}
-
-/**
- * free_all_bootmem - release free pages to the buddy allocator
- *
- * Return: the number of pages actually released.
- */
-unsigned long __init free_all_bootmem(void)
-{
- unsigned long pages;
-
- reset_all_zones_managed_pages();
-
- pages = free_low_memory_core_early();
- totalram_pages += pages;
-
- return pages;
-}
-
-/**
- * free_bootmem_node - mark a page range as usable
- * @pgdat: node the range resides on
- * @physaddr: starting physical address of the range
- * @size: size of the range in bytes
- *
- * Partial pages will be considered reserved and left as they are.
- *
- * The range must reside completely on the specified node.
- */
-void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
- unsigned long size)
-{
- memblock_free(physaddr, size);
-}
-
-/**
- * free_bootmem - mark a page range as usable
- * @addr: starting physical address of the range
- * @size: size of the range in bytes
- *
- * Partial pages will be considered reserved and left as they are.
- *
- * The range must be contiguous but may span node boundaries.
- */
-void __init free_bootmem(unsigned long addr, unsigned long size)
-{
- memblock_free(addr, size);
-}
-
-static void * __init ___alloc_bootmem_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal,
- unsigned long limit)
-{
- void *ptr;
-
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc(size, GFP_NOWAIT);
-
-restart:
-
- ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align, goal, limit);
-
- if (ptr)
- return ptr;
-
- if (goal != 0) {
- goal = 0;
- goto restart;
- }
-
- return NULL;
-}
-
-/**
- * __alloc_bootmem_nopanic - allocate boot memory without panicking
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may happen on any node in the system.
- *
- * Return: address of the allocated region or %NULL on failure.
- */
-void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
- unsigned long goal)
-{
- unsigned long limit = -1UL;
-
- return ___alloc_bootmem_nopanic(size, align, goal, limit);
-}
-
-static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
- unsigned long goal, unsigned long limit)
-{
- void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);
-
- if (mem)
- return mem;
- /*
- * Whoops, we cannot satisfy the allocation request.
- */
- pr_alert("bootmem alloc of %lu bytes failed!\n", size);
- panic("Out of memory");
- return NULL;
-}
-
-/**
- * __alloc_bootmem - allocate boot memory
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may happen on any node in the system.
- *
- * The function panics if the request can not be satisfied.
- *
- * Return: address of the allocated region.
- */
-void * __init __alloc_bootmem(unsigned long size, unsigned long align,
- unsigned long goal)
-{
- unsigned long limit = -1UL;
-
- return ___alloc_bootmem(size, align, goal, limit);
-}
-
-void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal,
- unsigned long limit)
-{
- void *ptr;
-
-again:
- ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
- goal, limit);
- if (ptr)
- return ptr;
-
- ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align,
- goal, limit);
- if (ptr)
- return ptr;
-
- if (goal) {
- goal = 0;
- goto again;
- }
-
- return NULL;
-}
-
-void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
- return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
-}
-
-static void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal,
- unsigned long limit)
-{
- void *ptr;
-
- ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
- if (ptr)
- return ptr;
-
- pr_alert("bootmem alloc of %lu bytes failed!\n", size);
- panic("Out of memory");
- return NULL;
-}
-
-/**
- * __alloc_bootmem_node - allocate boot memory from a specific node
- * @pgdat: node to allocate from
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may fall back to any node in the system if the specified node
- * can not hold the requested memory.
- *
- * The function panics if the request can not be satisfied.
- *
- * Return: address of the allocated region.
- */
-void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
- return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
-}
-
-void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
- return __alloc_bootmem_node(pgdat, size, align, goal);
-}
-
-
-/**
- * __alloc_bootmem_low - allocate low boot memory
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may happen on any node in the system.
- *
- * The function panics if the request can not be satisfied.
- *
- * Return: address of the allocated region.
- */
-void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
- unsigned long goal)
-{
- return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
-}
-
-void * __init __alloc_bootmem_low_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal)
-{
- return ___alloc_bootmem_nopanic(size, align, goal,
- ARCH_LOW_ADDRESS_LIMIT);
-}
-
-/**
- * __alloc_bootmem_low_node - allocate low boot memory from a specific node
- * @pgdat: node to allocate from
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may fall back to any node in the system if the specified node
- * can not hold the requested memory.
- *
- * The function panics if the request can not be satisfied.
- *
- * Return: address of the allocated region.
- */
-void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
- return ___alloc_bootmem_node(pgdat, size, align, goal,
- ARCH_LOW_ADDRESS_LIMIT);
-}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 863d46da6586..a919ba5cb3c8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -20,7 +20,6 @@
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
@@ -1339,7 +1338,7 @@ meminit_pfn_in_nid(unsigned long pfn, int node,
#endif
-void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
+void __init memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order)
{
if (early_page_uninitialised(pfn))
@@ -5476,7 +5475,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
/*
* Initially all pages are reserved - free ones are freed
- * up by free_all_bootmem() once the early boot process is
+ * up by memblock_free_all() once the early boot process is
* done. Non-atomic initialization, single-pass.
*/
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
@@ -6209,7 +6208,7 @@ static void __ref setup_usemap(struct pglist_data *pgdat,
zone->pageblock_flags = NULL;
if (usemapsize)
zone->pageblock_flags =
- memblock_virt_alloc_node_nopanic(usemapsize,
+ memblock_alloc_node_nopanic(usemapsize,
pgdat->node_id);
}
#else
@@ -6439,7 +6438,7 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
end = pgdat_end_pfn(pgdat);
end = ALIGN(end, MAX_ORDER_NR_PAGES);
size = (end - start) * sizeof(struct page);
- map = memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
+ map = memblock_alloc_node_nopanic(size, pgdat->node_id);
pgdat->node_mem_map = map + offset;
}
pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
@@ -6508,8 +6507,7 @@ void __init free_area_init_node(int nid, unsigned long *zones_size,
free_area_init_core(pgdat);
}
-#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
-
+#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
/*
* Zero all valid struct pages in range [spfn, epfn), return number of struct
* pages zeroed
@@ -6569,7 +6567,7 @@ void __init zero_resv_unavail(void)
if (pgcnt)
pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
}
-#endif /* CONFIG_HAVE_MEMBLOCK && !CONFIG_FLAT_NODE_MEM_MAP */
+#endif /* !CONFIG_FLAT_NODE_MEM_MAP */
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
@@ -7712,9 +7710,11 @@ void *__init alloc_large_system_hash(const char *tablename,
size = bucketsize << log2qty;
if (flags & HASH_EARLY) {
if (flags & HASH_ZERO)
- table = memblock_virt_alloc_nopanic(size, 0);
+ table = memblock_alloc_nopanic(size,
+ SMP_CACHE_BYTES);
else
- table = memblock_virt_alloc_raw(size, 0);
+ table = memblock_alloc_raw(size,
+ SMP_CACHE_BYTES);
} else if (hashdist) {
table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
} else {
diff --git a/mm/page_ext.c b/mm/page_ext.c
index a9826da84ccb..ae44f7adbe07 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
@@ -161,9 +161,9 @@ static int __init alloc_node_page_ext(int nid)
table_size = get_entry_size() * nr_pages;
- base = memblock_virt_alloc_try_nid_nopanic(
+ base = memblock_alloc_try_nid_nopanic(
table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
- BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
if (!base)
return -ENOMEM;
NODE_DATA(nid)->node_page_ext = base;
diff --git a/mm/page_idle.c b/mm/page_idle.c
index 6302bc62c27d..b9e4b42b33ab 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
diff --git a/mm/page_owner.c b/mm/page_owner.c
index d80adfe702d3..87bc0dfdb52b 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -3,7 +3,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
diff --git a/mm/page_poison.c b/mm/page_poison.c
index aa2b3d34e8ea..f7e2a676365a 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -21,7 +21,7 @@ bool page_poisoning_enabled(void)
{
/*
* Assumes that debug_pagealloc_enabled is set before
- * free_all_bootmem.
+ * memblock_free_all.
* Page poisoning is debug page alloc for some arches. If
* either of those options are enabled, enable poisoning.
*/
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index ae3c2a35d61b..11df03e71288 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -21,7 +21,29 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
if (!is_swap_pte(*pvmw->pte))
return false;
} else {
- if (!pte_present(*pvmw->pte))
+ /*
+ * We get here when we are trying to unmap a private
+ * device page from the process address space. Such a
+ * page is not CPU accessible and is therefore mapped as
+ * a special swap entry; nonetheless it still counts as
+ * a valid regular mapping for the page (and is accounted
+ * as such in the page map counts).
+ *
+ * So handle this special case as if it were a normal
+ * page mapping, i.e. lock the CPU page table and return
+ * true.
+ *
+ * For more details on device private memory see HMM
+ * (include/linux/hmm.h or mm/hmm.c).
+ */
+ if (is_swap_pte(*pvmw->pte)) {
+ swp_entry_t entry;
+
+ /* Handle un-addressable ZONE_DEVICE memory */
+ entry = pte_to_swp_entry(*pvmw->pte);
+ if (!is_device_private_entry(entry))
+ return false;
+ } else if (!pte_present(*pvmw->pte))
return false;
}
}
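
The new branch above can be read as the predicate below: a non-present PTE is accepted as a valid mapping only when it encodes a device private page kept as a special swap entry. This is an illustrative condensation, not code from the patch:

static bool example_pte_maps_page(pte_t pte)
{
	if (pte_present(pte))
		return true;
	/* Un-addressable ZONE_DEVICE memory is kept as a special swap entry. */
	if (is_swap_pte(pte))
		return is_device_private_entry(pte_to_swp_entry(pte));
	return false;
}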
diff --git a/mm/percpu.c b/mm/percpu.c
index 4b90682623e9..a6b74c6fe0be 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -65,7 +65,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitmap.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/lcm.h>
#include <linux/list.h>
@@ -1101,9 +1101,9 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
region_size = ALIGN(start_offset + map_size, lcm_align);
/* allocate chunk */
- chunk = memblock_virt_alloc(sizeof(struct pcpu_chunk) +
- BITS_TO_LONGS(region_size >> PAGE_SHIFT),
- 0);
+ chunk = memblock_alloc(sizeof(struct pcpu_chunk) +
+ BITS_TO_LONGS(region_size >> PAGE_SHIFT),
+ SMP_CACHE_BYTES);
INIT_LIST_HEAD(&chunk->list);
@@ -1114,12 +1114,12 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
chunk->nr_pages = region_size >> PAGE_SHIFT;
region_bits = pcpu_chunk_map_bits(chunk);
- chunk->alloc_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits) *
- sizeof(chunk->alloc_map[0]), 0);
- chunk->bound_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits + 1) *
- sizeof(chunk->bound_map[0]), 0);
- chunk->md_blocks = memblock_virt_alloc(pcpu_chunk_nr_blocks(chunk) *
- sizeof(chunk->md_blocks[0]), 0);
+ chunk->alloc_map = memblock_alloc(BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]),
+ SMP_CACHE_BYTES);
+ chunk->bound_map = memblock_alloc(BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]),
+ SMP_CACHE_BYTES);
+ chunk->md_blocks = memblock_alloc(pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]),
+ SMP_CACHE_BYTES);
pcpu_init_md_blocks(chunk);
/* manage populated page bitmap */
@@ -1888,7 +1888,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
__alignof__(ai->groups[0].cpu_map[0]));
ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
- ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), PAGE_SIZE);
+ ptr = memblock_alloc_nopanic(PFN_ALIGN(ai_size), PAGE_SIZE);
if (!ptr)
return NULL;
ai = ptr;
@@ -2075,12 +2075,14 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
/* process group information and build config tables accordingly */
- group_offsets = memblock_virt_alloc(ai->nr_groups *
- sizeof(group_offsets[0]), 0);
- group_sizes = memblock_virt_alloc(ai->nr_groups *
- sizeof(group_sizes[0]), 0);
- unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
- unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
+ group_offsets = memblock_alloc(ai->nr_groups * sizeof(group_offsets[0]),
+ SMP_CACHE_BYTES);
+ group_sizes = memblock_alloc(ai->nr_groups * sizeof(group_sizes[0]),
+ SMP_CACHE_BYTES);
+ unit_map = memblock_alloc(nr_cpu_ids * sizeof(unit_map[0]),
+ SMP_CACHE_BYTES);
+ unit_off = memblock_alloc(nr_cpu_ids * sizeof(unit_off[0]),
+ SMP_CACHE_BYTES);
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
unit_map[cpu] = UINT_MAX;
@@ -2144,8 +2146,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
* empty chunks.
*/
pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
- pcpu_slot = memblock_virt_alloc(
- pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
+ pcpu_slot = memblock_alloc(pcpu_nr_slots * sizeof(pcpu_slot[0]),
+ SMP_CACHE_BYTES);
for (i = 0; i < pcpu_nr_slots; i++)
INIT_LIST_HEAD(&pcpu_slot[i]);
@@ -2458,7 +2460,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
- areas = memblock_virt_alloc_nopanic(areas_size, 0);
+ areas = memblock_alloc_nopanic(areas_size, SMP_CACHE_BYTES);
if (!areas) {
rc = -ENOMEM;
goto out_free;
@@ -2599,7 +2601,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
/* unaligned allocations can't be freed, round up to page size */
pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
sizeof(pages[0]));
- pages = memblock_virt_alloc(pages_size, 0);
+ pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
/* allocate pages */
j = 0;
@@ -2688,7 +2690,7 @@ EXPORT_SYMBOL(__per_cpu_offset);
static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
size_t align)
{
- return memblock_virt_alloc_from_nopanic(
+ return memblock_alloc_from_nopanic(
size, align, __pa(MAX_DMA_ADDRESS));
}
@@ -2737,7 +2739,7 @@ void __init setup_per_cpu_areas(void)
void *fc;
ai = pcpu_alloc_alloc_info(1, 1);
- fc = memblock_virt_alloc_from_nopanic(unit_size,
+ fc = memblock_alloc_from_nopanic(unit_size,
PAGE_SIZE,
__pa(MAX_DMA_ADDRESS));
if (!ai || !fc)
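
Every hunk in this file follows the same pattern: the old calls passed an alignment of 0 and relied on the allocator's implicit default, whereas the renamed memblock_alloc() is given SMP_CACHE_BYTES explicitly. A minimal sketch of the converted form; the helper name is hypothetical:

static void * __init example_percpu_array(size_t nr, size_t elem_size)
{
	/* Explicit cache-line alignment replaces the old implicit default. */
	return memblock_alloc(nr * elem_size, SMP_CACHE_BYTES);
}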
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 8301293331a2..7fec05796796 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -20,7 +20,7 @@
*/
#include <linux/mm.h>
#include <linux/mmzone.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
@@ -42,8 +42,8 @@ static void * __ref __earlyonly_bootmem_alloc(int node,
unsigned long align,
unsigned long goal)
{
- return memblock_virt_alloc_try_nid_raw(size, align, goal,
- BOOTMEM_ALLOC_ACCESSIBLE, node);
+ return memblock_alloc_try_nid_raw(size, align, goal,
+ MEMBLOCK_ALLOC_ACCESSIBLE, node);
}
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
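
The wrapper above keeps its semantics under the new names: a raw (non-zeroed) allocation that prefers the given node but may come from anywhere the kernel can address. Restated as a standalone sketch, for illustration only:

static void * __init example_vmemmap_buffer(unsigned long size, int node)
{
	/* No upper bound beyond what is directly accessible to the kernel. */
	return memblock_alloc_try_nid_raw(size, PAGE_SIZE,
					  __pa(MAX_DMA_ADDRESS),
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}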
diff --git a/mm/sparse.c b/mm/sparse.c
index 67ad061f7fb8..33307fc05c4d 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -5,7 +5,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
@@ -68,7 +68,8 @@ static noinline struct mem_section __ref *sparse_index_alloc(int nid)
if (slab_is_available())
section = kzalloc_node(array_size, GFP_KERNEL, nid);
else
- section = memblock_virt_alloc_node(array_size, nid);
+ section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
+ nid);
return section;
}
@@ -216,7 +217,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
align = 1 << (INTERNODE_CACHE_SHIFT);
- mem_section = memblock_virt_alloc(size, align);
+ mem_section = memblock_alloc(size, align);
}
#endif
@@ -306,7 +307,7 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
limit = goal + (1UL << PA_SECTION_SHIFT);
nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
- p = memblock_virt_alloc_try_nid_nopanic(size,
+ p = memblock_alloc_try_nid_nopanic(size,
SMP_CACHE_BYTES, goal, limit,
nid);
if (!p && limit) {
@@ -362,7 +363,7 @@ static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
unsigned long size)
{
- return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
+ return memblock_alloc_node_nopanic(size, pgdat->node_id);
}
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -391,9 +392,9 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
if (map)
return map;
- map = memblock_virt_alloc_try_nid(size,
+ map = memblock_alloc_try_nid(size,
PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
- BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
@@ -405,9 +406,9 @@ static void __init sparse_buffer_init(unsigned long size, int nid)
{
WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? */
sparsemap_buf =
- memblock_virt_alloc_try_nid_raw(size, PAGE_SIZE,
+ memblock_alloc_try_nid_raw(size, PAGE_SIZE,
__pa(MAX_DMA_ADDRESS),
- BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
sparsemap_buf_end = sparsemap_buf + size;
}
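
sparse_index_alloc() above shows the recurring early/late split in this file: once slab_is_available(), the slab allocator must be used, otherwise the memory comes from memblock with an explicit node and alignment. A minimal sketch of that pattern; the helper name is hypothetical:

static void * __ref example_node_alloc(size_t size, int nid)
{
	if (slab_is_available())
		return kzalloc_node(size, GFP_KERNEL, nid);
	/* Early boot: node-preferring, cache-aligned memblock allocation. */
	return memblock_alloc_node(size, SMP_CACHE_BYTES, nid);
}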
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index f5c9ef2586de..411dd7a90046 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -19,7 +19,7 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1834818ed07b..9e6bc4d6daa7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -262,7 +262,7 @@
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ca3ed931f2a9..1976fddb9e00 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -81,7 +81,7 @@
#include <linux/uaccess.h>
#include <asm/ioctls.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index e948db29ab53..9b277bd36d1a 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -46,7 +46,7 @@
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/seq_file.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/slab.h>
diff --git a/net/xfrm/xfrm_hash.c b/net/xfrm/xfrm_hash.c
index 2ad33ce1ea17..eca8d84d99bf 100644
--- a/net/xfrm/xfrm_hash.c
+++ b/net/xfrm/xfrm_hash.c
@@ -6,7 +6,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/xfrm.h>
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 161b0224d6ae..c883ec55654f 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -4934,17 +4934,6 @@ sub process {
while ($line =~ m{($Constant|$Lval)}g) {
my $var = $1;
-#gcc binary extension
- if ($var =~ /^$Binary$/) {
- if (WARN("GCC_BINARY_CONSTANT",
- "Avoid gcc v4.3+ binary constant extension: <$var>\n" . $herecurr) &&
- $fix) {
- my $hexval = sprintf("0x%x", oct($var));
- $fixed[$fixlinenr] =~
- s/\b$var\b/$hexval/;
- }
- }
-
#CamelCase
if ($var !~ /^$Constant$/ &&
$var =~ /[A-Z][a-z]|[a-z][A-Z]/ &&