author     Linus Torvalds <torvalds@linux-foundation.org>  2023-04-28 19:43:49 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-04-28 19:43:49 +0300
commit     22b8cc3e78f5448b4c5df00303817a9137cd663f
tree       4c90f7ebbf4d439ae37f879f0e0f97c2eafd434b /mm/migrate.c
parent     7b664cc38ea7bdd5e3ce018bba98583741921bd4
parent     97740266de26e5dfe6e4fbecacb6995b66c2e378
download   linux-22b8cc3e78f5448b4c5df00303817a9137cd663f.tar.xz
Merge tag 'x86_mm_for_6.4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 LAM (Linear Address Masking) support from Dave Hansen:
"Add support for the new Linear Address Masking CPU feature.
This is similar to ARM's Top Byte Ignore and allows userspace to store
metadata in some bits of pointers without masking it out before use"
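The end-to-end flow from userspace is short: query the supported tag width, opt in with arch_prctl(), then dereference tagged pointers directly. A minimal sketch, assuming the ARCH_* constant values this series adds to arch/x86/include/uapi/asm/prctl.h (the program itself is illustrative, not part of the series):

/*
 * Illustrative userspace sketch of enabling LAM. The constant values
 * below are assumed to match the uapi header added by this series.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

#define ARCH_GET_UNTAG_MASK	0x4001
#define ARCH_ENABLE_TAGGED_ADDR	0x4002
#define ARCH_GET_MAX_TAG_BITS	0x4003

int main(void)
{
	unsigned long tag_bits = 0, untag_mask = 0;
	uint64_t *p = malloc(sizeof(*p));

	/* Ask how many tag bits the CPU supports (6 for LAM_U57). */
	if (syscall(SYS_arch_prctl, ARCH_GET_MAX_TAG_BITS, &tag_bits) || !tag_bits)
		return 1;	/* LAM not available */

	/* Opt in; the kernel picks a mode covering the requested bits. */
	if (syscall(SYS_arch_prctl, ARCH_ENABLE_TAGGED_ADDR, tag_bits))
		return 1;

	syscall(SYS_arch_prctl, ARCH_GET_UNTAG_MASK, &untag_mask);
	printf("tag bits: %lu, untag mask: %#lx\n", tag_bits, untag_mask);

	/* With LAM_U57 the CPU ignores bits 62:57 on dereference, so a
	 * tagged pointer can be used without masking the tag out first. */
	uint64_t *tagged = (uint64_t *)((uintptr_t)p | (0x2aUL << 57));
	*tagged = 42;
	printf("*tagged = %lu\n", (unsigned long)*tagged);

	free(p);
	return 0;
}

Enabling is only permitted while the process is single-threaded, which is what the "LAM vs thread creation" selftests in the list below exercise.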
* tag 'x86_mm_for_6.4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mm/iommu/sva: Do not allow to set FORCE_TAGGED_SVA bit from outside
x86/mm/iommu/sva: Fix error code for LAM enabling failure due to SVA
selftests/x86/lam: Add test cases for LAM vs thread creation
selftests/x86/lam: Add ARCH_FORCE_TAGGED_SVA test cases for linear-address masking
selftests/x86/lam: Add inherit test cases for linear-address masking
selftests/x86/lam: Add io_uring test cases for linear-address masking
selftests/x86/lam: Add mmap and SYSCALL test cases for linear-address masking
selftests/x86/lam: Add malloc and tag-bits test cases for linear-address masking
x86/mm/iommu/sva: Make LAM and SVA mutually exclusive
iommu/sva: Replace pasid_valid() helper with mm_valid_pasid()
mm: Expose untagging mask in /proc/$PID/status
x86/mm: Provide arch_prctl() interface for LAM
x86/mm: Reduce untagged_addr() overhead for systems without LAM
x86/uaccess: Provide untagged_addr() and remove tags before address check
mm: Introduce untagged_addr_remote()
x86/mm: Handle LAM on context switch
x86: CPUID and CR3/CR4 flags for Linear Address Masking
x86: Allow atomic MM_CONTEXT flags setting
x86/mm: Rework address range check in get_user() and put_user()
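Beyond the arch_prctl() queries, the "mm: Expose untagging mask in /proc/$PID/status" change above makes the mask visible to debuggers and other external observers. A minimal sketch of reading it, assuming the field name untag_mask used by this series:

/* Sketch: read the untagging mask exposed in /proc/$PID/status.
 * Assumes the "untag_mask" field name added by this series. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "untag_mask:", 11))
			fputs(line, stdout);	/* e.g. untag_mask: 0xffffffffffffffff */
	}
	fclose(f);
	return 0;
}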
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  11
1 file changed, 6 insertions, 5 deletions
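The mm/migrate.c change below belongs to the untagged_addr_remote() rework: move_pages(2) may operate on another process's address space, so tags must be stripped with the untag mask of the target mm, not of current, and the stripping must happen under that mm's mmap lock. That is why untagging moves out of do_pages_move() and into add_page_for_migration(), after mmap_read_lock(). For architectures without a remote variant, the series falls back to plain untagged_addr(); roughly (a sketch, not the verbatim kernel definition):

/* Rough sketch of the generic fallback for untagged_addr_remote();
 * the real definition lives in include/linux/uaccess.h. */
#ifndef untagged_addr_remote
#define untagged_addr_remote(mm, addr)	({	\
	mmap_assert_locked(mm);			\
	untagged_addr(addr);			\
})
#endif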
diff --git a/mm/migrate.c b/mm/migrate.c
index 02cace7955d4..01cac26a3127 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2099,15 +2099,18 @@ static int do_move_pages_to_node(struct mm_struct *mm,
  *         target node
  *     1 - when it has been queued
  */
-static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
+static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
 		int node, struct list_head *pagelist, bool migrate_all)
 {
 	struct vm_area_struct *vma;
+	unsigned long addr;
 	struct page *page;
 	int err;
 	bool isolated;
 
 	mmap_read_lock(mm);
+	addr = (unsigned long)untagged_addr_remote(mm, p);
+
 	err = -EFAULT;
 	vma = vma_lookup(mm, addr);
 	if (!vma || !vma_migratable(vma))
@@ -2213,7 +2216,6 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 
 	for (i = start = 0; i < nr_pages; i++) {
 		const void __user *p;
-		unsigned long addr;
 		int node;
 
 		err = -EFAULT;
@@ -2221,7 +2223,6 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 			goto out_flush;
 		if (get_user(node, nodes + i))
 			goto out_flush;
-		addr = (unsigned long)untagged_addr(p);
 
 		err = -ENODEV;
 		if (node < 0 || node >= MAX_NUMNODES)
@@ -2249,8 +2250,8 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 		 * Errors in the page lookup or isolation are not fatal and we simply
 		 * report them via status
 		 */
-		err = add_page_for_migration(mm, addr, current_node,
-					     &pagelist, flags & MPOL_MF_MOVE_ALL);
+		err = add_page_for_migration(mm, p, current_node, &pagelist,
+					     flags & MPOL_MF_MOVE_ALL);
 
 		if (err > 0) {
 			/* The page is successfully queued for migration */