author    Stefani Seibold <stefani@seibold.net>    2014-03-18 02:22:02 +0400
committer H. Peter Anvin <hpa@linux.intel.com>     2014-03-18 23:51:56 +0400
commit    3935ed6a3a533c1736e3ca65bff72afd1773be27 (patch)
tree      f8f9eb07a9b3e05f5b143e032e4b59a4b62836a6 /mm
parent    d2312e3379d581d2c3603357a0181046448e1de3 (diff)
download  linux-3935ed6a3a533c1736e3ca65bff72afd1773be27.tar.xz
mm: Add new func _install_special_mapping() to mmap.c
_install_special_mapping() is the new base function for install_special_mapping(). It returns a pointer to the created VMA, or an error code wrapped in ERR_PTR().

The new function is needed by the vdso 32-bit support to map the additional vvar and hpet pages into the 32-bit address space. This will be done with io_remap_pfn_range() and remap_pfn_range(), which require a vm_area_struct.

Reviewed-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Stefani Seibold <stefani@seibold.net>
Link: http://lkml.kernel.org/r/1395094933-14252-3-git-send-email-stefani@seibold.net
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
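
As a rough illustration of how a caller (for example the 32-bit vdso setup code mentioned above) might use the new return-by-pointer API, the sketch below maps a special VMA from a page array and then remaps a physical page into it. The map_vvar_example() helper, the vvar_pages array and the VVAR_PHYS_ADDR constant are invented for illustration and are not part of this patch.

#include <linux/mm.h>
#include <linux/err.h>

/*
 * Hypothetical caller sketch (not part of this patch): install a special
 * mapping and then remap a physical page into the returned VMA, the way
 * the 32-bit vdso code is expected to handle the vvar/hpet pages.
 */
static int map_vvar_example(struct mm_struct *mm, unsigned long addr,
			    struct page **vvar_pages)
{
	struct vm_area_struct *vma;

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_MAYREAD, vvar_pages);
	if (IS_ERR(vma))
		return PTR_ERR(vma);	/* -ENOMEM or the insert error */

	/* VVAR_PHYS_ADDR is a made-up physical address for illustration. */
	return remap_pfn_range(vma, vma->vm_start,
			       VVAR_PHYS_ADDR >> PAGE_SHIFT,
			       PAGE_SIZE, PAGE_READONLY);
}

The point of returning the VMA rather than 0/-errno is visible here: remap_pfn_range() needs the vm_area_struct, which the old install_special_mapping() never exposed to its caller.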
Diffstat (limited to 'mm')
-rw-r--r--	mm/mmap.c | 20
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 20ff0c33274c..81ba54ff96c7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2918,7 +2918,7 @@ static const struct vm_operations_struct special_mapping_vmops = {
* The array pointer and the pages it points to are assumed to stay alive
* for as long as this mapping might exist.
*/
-int install_special_mapping(struct mm_struct *mm,
+struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long vm_flags, struct page **pages)
{
@@ -2927,7 +2927,7 @@ int install_special_mapping(struct mm_struct *mm,
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (unlikely(vma == NULL))
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_mm = mm;
@@ -2948,11 +2948,23 @@ int install_special_mapping(struct mm_struct *mm,
perf_event_mmap(vma);
- return 0;
+ return vma;
out:
kmem_cache_free(vm_area_cachep, vma);
- return ret;
+ return ERR_PTR(ret);
+}
+
+int install_special_mapping(struct mm_struct *mm,
+ unsigned long addr, unsigned long len,
+ unsigned long vm_flags, struct page **pages)
+{
+ struct vm_area_struct *vma = _install_special_mapping(mm,
+ addr, len, vm_flags, pages);
+
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+ return 0;
}
static DEFINE_MUTEX(mm_all_locks_mutex);
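
For readers unfamiliar with the pointer-encoded error convention the patch relies on, the following minimal sketch shows how ERR_PTR(), IS_ERR() and PTR_ERR() fit together; struct foo and the alloc_thing()/use_thing() helpers are invented purely for illustration.

#include <linux/err.h>
#include <linux/slab.h>

struct foo {
	int dummy;
};

/* Invented helper: return a valid pointer or an errno encoded as a pointer. */
static struct foo *alloc_thing(void)
{
	struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return ERR_PTR(-ENOMEM);	/* pack -ENOMEM into the pointer */
	return p;
}

static int use_thing(void)
{
	struct foo *p = alloc_thing();

	if (IS_ERR(p))			/* pointer falls in the error range? */
		return PTR_ERR(p);	/* unpack it back to -ENOMEM etc. */

	kfree(p);
	return 0;
}

This is exactly the pattern the new install_special_mapping() wrapper uses: _install_special_mapping() returns either a real vm_area_struct pointer or ERR_PTR(ret), and the wrapper converts that back to the old 0/-errno return convention.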