diff options
 include/linux/memremap.h | 2 +-
 include/linux/mm_types.h | 2 +-
 include/linux/mmzone.h   | 2 +-
 mm/memory-failure.c      | 2 +-
 mm/memory_hotplug.c      | 4 ++--
 mm/page_alloc.c          | 2 +-
 mm/swapfile.c            | 2 +-
7 files changed, 8 insertions, 8 deletions
| diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 45a79da89c5f..c0e9d35889e8 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -26,7 +26,7 @@ struct vmem_altmap {  };  /* - * Specialize ZONE_DEVICE memory into multiple types each having differents + * Specialize ZONE_DEVICE memory into multiple types each has a different   * usage.   *   * MEMORY_DEVICE_PRIVATE: diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index b66d0225414e..748617780924 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -397,7 +397,7 @@ struct mm_struct {  		unsigned long mmap_base;	/* base of mmap area */  		unsigned long mmap_legacy_base;	/* base of mmap area in bottom-up allocations */  #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES -		/* Base adresses for compatible mmap() */ +		/* Base addresses for compatible mmap() */  		unsigned long mmap_compat_base;  		unsigned long mmap_compat_legacy_base;  #endif diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 7bc7e41b6c31..0ed2c23ed3fb 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -114,7 +114,7 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)  struct pglist_data;  /* - * Add a wild amount of padding here to ensure datas fall into separate + * Add a wild amount of padding here to ensure data fall into separate   * cachelines.  There are very few zone structures in the machine, so space   * consumption is not a concern here.   */ diff --git a/mm/memory-failure.c b/mm/memory-failure.c index ee02d8b06839..eefd823deb67 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1340,7 +1340,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,  			 * could potentially call huge_pmd_unshare.  Because of  			 * this, take semaphore in write mode here and set  			 * TTU_RMAP_LOCKED to indicate we have taken the lock -			 * at this higer level. +			 * at this higher level.  			 
*/  			mapping = hugetlb_page_mapping_lock_write(hpage);  			if (mapping) { diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 553c52751249..79f6ce92b6b6 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -783,7 +783,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *z  	/*  	 * {on,off}lining is constrained to full memory sections (or more -	 * precisly to memory blocks from the user space POV). +	 * precisely to memory blocks from the user space POV).  	 * memmap_on_memory is an exception because it reserves initial part  	 * of the physical memory space for vmemmaps. That space is pageblock  	 * aligned. @@ -1580,7 +1580,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)  	/*  	 * {on,off}lining is constrained to full memory sections (or more -	 * precisly to memory blocks from the user space POV). +	 * precisely to memory blocks from the user space POV).  	 * memmap_on_memory is an exception because it reserves initial part  	 * of the physical memory space for vmemmaps. That space is pageblock  	 * aligned. diff --git a/mm/page_alloc.c b/mm/page_alloc.c index eeff64843718..6700cfcfab46 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3180,7 +3180,7 @@ static void __drain_all_pages(struct zone *zone, bool force_all_cpus)  	int cpu;  	/* -	 * Allocate in the BSS so we wont require allocation in +	 * Allocate in the BSS so we won't require allocation in  	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y  	 */  	static cpumask_t cpus_with_pcps; diff --git a/mm/swapfile.c b/mm/swapfile.c index e898c879a434..1e07d1c776f2 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2967,7 +2967,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p,  		return 0;  	} -	/* swap partition endianess hack... */ +	/* swap partition endianness hack... 
*/  	if (swab32(swap_header->info.version) == 1) {  		swab32s(&swap_header->info.version);  		swab32s(&swap_header->info.last_page); | 
