author		Yixuan Cao <caoyixuan2019@email.szu.edu.cn>	2022-04-07 11:09:58 +0300
committer	Vlastimil Babka <vbabka@suse.cz>		2022-04-07 12:44:47 +0300
commit		a8f23dd166651dcda2c02f16e524f56a4bd49084 (patch)
tree		a8c1d18c959d0f83d8782c450a1f5782894f217a /mm/slab.c
parent		a285909f471d6703a04b2b3942c352e27131c92b (diff)
download	linux-a8f23dd166651dcda2c02f16e524f56a4bd49084.tar.xz
mm/slab.c: fix comments
While reading the source code,
I noticed some language errors in the comments, so I fixed them.
Signed-off-by: Yixuan Cao <caoyixuan2019@email.szu.edu.cn>
Acked-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Link: https://lore.kernel.org/r/20220407080958.3667-1-caoyixuan2019@email.szu.edu.cn
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	12
1 file changed, 6 insertions, 6 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 90b16c7ae01a..e882657c1494 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -781,7 +781,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	int slab_node = slab_nid(virt_to_slab(objp));
 	int node = numa_mem_id();
 	/*
-	 * Make sure we are not freeing a object from another node to the array
+	 * Make sure we are not freeing an object from another node to the array
 	 * cache on this cpu.
 	 */
 	if (likely(node == slab_node))
@@ -832,7 +832,7 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
 
 	/*
 	 * The kmem_cache_nodes don't come and go as CPUs
-	 * come and go.  slab_mutex is sufficient
+	 * come and go.  slab_mutex provides sufficient
 	 * protection here.
 	 */
 	cachep->node[node] = n;
@@ -845,7 +845,7 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
  * Allocates and initializes node for a node on each slab cache, used for
  * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
  * will be allocated off-node since memory is not yet online for the new node.
- * When hotplugging memory or a cpu, existing node are not replaced if
+ * When hotplugging memory or a cpu, existing nodes are not replaced if
  * already in use.
  *
  * Must hold slab_mutex.
@@ -1046,7 +1046,7 @@ int slab_prepare_cpu(unsigned int cpu)
  * offline.
  *
  * Even if all the cpus of a node are down, we don't free the
- * kmem_cache_node of any cache. This to avoid a race between cpu_down, and
+ * kmem_cache_node of any cache. This is to avoid a race between cpu_down, and
  * a kmalloc allocation from another cpu for memory from the node of
  * the cpu going down. The kmem_cache_node structure is usually allocated from
  * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
@@ -1890,7 +1890,7 @@ static bool set_on_slab_cache(struct kmem_cache *cachep,
  * @flags: SLAB flags
  *
  * Returns a ptr to the cache on success, NULL on failure.
- * Cannot be called within a int, but can be interrupted.
+ * Cannot be called within an int, but can be interrupted.
  * The @ctor is run when new pages are allocated by the cache.
  *
  * The flags are
@@ -3138,7 +3138,7 @@ retry:
 	}
 
 /*
- * A interface to enable slab creation on nodeid
+ * An interface to enable slab creation on nodeid
 */
 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 				int nodeid)
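For readers unfamiliar with the rule the first hunk's comment states: an object may be freed into the current CPU's array cache only if it came from this CPU's NUMA node; otherwise it must be routed back to its home node. Below is a minimal, self-contained sketch of that decision in plain C. It is not the kernel implementation, and every name in it (toy_object, toy_free_is_local, home_node) is hypothetical.

/*
 * Toy model (not kernel code) of the check described in the first
 * hunk's comment in mm/slab.c: never free an object from another
 * node into the array cache on this cpu.
 */
#include <stdio.h>
#include <stdbool.h>

/* Pretend each object records the NUMA node its memory came from. */
struct toy_object {
	int home_node;
};

/* True if the object may go into the local per-CPU array cache. */
static bool toy_free_is_local(const struct toy_object *obj, int current_node)
{
	/* The rule the fixed comment states: same node, or route away. */
	return obj->home_node == current_node;
}

int main(void)
{
	struct toy_object local  = { .home_node = 0 };
	struct toy_object remote = { .home_node = 1 };
	int current_node = 0;	/* node of the CPU doing the free */

	printf("local object  -> %s\n",
	       toy_free_is_local(&local, current_node)
		       ? "per-cpu array cache" : "home node's list");
	printf("remote object -> %s\n",
	       toy_free_is_local(&remote, current_node)
		       ? "per-cpu array cache" : "home node's list");
	return 0;
}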