// use coarse size classes initially when there are not yet
// any groups of desired size. this allows counts of 2 or 3
// to be allocated at first rather than having to start with
// 7 or 5, the min counts for even size classes.
// if no meta (active group) was found for sc, the if below runs
if (!g && sc>=4 && sc<32 && sc!=6 && !(sc&1) && !ctx.usage_by_class[sc]) {
	size_t usage = ctx.usage_by_class[sc|1];
	// if a new group may be allocated, count it toward
	// usage in deciding if we can use coarse class.
	if (!ctx.active[sc|1] || (!ctx.active[sc|1]->avail_mask
	    && !ctx.active[sc|1]->freed_mask))
		usage += 3;
	if (usage <= 12)
		sc |= 1;
	g = ctx.active[sc];
}
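To make the heuristic easier to follow, it can be restated as a small standalone predicate. This is only a sketch under the assumptions visible in the excerpt; use_coarse_class, its parameters, and the array in main are hypothetical stand-ins for mallocng's ctx state, not real API.

#include <assert.h>
#include <stddef.h>

// Hypothetical restatement of the coarse-class decision above; the function
// name and parameters stand in for fields of mallocng's ctx and are not real.
static int use_coarse_class(int sc, const size_t *usage_by_class,
                            int odd_class_has_usable_group)
{
	if (sc < 4 || sc >= 32 || sc == 6 || (sc & 1)) return 0;
	if (usage_by_class[sc]) return 0;            // fine class already in use
	size_t usage = usage_by_class[sc|1];
	if (!odd_class_has_usable_group) usage += 3; // count a prospective new group
	return usage <= 12;                          // low usage: coarse class wins
}

int main(void)
{
	size_t usage_by_class[64] = {0};
	assert(use_coarse_class(8, usage_by_class, 0) == 1); // empty classes: coarsen
	usage_by_class[9] = 12;
	assert(use_coarse_class(8, usage_by_class, 1) == 1); // 12 <= 12: still coarsen
	assert(use_coarse_class(8, usage_by_class, 0) == 0); // 12+3 > 12: use fine class
	return 0;
}

In the real code, odd_class_has_usable_group corresponds to ctx.active[sc|1] existing with a non-zero avail_mask or freed_mask.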
// look in the active group (meta) of this size class for an available chunk
for (;;) {
	mask = g ? g->avail_mask : 0;
	first = mask&-mask;
	if (!first) break;
	if (RDLOCK_IS_EXCLUSIVE || !MT)
		g->avail_mask = mask-first;
	else if (a_cas(&g->avail_mask, mask, mask-first)!=mask)
		continue;
	idx = a_ctz_32(first);
	goto success;
}

upgradelock();
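The fast path leans on two bit tricks: mask & -mask isolates the lowest set bit of avail_mask, and a_ctz_32 (musl's 32-bit count-trailing-zeros helper) converts that bit into a slot index. A minimal demonstration, substituting the GCC/Clang builtin for a_ctz_32:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0x58;           // slots 3, 4 and 6 are available
	uint32_t first = mask & -mask;  // lowest set bit: 0x08
	int idx = __builtin_ctz(first); // its position: slot 3
	printf("first=%#x idx=%d remaining=%#x\n", first, idx, mask - first);
	return 0;
}

Because first has exactly one bit set and that bit is set in mask, mask - first is the same as clearing it, which is the value the CAS publishes as the new avail_mask.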
// slow path: use alloc_slot to obtain a slot index
idx = alloc_slot(sc, n);
if (idx < 0) {
	unlock();
	return 0;
}
g = ctx.active[sc];
struct meta *g = get_meta(p);	// meta corresponding to this chunk
int idx = get_slot_index(p);	// slot index of the chunk within its group
size_t stride = get_stride(g);	// slot stride for this chunk's size class
unsigned char *start = g->mem->storage + stride*idx;
unsigned char *end = start + stride - IB;
get_nominal_size(p, end);	// recover the chunk's nominal size
uint32_t self = 1u<<idx, all = (2u<<g->last_idx)-1;
((unsigned char *)p)[-3] = 255;
// invalidate offset to group header, and cycle offset of
// used region within slot if current offset is zero.
*(uint16_t *)((char *)p-2) = 0;
// release any whole pages contained in the slot to be freed
// unless it's a single-slot group that will be unmapped.
if (((uintptr_t)(start-1) ^ (uintptr_t)end) >= 2*PGSZ && g->last_idx) {
	unsigned char *base = start + (-(uintptr_t)start & (PGSZ-1));
	size_t len = (end-base) & -PGSZ;
	if (len) madvise(base, len, MADV_FREE);
}
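The two rounding expressions are standard page-alignment arithmetic: -(uintptr_t)start & (PGSZ-1) is the distance up to the next page boundary, and & -PGSZ truncates the length down to whole pages. A quick check with made-up addresses (PGSZ assumed to be 4096 for the demo):

#include <stdint.h>
#include <stdio.h>

#define PGSZ 4096u  // assumed page size for this demo

int main(void)
{
	uintptr_t start = 0x10010, end = 0x13200;        // hypothetical slot bounds
	uintptr_t base = start + (-start & (PGSZ-1));    // round up to page: 0x11000
	uintptr_t len = (end - base) & -(uintptr_t)PGSZ; // round down to pages: 0x2000
	printf("base=%#lx len=%#lx\n", (unsigned long)base, (unsigned long)len);
	return 0;
}

The XOR guard appears to be a cheap pre-filter: if (start-1) and end agree on all bits at or above the 2*PGSZ boundary, the slot cannot contain a whole page, and the exact test is left to if (len).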
// atomic free without locking if this is neither first or last slot
for (;;) {
	uint32_t freed = g->freed_mask;
	uint32_t avail = g->avail_mask;
	uint32_t mask = freed | avail;	// combine freed and available chunks
	assert(!(mask&self));
	if (!freed || mask+self==all) break;
	if (!MT)
		g->freed_mask = freed+self;
	else if (a_cas(&g->freed_mask, freed, freed+self)!=freed)
		continue;
	return;
}
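A worked example of the mask arithmetic, assuming a five-slot group (last_idx == 4). Because the assert guarantees self is not already set in mask, mask + self equals mask | self, so comparing it against all detects that this free would leave every slot freed or available:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int last_idx = 4;                     // group with slots 0..4
	uint32_t all = (2u << last_idx) - 1;  // 0b11111
	uint32_t avail = 0x03, freed = 0x18;  // 0,1 available; 3,4 already freed
	uint32_t self = 1u << 2;              // slot 2 is being freed now
	uint32_t mask = freed | avail;
	assert(!(mask & self));               // slot 2 must not already be free
	assert(mask + self == all);           // this free empties the whole group
	return 0;
}

When that happens (or when freed is still zero), the loop breaks and free() falls through to the locked path, which ends up in nontrivial_free() below.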
static struct mapinfo nontrivial_free(struct meta *g, int i)
{
	uint32_t self = 1u<<i;
	int sc = g->sizeclass;
	uint32_t mask = g->freed_mask | g->avail_mask;
	// every slot is now either freed or available, and this meta may be freed
	if (mask+self == (2u<<g->last_idx)-1 && okay_to_free(g)) {
		// any multi-slot group is necessarily on an active list
		// here, but single-slot groups might or might not be.
		if (g->next) {	// the meta is linked on an active list
			assert(sc < 48);
			int activate_new = (ctx.active[sc]==g);
			dequeue(&ctx.active[sc], g);
			// after the dequeue, ctx.active[sc] is the meta that
			// followed the one just removed
			if (activate_new && ctx.active[sc])
				activate_group(ctx.active[sc]);
		}
		return free_group(g);
	} else if (!mask) {
		assert(sc < 48);
		// might still be active if there were no allocations
		// after last available slot was taken.
		if (ctx.active[sc] != g) {
			queue(&ctx.active[sc], g);
		}
	}

	a_or(&g->freed_mask, self);
	return (struct mapinfo){ 0 };
}
static inline void dequeue(struct meta **phead, struct meta *m)
{
	if (m->next != m) {
		m->prev->next = m->next;
		m->next->prev = m->prev;
		if (*phead == m) *phead = m->next;
	} else {
		*phead = 0;
	}
	m->prev = m->next = 0;
}
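dequeue() operates on a circular doubly-linked list whose head pointer lives in ctx.active[sc]. The following standalone sketch pairs it with an enqueue helper modeled on mallocng's queue(); the node type and main are simplified stand-ins, not the real structures:

#include <assert.h>
#include <stddef.h>

// Minimal stand-in for struct meta: only the link fields matter here.
struct node { struct node *prev, *next; };

// Insert m at the tail of the circular list rooted at *phead
// (shaped after mallocng's queue(), simplified).
static void enqueue(struct node **phead, struct node *m)
{
	if (*phead) {
		struct node *head = *phead;
		m->next = head;
		m->prev = head->prev;
		m->next->prev = m->prev->next = m;
	} else {
		m->prev = m->next = m;
		*phead = m;
	}
}

static void dequeue(struct node **phead, struct node *m)
{
	if (m->next != m) {                 // more than one element
		m->prev->next = m->next;
		m->next->prev = m->prev;
		if (*phead == m) *phead = m->next;
	} else {                            // m was the only element
		*phead = 0;
	}
	m->prev = m->next = 0;
}

int main(void)
{
	struct node a = {0}, b = {0}, *head = 0;
	enqueue(&head, &a);
	enqueue(&head, &b);
	dequeue(&head, &a);   // head advances to the next node
	assert(head == &b && b.next == &b && b.prev == &b);
	dequeue(&head, &b);
	assert(head == 0);
	return 0;
}

The m->next != m test is how a one-element list is recognized: the sole node links to itself, so removing it must also clear the head.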
static struct mapinfo nontrivial_free(struct meta *g, int i)
{
	uint32_t self = 1u<<i;
	int sc = g->sizeclass;
	uint32_t mask = g->freed_mask | g->avail_mask;

	if (mask+self == (2u<<g->last_idx)-1 && okay_to_free(g)) {
		// any multi-slot group is necessarily on an active list
		// here, but single-slot groups might or might not be.
		if (g->next) {
			assert(sc < 48);
			int activate_new = (ctx.active[sc]==g);
			dequeue(&ctx.active[sc], g);
			if (activate_new && ctx.active[sc])
				activate_group(ctx.active[sc]);
		}
		return free_group(g);
	}
	...
	...
}
static int okay_to_free(struct meta *g)
{
	int sc = g->sizeclass;

	if (!g->freeable) return 0;

	// always free individual mmaps not suitable for reuse
	if (sc >= 48 || get_stride(g) < UNIT*size_classes[sc])
		return 1;

	// always free groups allocated inside another group's slot
	// since recreating them should not be expensive and they
	// might be blocking freeing of a much larger group.
	if (!g->maplen) return 1;

	// if there is another non-full group, free this one to
	// consolidate future allocations, reduce fragmentation.
	if (g->next != g) return 1;

	// free any group in a size class that's not bouncing
	if (!is_bouncing(sc)) return 1;
	size_t cnt = g->last_idx+1;
	size_t usage = ctx.usage_by_class[sc];

	// if usage is high enough that a larger count should be
	// used, free the low-count group so a new one will be made.
	if (9*cnt <= usage && cnt < 20) return 1;
	// otherwise, keep the last group in a bouncing class.
	return 0;
}
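The last two checks can be read as a small decision function. The sketch below is a hypothetical restatement for illustration (keep_last_group is not a mallocng function), using the same 9*cnt <= usage and cnt < 20 thresholds:

#include <assert.h>
#include <stddef.h>

// Sketch: should the last group of a size class be kept? cnt is the group's
// slot count, usage the class-wide slot usage, bouncing whether the class
// is currently bouncing.
static int keep_last_group(size_t cnt, size_t usage, int bouncing)
{
	if (!bouncing) return 0;                  // not bouncing: free it
	if (9*cnt <= usage && cnt < 20) return 0; // usage outgrew this count: free it
	return 1;                                 // keep the last group around
}

int main(void)
{
	assert(keep_last_group(3, 30, 1) == 0); // 27 <= 30: make room for a bigger group
	assert(keep_last_group(3, 20, 1) == 1); // usage still low: keep the small group
	assert(keep_last_group(3, 20, 0) == 0); // class not bouncing: always free
	return 0;
}

So a three-slot group in a bouncing class is kept while class-wide usage stays at or below 26 slots, and released once usage reaches 27 so that a larger replacement group will be created.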