/* Check if this object is currently active. */
#define nft_is_active(__net, __obj)                             \
        (((__obj)->genmask & nft_genmask_cur(__net)) == 0)

/* Check if this object is active in the next generation. */
#define nft_is_active_next(__net, __obj)                        \
        (((__obj)->genmask & nft_genmask_next(__net)) == 0)

/* This object becomes active in the next generation. */
#define nft_activate_next(__net, __obj)                         \
        (__obj)->genmask = nft_genmask_cur(__net)

/* This object becomes inactive in the next generation. */
#define nft_deactivate_next(__net, __obj)                       \
        (__obj)->genmask = nft_genmask_next(__net)

/* After committing the ruleset, clear the stale generation bit. */
#define nft_clear(__net, __obj)                                 \
        (__obj)->genmask &= ~nft_genmask_next(__net)

#define nft_active_genmask(__obj, __genmask)                    \
        !((__obj)->genmask & __genmask)
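To make the two-bit generation scheme concrete, here is a small standalone sketch. It is not kernel code: gencursor, genmask_cur(), genmask_next() and struct obj are simplified stand-ins for the nftables equivalents. It shows that a clear genmask bit means "active in that generation", that activating an object for the next generation leaves it inactive in the current one, and that the commit flips the cursor and drops the stale bit:

#include <stdio.h>

/* Simplified model: the cursor selects which of the two genmask bits is
 * "current"; a clear bit in obj.genmask means active in that generation. */
static unsigned int gencursor;

static unsigned int genmask_cur(void)  { return 1u << gencursor; }
static unsigned int genmask_next(void) { return 1u << !gencursor; }

struct obj {
        unsigned int genmask;
};

int main(void)
{
        /* Mirrors nft_activate_next(): inactive in the current generation,
         * active in the next one. */
        struct obj o = { .genmask = genmask_cur() };

        printf("active now:  %d\n", !(o.genmask & genmask_cur()));   /* 0 */
        printf("active next: %d\n", !(o.genmask & genmask_next()));  /* 1 */

        /* Commit: bump the generation cursor, then (like nft_clear())
         * drop the bit of the generation that just went stale. */
        gencursor = !gencursor;
        o.genmask &= ~genmask_next();

        printf("after commit, active now: %d\n", !(o.genmask & genmask_cur())); /* 1 */
        return 0;
}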
static void nf_tables_commit_release(struct net *net)
{
        struct nftables_pernet *nft_net = nft_pernet(net);
        struct nft_trans *trans;

        /* all side effects have to be made visible.
         * For example, if a chain named 'foo' has been deleted, a
         * new transaction must not find it anymore.
         *
         * Memory reclaim happens asynchronously from work queue
         * to prevent expensive synchronize_rcu() in commit phase.
         */
        if (list_empty(&nft_net->commit_list)) {
                nf_tables_module_autoload_cleanup(net);
                mutex_unlock(&nft_net->commit_mutex);
                return;
        }

        trans = list_last_entry(&nft_net->commit_list,
                                struct nft_trans, list);
        get_net(trans->ctx.net);
        WARN_ON_ONCE(trans->put_net);
        if (!list_empty(&set->bindings) && nft_set_is_anonymous(set))
                return -EBUSY;

        if (binding->flags & NFT_SET_MAP) {
                /* If the set is already bound to the same chain all
                 * jumps are already validated for that chain.
                 */
                list_for_each_entry(i, &set->bindings, list) {
                        if (i->flags & NFT_SET_MAP &&
                            i->chain == binding->chain)
                                goto bind;
                }
err_destroy_flow_rule:
        if (flow)
                nft_flow_rule_destroy(flow);
err_release_rule:
        nf_tables_rule_release(&ctx, rule);
err_release_expr:
        for (i = 0; i < n; i++) {
                if (expr_info[i].ops) {
                        module_put(expr_info[i].ops->type->owner);
                        if (expr_info[i].ops->type->release_ops)
                                expr_info[i].ops->type->release_ops(expr_info[i].ops);
                }
        }
        kvfree(expr_info);
        /*
         * Careful: some expressions might not be initialized in case this
         * is called on error from nf_tables_newrule().
         */
        expr = nft_expr_first(rule);
        while (nft_expr_more(rule, expr)) {
                next = nft_expr_next(expr);
                nf_tables_expr_destroy(ctx, expr);
                expr = next;
        }
        kfree(rule);
}
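For context, the loop above walks expressions that are stored inline, back to back, after the rule header; the pointer to the following expression has to be computed before the current one is torn down, because the size used to advance lives inside the expression itself. A rough standalone model of that layout follows (record, rec_first(), rec_more() and rec_next() are invented names for illustration, not the nf_tables API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct record {
        unsigned int size;      /* total size of this record, header included */
        /* payload follows immediately after the header */
};

struct blob {
        unsigned int dlen;      /* total length of all records in data[] */
        unsigned char data[];   /* records laid out back to back */
};

static struct record *rec_first(struct blob *b)
{
        return (struct record *)b->data;
}

static int rec_more(struct blob *b, struct record *r)
{
        return (unsigned char *)r < b->data + b->dlen;
}

static struct record *rec_next(struct record *r)
{
        return (struct record *)((unsigned char *)r + r->size);
}

int main(void)
{
        unsigned int sizes[] = { 16, 32, 24 };
        unsigned int dlen = sizes[0] + sizes[1] + sizes[2];
        struct blob *b = calloc(1, sizeof(*b) + dlen);
        struct record *r, *next;
        unsigned char *p;
        int i;

        if (!b)
                return 1;
        b->dlen = dlen;
        for (i = 0, p = b->data; i < 3; p += sizes[i], i++)
                ((struct record *)p)->size = sizes[i];

        r = rec_first(b);
        while (rec_more(b, r)) {
                next = rec_next(r);     /* read the size before poisoning r */
                printf("destroying record of size %u\n", r->size);
                memset(r, 0, r->size);  /* stand-in for per-record teardown */
                r = next;
        }
        free(b);
        return 0;
}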
static inline void *freelist_ptr_decode(const struct kmem_cache *s,
                                        freeptr_t ptr, unsigned long ptr_addr,
                                        struct slab *slab)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
        void *decoded;
        /* TODO: maybe let slab_to_virt load a virtual address from
         * struct slab instead of using arithmetic for the translation?
         */
        unsigned long slab_base = (unsigned long)slab_to_virt(slab);
        /*
         * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
         * Normally, this doesn't cause any issues, as both set_freepointer()
         * and get_freepointer() are called with a pointer with the same tag.
         * However, there are some issues with CONFIG_SLUB_DEBUG code. For
         * example, when __free_slub() iterates over objects in a cache, it
         * passes untagged pointers to check_object(). check_object() in turn
         * calls get_freepointer() with an untagged pointer, which causes the
         * freepointer to be restored incorrectly.
         */
        decoded = (void *)(ptr.v ^ s->random ^
                           swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
        /*
         * This verifies that the SLUB freepointer does not point outside the
         * slab. Since at that point we can basically do it for free, it also
         * checks that the pointer alignment looks vaguely sane.
         * However, we probably don't want the cost of a proper division here,
         * so instead we just do a cheap check whether the bottom bits that are
         * clear in the size are also clear in the pointer.
         * So for kmalloc-32, it does a perfect alignment check, but for
         * kmalloc-192, it just checks that the pointer is a multiple of 32.
         * This should probably be reconsidered - is this a good tradeoff, or
         * should that part be thrown out, or do we want a proper accurate
         * alignment check (and can we make it work with acceptable performance
         * cost compared to the security improvement - probably not)?
         *
         * NULL freepointer must be special-cased.
         * Write it in a way that gives the compiler a chance to avoid adding
         * an unpredictable branch.
         */
        slab_base = decoded ? slab_base : 0;
        if (CHECK_DATA_CORRUPTION(
                    ((unsigned long)decoded & slab->align_mask) != slab_base,
                    "bad freeptr (encoded %lx, ptr %px, base %lx, mask %lx",
                    ptr.v, decoded, slab_base, slab->align_mask))
                return NULL;
        return decoded;
#else
        return (void *)ptr.v;
#endif
}
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
                                 unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
        /*
         * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
         * Normally, this doesn't cause any issues, as both set_freepointer()
         * and get_freepointer() are called with a pointer with the same tag.
         * However, there are some issues with CONFIG_SLUB_DEBUG code. For
         * example, when __free_slub() iterates over objects in a cache, it
         * passes untagged pointers to check_object(). check_object() in turn
         * calls get_freepointer() with an untagged pointer, which causes the
         * freepointer to be restored incorrectly.
         */
        return (void *)((unsigned long)ptr ^ s->random ^
                        swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
#else
        return ptr;
#endif
}
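The two helpers above are symmetric: XOR-ing with the same per-cache secret and the byte-swapped storage address a second time recovers the original pointer. Below is a minimal userspace sketch of that round trip; fp_encode(), fp_decode(), swab64() and the constants are illustrative stand-ins for freelist_ptr(), freelist_ptr_decode(), the kernel's swab() and s->random, and the bounds check against the slab base is omitted:

#include <stdint.h>
#include <stdio.h>

/* Byte-swap stand-in for the kernel's swab() on a 64-bit word. */
static uint64_t swab64(uint64_t x)
{
        return __builtin_bswap64(x);
}

/* Encode a free pointer before storing it at ptr_addr inside the object. */
static uint64_t fp_encode(uint64_t ptr, uint64_t secret, uint64_t ptr_addr)
{
        return ptr ^ secret ^ swab64(ptr_addr);
}

/* Decode the value read back from ptr_addr; same XOR operands, so the
 * original pointer falls out. */
static uint64_t fp_decode(uint64_t stored, uint64_t secret, uint64_t ptr_addr)
{
        return stored ^ secret ^ swab64(ptr_addr);
}

int main(void)
{
        uint64_t secret    = 0xdeadbeefcafef00dULL;   /* models s->random */
        uint64_t ptr_addr  = 0xffff888012345678ULL;   /* where the freeptr lives */
        uint64_t next_free = 0xffff888012345800ULL;   /* next object on the freelist */

        uint64_t stored = fp_encode(next_free, secret, ptr_addr);
        printf("stored:  %#llx\n", (unsigned long long)stored);
        printf("decoded: %#llx\n", (unsigned long long)fp_decode(stored, secret, ptr_addr));
        return 0;
}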