-
Notifications
You must be signed in to change notification settings - Fork 52
[1/3] Separate comparison logic from data loading in matchers #464
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,296 @@ | ||
| /* SPDX-License-Identifier: GPL-2.0-only */ | ||
| /* | ||
| * Copyright (c) Meta Platforms, Inc. and affiliates. | ||
yaakov-stein marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| */ | ||
|
|
||
| #include "cgen/matcher/cmp.h" | ||
|
|
||
| #include <linux/bpf.h> | ||
| #include <linux/bpf_common.h> | ||
|
|
||
| #include <assert.h> | ||
| #include <string.h> | ||
|
|
||
| #include <bpfilter/logger.h> | ||
| #include <bpfilter/matcher.h> | ||
|
|
||
| #include "cgen/jmp.h" | ||
| #include "cgen/program.h" | ||
|
|
||
| #define _BF_MASK_LAST_BYTE 15 | ||
|
|
||
/**
 * @brief Load 8 bytes from a possibly-unaligned location as a uint64_t.
 *
 * memcpy is used instead of a pointer cast to avoid strict-aliasing and
 * alignment issues; byte order is the host's.
 *
 * @param ptr Location to read from. Must point to at least 8 valid bytes.
 * @return The 64-bit value stored at @p ptr .
 */
static inline uint64_t _bf_read_u64(const void *ptr)
{
    uint64_t value;
    memcpy(&value, ptr, sizeof(value));
    return value;
}
|
|
||
| /** | ||
| * @brief Emit a 4-instruction sequence to build a 64-bit immediate from 8 bytes. | ||
| * | ||
| * Produces: | ||
| * @code | ||
yaakov-stein marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| * MOV32_IMM(dst, high32) -> LSH(dst, 32) -> MOV32_IMM(scratch, low32) -> OR(dst, scratch) | ||
| * @endcode | ||
| * | ||
| * @param program Program to emit into. Can't be NULL. | ||
| * @param dst_reg Destination register for the 64-bit value. | ||
| * @param scratch_reg Scratch register (clobbered). | ||
| * @param data 64-bit value to load. | ||
| */ | ||
static int _bf_cmp_build_imm64(struct bf_program *program, int dst_reg,
                               int scratch_reg, uint64_t data)
{
    /* MOV32_IMM zero-extends the destination, so after the shift dst_reg
     * holds (high32 << 32) with a clear low half. */
    EMIT(program, BPF_MOV32_IMM(dst_reg, (uint32_t)(data >> 32)));
    EMIT(program, BPF_ALU64_IMM(BPF_LSH, dst_reg, 32));
    /* OR the low 32 bits in through the scratch register (clobbered). */
    EMIT(program, BPF_MOV32_IMM(scratch_reg, (uint32_t)data));
    EMIT(program, BPF_ALU64_REG(BPF_OR, dst_reg, scratch_reg));

    return 0;
}
|
|
||
/**
 * @brief Compute a network prefix mask.
 *
 * The first @p prefixlen bits of @p mask are set to 1, the remaining bits
 * are cleared (e.g. prefixlen=20, mask_len=4 yields ff.ff.f0.00).
 *
 * @param prefixlen Prefix length in bits. Values larger than
 *        <tt>mask_len * 8</tt> are clamped to the buffer's bit capacity.
 * @param mask Output buffer. Can't be NULL.
 * @param mask_len Size of mask buffer in bytes (4 or 16).
 */
static void _bf_prefix_to_mask(unsigned int prefixlen, uint8_t *mask,
                               size_t mask_len)
{
    assert(mask);

    /* Clamp: a prefix longer than the buffer would overflow both the
     * memset below and the trailing partial-byte write. */
    if (prefixlen > mask_len * 8)
        prefixlen = (unsigned int)(mask_len * 8);

    memset(mask, 0x00, mask_len);
    memset(mask, 0xff, prefixlen / 8);

    /* Partial trailing byte: keep only the top (prefixlen % 8) bits. */
    if (prefixlen % 8)
        mask[prefixlen / 8] = (0xff << (8 - prefixlen % 8)) & 0xff;
}
|
|
||
/**
 * @brief Emit instructions comparing the value in @p reg against @p ref .
 *
 * For 16-byte values, @p reg holds the low 64 bits and <tt>reg + 1</tt> the
 * high 64 bits. Whenever the comparison concludes the matcher does not
 * match, the generated code jumps to the next rule.
 *
 * @param program Program to emit into. Can't be NULL.
 * @param op Comparison operator: BF_MATCHER_EQ or BF_MATCHER_NE.
 * @param ref Reference value to compare against. Can't be NULL.
 * @param size Size of the reference value in bytes (1, 2, 4, 8, or 16).
 * @param reg Register holding the value to compare.
 * @return 0 on success, or a negative errno value on failure.
 */
int bf_cmp_value(struct bf_program *program, enum bf_matcher_op op,
                 const void *ref, unsigned int size, int reg)
{
    assert(program);
    assert(ref);

    if (op != BF_MATCHER_EQ && op != BF_MATCHER_NE)
        return bf_err_r(-EINVAL, "unsupported operator %d", op);

    switch (size) {
    case 1:
    case 2: {
        /* Small values: compare directly via JMP_IMM.
         * For size 1, ref is uint8_t; for size 2, ref is uint16_t.
         * Both fit in a signed 32-bit immediate. */
        uint32_t val =
            (size == 1) ? *(const uint8_t *)ref : *(const uint16_t *)ref;

        /* EQ jumps to the next rule on mismatch (JNE); NE on match (JEQ). */
        EMIT_FIXUP_JMP_NEXT_RULE(
            program,
            BPF_JMP_IMM(op == BF_MATCHER_EQ ? BPF_JNE : BPF_JEQ, reg, val, 0));
        break;
    }
    case 4: {
        /* 32-bit values: may exceed signed 32-bit immediate range, so
         * use MOV32_IMM into R2 + JMP_REG. */
        uint32_t val = *(const uint32_t *)ref;

        EMIT(program, BPF_MOV32_IMM(BPF_REG_2, val));
        EMIT_FIXUP_JMP_NEXT_RULE(
            program, BPF_JMP_REG(op == BF_MATCHER_EQ ? BPF_JNE : BPF_JEQ, reg,
                                 BPF_REG_2, 0));
        break;
    }
    case 8: {
        /* 64-bit values: build immediate in R2 via `_bf_cmp_build_imm64`,
         * then compare with `JMP_REG`. R3 is clobbered as scratch. */
        int r;

        r = _bf_cmp_build_imm64(program, BPF_REG_2, BPF_REG_3,
                                _bf_read_u64(ref));
        if (r)
            return r;
        EMIT_FIXUP_JMP_NEXT_RULE(
            program, BPF_JMP_REG(op == BF_MATCHER_EQ ? BPF_JNE : BPF_JEQ, reg,
                                 BPF_REG_2, 0));
        break;
    }
    case 16: {
        /* 128-bit values: reg holds low 64 bits, reg+1 holds high 64 bits.
         * Compare each half against the reference. */
        const uint8_t *addr = ref;
        int r;

        /* First half of the reference goes into R3 (R4 is scratch). */
        r = _bf_cmp_build_imm64(program, BPF_REG_3, BPF_REG_4,
                                _bf_read_u64(addr));
        if (r)
            return r;

        if (op == BF_MATCHER_EQ) {
            /* EQ: any differing half sends us to the next rule. */
            EMIT_FIXUP_JMP_NEXT_RULE(program,
                                     BPF_JMP_REG(BPF_JNE, reg, BPF_REG_3, 0));

            r = _bf_cmp_build_imm64(program, BPF_REG_3, BPF_REG_4,
                                    _bf_read_u64(addr + 8));
            if (r)
                return r;
            EMIT_FIXUP_JMP_NEXT_RULE(
                program, BPF_JMP_REG(BPF_JNE, reg + 1, BPF_REG_3, 0));
        } else {
            /* NE: the address must differ in at least one half.
             * If the first half differs, the matcher matched — jump
             * past the second half check and the unconditional
             * jump-to-next-rule. If the first half matches, check the
             * second half: if it also matches, the full address is
             * equal, so the NE matcher fails — jump to next rule. */
            /* NOTE(review): both jump contexts appear to be resolved to
             * the current emit position when they go out of scope (via
             * _clean_bf_jmpctx_), i.e. just past the BPF_JMP_A below —
             * confirm against bf_jmpctx's cleanup implementation. */
            _clean_bf_jmpctx_ struct bf_jmpctx j0 = bf_jmpctx_default();
            _clean_bf_jmpctx_ struct bf_jmpctx j1 = bf_jmpctx_default();

            j0 =
                bf_jmpctx_get(program, BPF_JMP_REG(BPF_JNE, reg, BPF_REG_3, 0));

            r = _bf_cmp_build_imm64(program, BPF_REG_3, BPF_REG_4,
                                    _bf_read_u64(addr + 8));
            if (r)
                return r;
            j1 = bf_jmpctx_get(program,
                               BPF_JMP_REG(BPF_JNE, reg + 1, BPF_REG_3, 0));

            /* Both halves equal: NE failed, fall through to next rule. */
            EMIT_FIXUP_JMP_NEXT_RULE(program, BPF_JMP_A(0));
        }
        break;
    }
    default:
        return bf_err_r(-EINVAL, "unsupported comparison size %u", size);
    }

    return 0;
}
|
|
||
/**
 * @brief Emit instructions comparing the value in @p reg against @p ref
 *        under a network prefix mask.
 *
 * Both the loaded value and the reference are masked to the first
 * @p prefixlen bits before comparison. For 16-byte values, @p reg holds the
 * low 64 bits and <tt>reg + 1</tt> the high 64 bits; the masking clobbers
 * @p reg (and <tt>reg + 1</tt>) in place.
 *
 * @param program Program to emit into. Can't be NULL.
 * @param op Comparison operator: BF_MATCHER_EQ or BF_MATCHER_NE.
 * @param ref Reference value to compare against. Can't be NULL.
 * @param prefixlen Prefix length in bits.
 * @param size Size of the reference value in bytes (4 or 16).
 * @param reg Register holding the value to compare.
 * @return 0 on success, or a negative errno value on failure.
 */
int bf_cmp_masked_value(struct bf_program *program, enum bf_matcher_op op,
                        const void *ref, unsigned int prefixlen,
                        unsigned int size, int reg)
{
    assert(program);
    assert(ref);

    if (op != BF_MATCHER_EQ && op != BF_MATCHER_NE)
        return bf_err_r(-EINVAL, "unsupported operator %d", op);

    switch (size) {
    case 4: {
        uint32_t mask;
        const uint32_t *addr = ref;

        /* Build the mask as raw bytes; both addr and the mask carry the
         * same byte order, so the AND below is consistent. */
        _bf_prefix_to_mask(prefixlen, (uint8_t *)&mask, 4);

        EMIT(program, BPF_MOV32_IMM(BPF_REG_2, *addr));

        /* Full /32 prefix: masking is a no-op, skip the three AND insns. */
        if (mask != ~0U) {
            EMIT(program, BPF_MOV32_IMM(BPF_REG_3, mask));
            EMIT(program, BPF_ALU32_REG(BPF_AND, reg, BPF_REG_3));
            EMIT(program, BPF_ALU32_REG(BPF_AND, BPF_REG_2, BPF_REG_3));
        }

        EMIT_FIXUP_JMP_NEXT_RULE(
            program, BPF_JMP_REG(op == BF_MATCHER_EQ ? BPF_JNE : BPF_JEQ, reg,
                                 BPF_REG_2, 0));
        break;
    }
    case 16: {
        uint8_t mask[16];
        uint8_t masked_lo[8], masked_hi[8];
        const uint8_t *addr = ref;
        int r;

        _bf_prefix_to_mask(prefixlen, mask, 16);

        /* Apply mask to loaded reg/reg+1 if not a full /128: only the last
         * mask byte needs checking, it is 0xff iff prefixlen == 128. */
        if (mask[_BF_MASK_LAST_BYTE] != (uint8_t)~0) {
            r = _bf_cmp_build_imm64(program, BPF_REG_3, BPF_REG_4,
                                    _bf_read_u64(mask));
            if (r)
                return r;
            EMIT(program, BPF_ALU64_REG(BPF_AND, reg, BPF_REG_3));

            r = _bf_cmp_build_imm64(program, BPF_REG_3, BPF_REG_4,
                                    _bf_read_u64(mask + 8));
            if (r)
                return r;
            EMIT(program, BPF_ALU64_REG(BPF_AND, reg + 1, BPF_REG_3));
        }

        /* Mask the reference at generation time, so the emitted program
         * compares against pre-masked constants. */
        for (int i = 0; i < 8; i++)
            masked_lo[i] = addr[i] & mask[i];
        for (int i = 0; i < 8; i++)
            masked_hi[i] = addr[i + 8] & mask[i + 8];

        r = _bf_cmp_build_imm64(program, BPF_REG_3, BPF_REG_4,
                                _bf_read_u64(masked_lo));
        if (r)
            return r;

        if (op == BF_MATCHER_EQ) {
            /* EQ: any differing half sends us to the next rule. */
            EMIT_FIXUP_JMP_NEXT_RULE(program,
                                     BPF_JMP_REG(BPF_JNE, reg, BPF_REG_3, 0));

            r = _bf_cmp_build_imm64(program, BPF_REG_3, BPF_REG_4,
                                    _bf_read_u64(masked_hi));
            if (r)
                return r;

            EMIT_FIXUP_JMP_NEXT_RULE(
                program, BPF_JMP_REG(BPF_JNE, reg + 1, BPF_REG_3, 0));
        } else {
            /* NE: match if either half differs; if both halves are equal,
             * fall through to the jump-to-next-rule below. */
            /* NOTE(review): j0/j1 appear to resolve to the position right
             * after BPF_JMP_A when they leave scope (via _clean_bf_jmpctx_)
             * — confirm against bf_jmpctx's cleanup implementation. */
            _clean_bf_jmpctx_ struct bf_jmpctx j0 = bf_jmpctx_default();
            _clean_bf_jmpctx_ struct bf_jmpctx j1 = bf_jmpctx_default();

            j0 =
                bf_jmpctx_get(program, BPF_JMP_REG(BPF_JNE, reg, BPF_REG_3, 0));

            r = _bf_cmp_build_imm64(program, BPF_REG_3, BPF_REG_4,
                                    _bf_read_u64(masked_hi));
            if (r)
                return r;
            j1 = bf_jmpctx_get(program,
                               BPF_JMP_REG(BPF_JNE, reg + 1, BPF_REG_3, 0));

            EMIT_FIXUP_JMP_NEXT_RULE(program, BPF_JMP_A(0));
        }
        break;
    }
    default:
        return bf_err_r(-EINVAL, "unsupported masked comparison size %u", size);
    }

    return 0;
}
|
|
||
| int bf_cmp_range(struct bf_program *program, uint32_t min, uint32_t max, | ||
| int reg) | ||
| { | ||
| assert(program); | ||
|
|
||
| EMIT_FIXUP_JMP_NEXT_RULE(program, BPF_JMP_IMM(BPF_JLT, reg, min, 0)); | ||
| EMIT_FIXUP_JMP_NEXT_RULE(program, BPF_JMP_IMM(BPF_JGT, reg, max, 0)); | ||
|
|
||
| return 0; | ||
| } | ||
|
|
||
| int bf_cmp_bitfield(struct bf_program *program, enum bf_matcher_op op, | ||
qdeslandes marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| uint32_t flags, int reg) | ||
| { | ||
| assert(program); | ||
|
|
||
| if (op != BF_MATCHER_ANY && op != BF_MATCHER_ALL) | ||
| return bf_err_r(-EINVAL, "unsupported operator %d", op); | ||
|
|
||
| EMIT(program, BPF_ALU32_IMM(BPF_AND, reg, flags)); | ||
| EMIT_FIXUP_JMP_NEXT_RULE( | ||
| program, BPF_JMP_IMM(op == BF_MATCHER_ANY ? BPF_JEQ : BPF_JNE, reg, | ||
| op == BF_MATCHER_ANY ? 0 : flags, 0)); | ||
|
|
||
| return 0; | ||
| } | ||
Uh oh!
There was an error while loading. Please reload this page.