nft_cmp.c (10300B)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/if_arp.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/netfilter/nf_tables.h>

struct nft_cmp_expr {
	struct nft_data		data;
	u8			sreg;
	u8			len;
	enum nft_cmp_ops	op:8;
};

void nft_cmp_eval(const struct nft_expr *expr,
		  struct nft_regs *regs,
		  const struct nft_pktinfo *pkt)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);
	int d;

	d = memcmp(&regs->data[priv->sreg], &priv->data, priv->len);
	switch (priv->op) {
	case NFT_CMP_EQ:
		if (d != 0)
			goto mismatch;
		break;
	case NFT_CMP_NEQ:
		if (d == 0)
			goto mismatch;
		break;
	case NFT_CMP_LT:
		if (d == 0)
			goto mismatch;
		fallthrough;
	case NFT_CMP_LTE:
		if (d > 0)
			goto mismatch;
		break;
	case NFT_CMP_GT:
		if (d == 0)
			goto mismatch;
		fallthrough;
	case NFT_CMP_GTE:
		if (d < 0)
			goto mismatch;
		break;
	}
	return;

mismatch:
	regs->verdict.code = NFT_BREAK;
}
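
/*
 * Note on the comparison above: memcmp() yields a correct ordering as long
 * as the operands are stored most-significant byte first, which is how
 * nftables keeps network-byte-order selector data in its registers. The
 * LT/GT cases are built on top of the LTE/GTE checks: NFT_CMP_LT, for
 * example, first rejects d == 0 and then falls through to the LTE test
 * (reject d > 0), leaving only d < 0 as a match.
 */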

static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = {
	[NFTA_CMP_SREG]		= { .type = NLA_U32 },
	[NFTA_CMP_OP]		= { .type = NLA_U32 },
	[NFTA_CMP_DATA]		= { .type = NLA_NESTED },
};

static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
			const struct nlattr * const tb[])
{
	struct nft_cmp_expr *priv = nft_expr_priv(expr);
	struct nft_data_desc desc;
	int err;

	err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &desc,
			    tb[NFTA_CMP_DATA]);
	if (err < 0)
		return err;

	if (desc.type != NFT_DATA_VALUE) {
		err = -EINVAL;
		nft_data_release(&priv->data, desc.type);
		return err;
	}

	err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
	if (err < 0)
		return err;

	priv->op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
	priv->len = desc.len;
	return 0;
}

static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
		goto nla_put_failure;

	if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
			  NFT_DATA_VALUE, priv->len) < 0)
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

union nft_cmp_offload_data {
	u16	val16;
	u32	val32;
	u64	val64;
};

static void nft_payload_n2h(union nft_cmp_offload_data *data,
			    const u8 *val, u32 len)
{
	switch (len) {
	case 2:
		data->val16 = ntohs(*((u16 *)val));
		break;
	case 4:
		data->val32 = ntohl(*((u32 *)val));
		break;
	case 8:
		data->val64 = be64_to_cpu(*((u64 *)val));
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
			     struct nft_flow_rule *flow,
			     const struct nft_cmp_expr *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
	union nft_cmp_offload_data _data, _datamask;
	u8 *mask = (u8 *)&flow->match.mask;
	u8 *key = (u8 *)&flow->match.key;
	u8 *data, *datamask;

	if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
		return -EOPNOTSUPP;

	if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
		nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
		nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len);
		data = (u8 *)&_data;
		datamask = (u8 *)&_datamask;
	} else {
		data = (u8 *)&priv->data;
		datamask = (u8 *)&reg->mask;
	}

	memcpy(key + reg->offset, data, reg->len);
	memcpy(mask + reg->offset, datamask, reg->len);

	flow->match.dissector.used_keys |= BIT(reg->key);
	flow->match.dissector.offset[reg->key] = reg->base_offset;

	if (reg->key == FLOW_DISSECTOR_KEY_META &&
	    reg->offset == offsetof(struct nft_flow_key, meta.ingress_iftype) &&
	    nft_reg_load16(priv->data.data) != ARPHRD_ETHER)
		return -EOPNOTSUPP;

	nft_offload_update_dependency(ctx, &priv->data, reg->len);

	return 0;
}

static int nft_cmp_offload(struct nft_offload_ctx *ctx,
			   struct nft_flow_rule *flow,
			   const struct nft_expr *expr)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);

	return __nft_cmp_offload(ctx, flow, priv);
}

static const struct nft_expr_ops nft_cmp_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)),
	.eval		= nft_cmp_eval,
	.init		= nft_cmp_init,
	.dump		= nft_cmp_dump,
	.reduce		= NFT_REDUCE_READONLY,
	.offload	= nft_cmp_offload,
};

static int nft_cmp_fast_init(const struct nft_ctx *ctx,
			     const struct nft_expr *expr,
			     const struct nlattr * const tb[])
{
	struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
	struct nft_data_desc desc;
	struct nft_data data;
	int err;

	err = nft_data_init(NULL, &data, sizeof(data), &desc,
			    tb[NFTA_CMP_DATA]);
	if (err < 0)
		return err;

	err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
	if (err < 0)
		return err;

	desc.len *= BITS_PER_BYTE;

	priv->mask = nft_cmp_fast_mask(desc.len);
	priv->data = data.data[0] & priv->mask;
	priv->len  = desc.len;
	priv->inv  = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
	return 0;
}

static int nft_cmp_fast_offload(struct nft_offload_ctx *ctx,
				struct nft_flow_rule *flow,
				const struct nft_expr *expr)
{
	const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
	struct nft_cmp_expr cmp = {
		.data	= {
			.data	= {
				[0] = priv->data,
			},
		},
		.sreg	= priv->sreg,
		.len	= priv->len / BITS_PER_BYTE,
		.op	= priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
	};

	return __nft_cmp_offload(ctx, flow, &cmp);
}

static int nft_cmp_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
	enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;
	struct nft_data data;

	if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
		goto nla_put_failure;

	data.data[0] = priv->data;
	if (nft_data_dump(skb, NFTA_CMP_DATA, &data,
			  NFT_DATA_VALUE, priv->len / BITS_PER_BYTE) < 0)
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

const struct nft_expr_ops nft_cmp_fast_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp_fast_expr)),
	.eval		= NULL,	/* inlined */
	.init		= nft_cmp_fast_init,
	.dump		= nft_cmp_fast_dump,
	.reduce		= NFT_REDUCE_READONLY,
	.offload	= nft_cmp_fast_offload,
};
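
/*
 * nft_cmp_fast_ops has no .eval callback: evaluation is inlined into the
 * interpreter loop in net/netfilter/nf_tables_core.c. A rough sketch of
 * what that inlined fast path does (not code from this file) is a single
 * masked 32-bit compare, inverted via priv->inv:
 *
 *	static void nft_cmp_fast_eval(const struct nft_expr *expr,
 *				      struct nft_regs *regs)
 *	{
 *		const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
 *
 *		if (((regs->data[priv->sreg] & priv->mask) == priv->data) ^ priv->inv)
 *			return;
 *		regs->verdict.code = NFT_BREAK;
 *	}
 *
 * This works because nft_cmp_fast_init() above already stores priv->data
 * pre-masked with priv->mask.
 */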

static u32 nft_cmp_mask(u32 bitlen)
{
	return (__force u32)cpu_to_le32(~0U >> (sizeof(u32) * BITS_PER_BYTE - bitlen));
}

static void nft_cmp16_fast_mask(struct nft_data *data, unsigned int bitlen)
{
	int len = bitlen / BITS_PER_BYTE;
	int i, words = len / sizeof(u32);

	for (i = 0; i < words; i++) {
		data->data[i] = 0xffffffff;
		bitlen -= sizeof(u32) * BITS_PER_BYTE;
	}

	if (len % sizeof(u32))
		data->data[i++] = nft_cmp_mask(bitlen);

	for (; i < 4; i++)
		data->data[i] = 0;
}

static int nft_cmp16_fast_init(const struct nft_ctx *ctx,
			       const struct nft_expr *expr,
			       const struct nlattr * const tb[])
{
	struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
	struct nft_data_desc desc;
	int err;

	err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &desc,
			    tb[NFTA_CMP_DATA]);
	if (err < 0)
		return err;

	err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
	if (err < 0)
		return err;

	nft_cmp16_fast_mask(&priv->mask, desc.len * BITS_PER_BYTE);
	priv->inv = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
	priv->len = desc.len;

	return 0;
}

static int nft_cmp16_fast_offload(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_expr *expr)
{
	const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
	struct nft_cmp_expr cmp = {
		.data	= priv->data,
		.sreg	= priv->sreg,
		.len	= priv->len,
		.op	= priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
	};

	return __nft_cmp_offload(ctx, flow, &cmp);
}

static int nft_cmp16_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
	enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;

	if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
		goto nla_put_failure;

	if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
			  NFT_DATA_VALUE, priv->len) < 0)
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

const struct nft_expr_ops nft_cmp16_fast_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp16_fast_expr)),
	.eval		= NULL,	/* inlined */
	.init		= nft_cmp16_fast_init,
	.dump		= nft_cmp16_fast_dump,
	.reduce		= NFT_REDUCE_READONLY,
	.offload	= nft_cmp16_fast_offload,
};
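
/*
 * As with nft_cmp_fast_ops, the 16-byte variant is evaluated inline from
 * nf_tables_core.c; conceptually it masks the source register area with
 * priv->mask, compares the result word-wise against priv->data and honours
 * priv->inv, avoiding the memcmp()/switch of nft_cmp_eval().
 *
 * Worked example for nft_cmp16_fast_mask(): a 6-byte selector (48 bits)
 * produces data[0] = 0xffffffff, data[1] = cpu_to_le32(0x0000ffff) (i.e.
 * the first two bytes of that word in memory), data[2] = data[3] = 0, so
 * exactly the first six bytes of the register area are significant.
 */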

static const struct nft_expr_ops *
nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
	struct nft_data_desc desc;
	struct nft_data data;
	enum nft_cmp_ops op;
	u8 sreg;
	int err;

	if (tb[NFTA_CMP_SREG] == NULL ||
	    tb[NFTA_CMP_OP] == NULL ||
	    tb[NFTA_CMP_DATA] == NULL)
		return ERR_PTR(-EINVAL);

	op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
	switch (op) {
	case NFT_CMP_EQ:
	case NFT_CMP_NEQ:
	case NFT_CMP_LT:
	case NFT_CMP_LTE:
	case NFT_CMP_GT:
	case NFT_CMP_GTE:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	err = nft_data_init(NULL, &data, sizeof(data), &desc,
			    tb[NFTA_CMP_DATA]);
	if (err < 0)
		return ERR_PTR(err);

	if (desc.type != NFT_DATA_VALUE)
		goto err1;

	sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));

	if (op == NFT_CMP_EQ || op == NFT_CMP_NEQ) {
		if (desc.len <= sizeof(u32))
			return &nft_cmp_fast_ops;
		else if (desc.len <= sizeof(data) &&
			 ((sreg >= NFT_REG_1 && sreg <= NFT_REG_4) ||
			  (sreg >= NFT_REG32_00 && sreg <= NFT_REG32_12 && sreg % 2 == 0)))
			return &nft_cmp16_fast_ops;
	}
	return &nft_cmp_ops;
err1:
	nft_data_release(&data, desc.type);
	return ERR_PTR(-EINVAL);
}

struct nft_expr_type nft_cmp_type __read_mostly = {
	.name		= "cmp",
	.select_ops	= nft_cmp_select_ops,
	.policy		= nft_cmp_policy,
	.maxattr	= NFTA_CMP_MAX,
	.owner		= THIS_MODULE,
};
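
/*
 * How the three implementations end up being used (illustrative nft rules,
 * not part of this file): a small equality match such as "tcp dport 22"
 * fits in a single 32-bit register and selects nft_cmp_fast_ops; a 16-byte
 * equality such as "ip6 daddr 2001:db8::1" selects nft_cmp16_fast_ops when
 * the source register is suitably aligned; ordered comparisons (LT/LTE/
 * GT/GTE) and anything wider or unaligned fall back to the generic
 * nft_cmp_ops.
 */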