author		David S. Miller <davem@davemloft.net>	2014-08-02 15:04:10 -0700
committer	David S. Miller <davem@davemloft.net>	2014-08-02 15:04:10 -0700
commit		e339756c9995648eecd015391f66baf2fd251fec (patch)
tree		dcb1aba57530e6c9426a81758173ca146ffafcaf /lib
parent		4330487acfff0cf1d7b14d238583a182e0a444bb (diff)
parent		7ae457c1e5b45a1b826fad9d62b32191d2bdcfdb (diff)
Merge branch 'filter-next'
Alexei Starovoitov says:

====================
net: filter: split sk_filter into socket and bpf, cleanup names

The main goal of the series is to split 'struct sk_filter' into socket
and bpf parts and cleanup names in the following way:

- everything that deals with sockets keeps 'sk_*' prefix
- everything that is pure BPF is changed to 'bpf_*' prefix

split 'struct sk_filter' into

struct sk_filter {
	atomic_t	refcnt;
	struct rcu_head	rcu;
	struct bpf_prog	*prog;
};

and

struct bpf_prog {
	u32			jited:1,
				len:31;
	struct sock_fprog_kern	*orig_prog;
	unsigned int		(*bpf_func)(const struct sk_buff *skb,
					    const struct bpf_insn *filter);
	union {
		struct sock_filter	insns[0];
		struct bpf_insn		insnsi[0];
		struct work_struct	work;
	};
};

so that 'struct bpf_prog' can be used independent of sockets and
cleans up 'unattached' bpf use cases: isdn, ppp, team, seccomp, ptp,
xt_bpf, cls_bpf, test_bpf, which don't need refcnt/rcu fields.

It's a follow up to the rcu cleanup started by Pablo in commit
34c5bd66e5 ("net: filter: don't release unattached filter through
call_rcu()")

Patch 1 - cleans up socket memory charging and makes it possible for
functions sk(bpf)_migrate_filter(), sk(bpf)_prepare_filter() to be
socket independent

Patches 2-4 - trivial renames

Patch 5 - sk_filter split and renames of related sk_*() functions
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
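[Editor's aid, not part of the series: a minimal sketch of the renamed unattached (classic BPF) API in use, assuming kernel context on a post-series tree. The example_unattached() helper and its one-instruction insns[] program are hypothetical; the renamed calls themselves are the ones this series introduces.]

	#include <linux/filter.h>

	/* Hypothetical one-instruction classic filter: accept packet. */
	static struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffff),
	};

	static int example_unattached(struct sk_buff *skb)
	{
		struct sock_fprog_kern fprog = {
			.len	= ARRAY_SIZE(insns),
			.filter	= insns,
		};
		struct bpf_prog *prog;	/* was: struct sk_filter */
		u32 res;
		int err;

		/* was: sk_unattached_filter_create() */
		err = bpf_prog_create(&prog, &fprog);
		if (err)
			return err;

		res = BPF_PROG_RUN(prog, skb);	/* was: SK_RUN_FILTER() */

		/* was: sk_unattached_filter_destroy() */
		bpf_prog_destroy(prog);
		return res;
	}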
Diffstat (limited to 'lib')
 lib/test_bpf.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
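[Editor's aid: condensed from the hunks below, not verbatim. The INTERNAL (eBPF) path of test_bpf.c now builds a bare struct bpf_prog with the renamed bpf_*() helpers; flen and insns_int stand in for the per-test fields, and error paths are trimmed.]

	struct bpf_prog *fp;	/* was: struct sk_filter */

	fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);	/* was: sk_filter_size() */
	if (fp == NULL)
		return -ENOMEM;

	fp->len = flen;
	memcpy(fp->insnsi, insns_int, flen * sizeof(struct bpf_insn));

	bpf_prog_select_runtime(fp);	/* was: sk_filter_select_runtime() */

	/* ... run via BPF_PROG_RUN(fp, data), then release: */
	bpf_prog_free(fp);		/* was: sk_filter_free() */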
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 5f48623ee1a7..89e0345733bd 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1761,9 +1761,9 @@ static int probe_filter_length(struct sock_filter *fp)
return len + 1;
}
-static struct sk_filter *generate_filter(int which, int *err)
+static struct bpf_prog *generate_filter(int which, int *err)
{
- struct sk_filter *fp;
+ struct bpf_prog *fp;
struct sock_fprog_kern fprog;
unsigned int flen = probe_filter_length(tests[which].u.insns);
__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
@@ -1773,7 +1773,7 @@ static struct sk_filter *generate_filter(int which, int *err)
fprog.filter = tests[which].u.insns;
fprog.len = flen;
- *err = sk_unattached_filter_create(&fp, &fprog);
+ *err = bpf_prog_create(&fp, &fprog);
if (tests[which].aux & FLAG_EXPECTED_FAIL) {
if (*err == -EINVAL) {
pr_cont("PASS\n");
@@ -1798,7 +1798,7 @@ static struct sk_filter *generate_filter(int which, int *err)
break;
case INTERNAL:
- fp = kzalloc(sk_filter_size(flen), GFP_KERNEL);
+ fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
if (fp == NULL) {
pr_cont("UNEXPECTED_FAIL no memory left\n");
*err = -ENOMEM;
@@ -1809,7 +1809,7 @@ static struct sk_filter *generate_filter(int which, int *err)
memcpy(fp->insnsi, tests[which].u.insns_int,
fp->len * sizeof(struct bpf_insn));
- sk_filter_select_runtime(fp);
+ bpf_prog_select_runtime(fp);
break;
}
@@ -1817,21 +1817,21 @@ static struct sk_filter *generate_filter(int which, int *err)
return fp;
}
-static void release_filter(struct sk_filter *fp, int which)
+static void release_filter(struct bpf_prog *fp, int which)
{
__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
switch (test_type) {
case CLASSIC:
- sk_unattached_filter_destroy(fp);
+ bpf_prog_destroy(fp);
break;
case INTERNAL:
- sk_filter_free(fp);
+ bpf_prog_free(fp);
break;
}
}
-static int __run_one(const struct sk_filter *fp, const void *data,
+static int __run_one(const struct bpf_prog *fp, const void *data,
int runs, u64 *duration)
{
u64 start, finish;
@@ -1840,7 +1840,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
start = ktime_to_us(ktime_get());
for (i = 0; i < runs; i++)
- ret = SK_RUN_FILTER(fp, data);
+ ret = BPF_PROG_RUN(fp, data);
finish = ktime_to_us(ktime_get());
@@ -1850,7 +1850,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
return ret;
}
-static int run_one(const struct sk_filter *fp, struct bpf_test *test)
+static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
{
int err_cnt = 0, i, runs = MAX_TESTRUNS;
@@ -1884,7 +1884,7 @@ static __init int test_bpf(void)
int i, err_cnt = 0, pass_cnt = 0;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
- struct sk_filter *fp;
+ struct bpf_prog *fp;
int err;
pr_info("#%d %s ", i, tests[i].descr);