author		Zi Shen Lim <zlim.lnx@gmail.com>	2014-07-03 16:56:54 +0200
committer	David S. Miller <davem@davemloft.net>	2014-07-08 23:20:00 +0200
commit		9f12fbe603f7ae346b2b46008e325f0c9a68e55d (patch)
tree		440fb57e08880b9a7fe5f9180dd7da9dae50a19c /net/core
parent		declance: Fix 64-bit compilation warnings (diff)
download	linux-9f12fbe603f7ae346b2b46008e325f0c9a68e55d.tar.xz
		linux-9f12fbe603f7ae346b2b46008e325f0c9a68e55d.zip
net: filter: move load_pointer() into filter.h
load_pointer() is already a static inline function. Let's move it into
filter.h so BPF JIT implementations can reuse this function.

Since we're exporting this function, let's also rename it to
bpf_load_pointer() for clarity.

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
Reviewed-by: Daniel Borkmann <dborkman@redhat.com>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
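As a quick illustration of what the move amounts to, here is a sketch of the
relocated helper as it would read in filter.h; the body is taken verbatim from
the load_pointer() removed in the diff below, with only the rename applied
(its exact placement inside include/linux/filter.h is assumed, since the
header side is not part of this diffstat view):

/* Formerly load_pointer() in net/core/filter.c; renamed and exposed
 * via filter.h so BPF JIT backends can reuse it.
 */
static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
				      unsigned int size, void *buffer)
{
	/* Non-negative offsets read linear/paged skb data. */
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);

	/* Negative offsets (SKF_LL_OFF/SKF_NET_OFF relative loads) go
	 * through the existing negative-offset helper.
	 */
	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}

With this in a shared header, an arch JIT's skb-load helpers can call
bpf_load_pointer() directly instead of duplicating the negative-offset
handling.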
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/filter.c	15
1 file changed, 3 insertions(+), 12 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index 1dbf6462f766..87af1e3e56c0 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -84,15 +84,6 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
return NULL;
}
-static inline void *load_pointer(const struct sk_buff *skb, int k,
- unsigned int size, void *buffer)
-{
- if (k >= 0)
- return skb_header_pointer(skb, k, size, buffer);
-
- return bpf_internal_load_pointer_neg_helper(skb, k, size);
-}
-
/**
* sk_filter - run a packet through a socket filter
* @sk: sock associated with &sk_buff
@@ -537,7 +528,7 @@ load_word:
* BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
*/
- ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
+ ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
if (likely(ptr != NULL)) {
BPF_R0 = get_unaligned_be32(ptr);
CONT;
@@ -547,7 +538,7 @@ load_word:
LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
off = IMM;
load_half:
- ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
+ ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
if (likely(ptr != NULL)) {
BPF_R0 = get_unaligned_be16(ptr);
CONT;
@@ -557,7 +548,7 @@ load_half:
LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
off = IMM;
load_byte:
- ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
+ ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
if (likely(ptr != NULL)) {
BPF_R0 = *(u8 *)ptr;
CONT;