cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pppoatm.c (15358B)


// SPDX-License-Identifier: GPL-2.0-or-later
/* net/atm/pppoatm.c - RFC2364 PPP over ATM/AAL5 */

/* Copyright 1999-2000 by Mitchell Blank Jr */
/* Based on clip.c; 1995-1999 by Werner Almesberger, EPFL LRC/ICA */
/* And on ppp_async.c; Copyright 1999 Paul Mackerras */
/* And help from Jens Axboe */

/*
 * This driver provides the encapsulation and framing for sending
 * and receiving PPP frames in ATM AAL5 PDUs.
 */

/*
 * One shortcoming of this driver is that it does not comply with
 * section 8 of RFC2364 - we are supposed to detect a change
 * in encapsulation and immediately abort the connection (in order
 * to avoid a black hole being created if our peer loses state
 * and changes encapsulation unilaterally).  However, since the
 * ppp_generic layer actually does the decapsulation, we need
 * a way of notifying it when we _think_ there might be a problem.
 * There are two cases:
 *   1.	LLC-encapsulation was missing when it was enabled.  In
 *	this case, we should tell the upper layer "tear down
 *	this session if this skb looks ok to you"
 *   2.	LLC-encapsulation was present when it was disabled.  Then
 *	we need to tell the upper layer "this packet may be
 *	ok, but if it's in error, tear down the session"
 * These hooks are not yet available in ppp_generic.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/capability.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/atmppp.h>

#include "common.h"

enum pppoatm_encaps {
	e_autodetect = PPPOATM_ENCAPS_AUTODETECT,
	e_vc = PPPOATM_ENCAPS_VC,
	e_llc = PPPOATM_ENCAPS_LLC,
};

struct pppoatm_vcc {
	struct atm_vcc	*atmvcc;	/* VCC descriptor */
	void (*old_push)(struct atm_vcc *, struct sk_buff *);
	void (*old_pop)(struct atm_vcc *, struct sk_buff *);
	void (*old_release_cb)(struct atm_vcc *);
	struct module *old_owner;
					/* keep old push/pop for detaching */
	enum pppoatm_encaps encaps;
	atomic_t inflight;
	unsigned long blocked;
	int flags;			/* SC_COMP_PROT - compress protocol */
	struct ppp_channel chan;	/* interface to generic ppp layer */
	struct tasklet_struct wakeup_tasklet;
};

/*
 * We want to allow two packets in the queue. The one that's currently in
 * flight, and *one* queued up ready for the ATM device to send immediately
 * from its TX done IRQ. We want to be able to use atomic_inc_not_zero(), so
 * inflight == -2 represents an empty queue, -1 one packet, and zero means
 * there are two packets in the queue.
 */
#define NONE_INFLIGHT -2

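/*
 * Bit number in pvcc->blocked: set when we have had to refuse a packet and
 * therefore owe ppp_generic a wakeup, which pppoatm_pop() or
 * pppoatm_release_cb() later delivers via the wakeup tasklet.
 */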
#define BLOCKED 0

/*
 * Header used for LLC Encapsulated PPP (4 bytes) followed by the LCP protocol
 * ID (0xC021) used in autodetection
 */
static const unsigned char pppllc[6] = { 0xFE, 0xFE, 0x03, 0xCF, 0xC0, 0x21 };
#define LLC_LEN		(4)

static inline struct pppoatm_vcc *atmvcc_to_pvcc(const struct atm_vcc *atmvcc)
{
	return (struct pppoatm_vcc *) (atmvcc->user_back);
}

static inline struct pppoatm_vcc *chan_to_pvcc(const struct ppp_channel *chan)
{
	return (struct pppoatm_vcc *) (chan->private);
}

/*
 * We can't do this directly from our _pop handler, since the ppp code
 * doesn't want to be called in interrupt context, so we do it from
 * a tasklet
 */
static void pppoatm_wakeup_sender(struct tasklet_struct *t)
{
	struct pppoatm_vcc *pvcc = from_tasklet(pvcc, t, wakeup_tasklet);

	ppp_output_wakeup(&pvcc->chan);
}

static void pppoatm_release_cb(struct atm_vcc *atmvcc)
{
	struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);

	/*
	 * As in pppoatm_pop(), it's safe to clear the BLOCKED bit here because
	 * the wakeup *can't* race with pppoatm_send(). They both hold the PPP
	 * channel's ->downl lock. And the potential race with *setting* it,
	 * which leads to the double-check dance in pppoatm_may_send(), doesn't
	 * exist here. In the sock_owned_by_user() case in pppoatm_send(), we
	 * set the BLOCKED bit while the socket is still locked. We know that
	 * ->release_cb() can't be called until that's done.
	 */
	if (test_and_clear_bit(BLOCKED, &pvcc->blocked))
		tasklet_schedule(&pvcc->wakeup_tasklet);
	if (pvcc->old_release_cb)
		pvcc->old_release_cb(atmvcc);
}
/*
 * This gets called every time the ATM card has finished sending our
 * skb.  The ->old_pop will take care of normal ATM flow control,
 * but we also need to wake up the device if we blocked it.
 */
static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
	struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);

	pvcc->old_pop(atmvcc, skb);
	atomic_dec(&pvcc->inflight);

	/*
	 * We always used to run the wakeup tasklet unconditionally here, for
	 * fear of race conditions where we clear the BLOCKED flag just as we
	 * refuse another packet in pppoatm_send(). This was quite inefficient.
	 *
	 * In fact it's OK. The PPP core will only ever call pppoatm_send()
	 * while holding the channel->downl lock. And ppp_output_wakeup() as
	 * called by the tasklet will *also* grab that lock. So even if another
	 * CPU is in pppoatm_send() right now, the tasklet isn't going to race
	 * with it. The wakeup *will* happen after the other CPU is safely out
	 * of pppoatm_send() again.
	 *
	 * So if the CPU in pppoatm_send() has already set the BLOCKED bit and
	 * is about to return, that's fine. We trigger a wakeup which will
	 * happen later. And if the CPU in pppoatm_send() *hasn't* set the
	 * BLOCKED bit yet, that's fine too because of the double check in
	 * pppoatm_may_send() which is commented there.
	 */
	if (test_and_clear_bit(BLOCKED, &pvcc->blocked))
		tasklet_schedule(&pvcc->wakeup_tasklet);
}

/*
 * Unbind from PPP - currently we only do this when closing the socket,
 * but we could put this into an ioctl if need be
 */
static void pppoatm_unassign_vcc(struct atm_vcc *atmvcc)
{
	struct pppoatm_vcc *pvcc;
	pvcc = atmvcc_to_pvcc(atmvcc);
	atmvcc->push = pvcc->old_push;
	atmvcc->pop = pvcc->old_pop;
	atmvcc->release_cb = pvcc->old_release_cb;
	tasklet_kill(&pvcc->wakeup_tasklet);
	ppp_unregister_channel(&pvcc->chan);
	atmvcc->user_back = NULL;
	kfree(pvcc);
}

/* Called when an AAL5 PDU comes in */
static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
	struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
	pr_debug("\n");
	if (skb == NULL) {			/* VCC was closed */
		struct module *module;

		pr_debug("removing ATMPPP VCC %p\n", pvcc);
		module = pvcc->old_owner;
		pppoatm_unassign_vcc(atmvcc);
		atmvcc->push(atmvcc, NULL);	/* Pass along bad news */
		module_put(module);
		return;
	}
	atm_return(atmvcc, skb->truesize);
	switch (pvcc->encaps) {
	case e_llc:
		if (skb->len < LLC_LEN ||
		    memcmp(skb->data, pppllc, LLC_LEN))
			goto error;
		skb_pull(skb, LLC_LEN);
		break;
	case e_autodetect:
		if (pvcc->chan.ppp == NULL) {	/* Not bound yet! */
			kfree_skb(skb);
			return;
		}
		if (skb->len >= sizeof(pppllc) &&
		    !memcmp(skb->data, pppllc, sizeof(pppllc))) {
			pvcc->encaps = e_llc;
			skb_pull(skb, LLC_LEN);
			break;
		}
		if (skb->len >= (sizeof(pppllc) - LLC_LEN) &&
		    !memcmp(skb->data, &pppllc[LLC_LEN],
		    sizeof(pppllc) - LLC_LEN)) {
			pvcc->encaps = e_vc;
			pvcc->chan.mtu += LLC_LEN;
			break;
		}
		pr_debug("Couldn't autodetect yet (skb: %6ph)\n", skb->data);
		goto error;
	case e_vc:
		break;
	}
	ppp_input(&pvcc->chan, skb);
	return;

error:
	kfree_skb(skb);
	ppp_input_error(&pvcc->chan, 0);
}

static int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
{
	/*
	 * It's not clear that we need to bother with using atm_may_send()
	 * to check we don't exceed sk->sk_sndbuf. If userspace sets a
	 * value of sk_sndbuf which is lower than the MTU, we're going to
	 * block forever. But the code always did that before we introduced
	 * the packet count limit, so...
	 */
	if (atm_may_send(pvcc->atmvcc, size) &&
	    atomic_inc_not_zero(&pvcc->inflight))
		return 1;

	/*
	 * We use test_and_set_bit() rather than set_bit() here because
	 * we need to ensure there's a memory barrier after it. The bit
	 * *must* be set before we do the atomic_inc() on pvcc->inflight.
	 * There's no smp_mb__after_set_bit(), so it's this or abuse
	 * smp_mb__after_atomic().
	 */
	test_and_set_bit(BLOCKED, &pvcc->blocked);

	/*
	 * We may have raced with pppoatm_pop(). If it ran for the
	 * last packet in the queue, *just* before we set the BLOCKED
	 * bit, then it might never run again and the channel could
	 * remain permanently blocked. Cope with that race by checking
	 * *again*. If it did run in that window, we'll have space on
	 * the queue now and can return success. It's harmless to leave
	 * the BLOCKED flag set, since it's only used as a trigger to
	 * run the wakeup tasklet. Another wakeup will never hurt.
	 * If pppoatm_pop() is running but hasn't got as far as making
	 * space on the queue yet, then it hasn't checked the BLOCKED
	 * flag yet either, so we're safe in that case too. It'll issue
	 * an "immediate" wakeup... where "immediate" actually involves
	 * taking the PPP channel's ->downl lock, which is held by the
	 * code path that calls pppoatm_send(), and is thus going to
	 * wait for us to finish.
	 */
	if (atm_may_send(pvcc->atmvcc, size) &&
	    atomic_inc_not_zero(&pvcc->inflight))
		return 1;

	return 0;
}
/*
 * Called by ppp_generic.c to send a packet - returns true if the packet
 * was accepted.  If we return false, then it's our job to call
 * ppp_output_wakeup(chan) when we're feeling more up to it.
 * Note that in the ENOMEM case (as opposed to the !atm_may_send case)
 * we should really drop the packet, but the generic layer doesn't
 * support this yet.  We just return 'DROP_PACKET', which we actually define
 * as success, just to be clear what we're really doing.
 */
#define DROP_PACKET 1
static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct pppoatm_vcc *pvcc = chan_to_pvcc(chan);
	struct atm_vcc *vcc;
	int ret;

	ATM_SKB(skb)->vcc = pvcc->atmvcc;
	pr_debug("(skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc);
	if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT))
		(void) skb_pull(skb, 1);

	vcc = ATM_SKB(skb)->vcc;
	bh_lock_sock(sk_atm(vcc));
	if (sock_owned_by_user(sk_atm(vcc))) {
		/*
		 * Needs to happen (and be flushed, hence test_and_) before we unlock
		 * the socket. It needs to be seen by the time our ->release_cb gets
		 * called.
		 */
		test_and_set_bit(BLOCKED, &pvcc->blocked);
		goto nospace;
	}
	if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
	    test_bit(ATM_VF_CLOSE, &vcc->flags) ||
	    !test_bit(ATM_VF_READY, &vcc->flags)) {
		bh_unlock_sock(sk_atm(vcc));
		kfree_skb(skb);
		return DROP_PACKET;
	}

	switch (pvcc->encaps) {		/* LLC encapsulation needed */
	case e_llc:
		if (skb_headroom(skb) < LLC_LEN) {
			struct sk_buff *n;
			n = skb_realloc_headroom(skb, LLC_LEN);
			if (n != NULL &&
			    !pppoatm_may_send(pvcc, n->truesize)) {
				kfree_skb(n);
				goto nospace;
			}
			consume_skb(skb);
			skb = n;
			if (skb == NULL) {
				bh_unlock_sock(sk_atm(vcc));
				return DROP_PACKET;
			}
		} else if (!pppoatm_may_send(pvcc, skb->truesize))
			goto nospace;
		memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN);
		break;
	case e_vc:
		if (!pppoatm_may_send(pvcc, skb->truesize))
			goto nospace;
		break;
	case e_autodetect:
		bh_unlock_sock(sk_atm(vcc));
		pr_debug("Trying to send without setting encaps!\n");
		kfree_skb(skb);
		return 1;
	}

	atm_account_tx(vcc, skb);
	pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
		 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
	ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
	    ? DROP_PACKET : 1;
	bh_unlock_sock(sk_atm(vcc));
	return ret;
nospace:
	bh_unlock_sock(sk_atm(vcc));
	/*
	 * We don't have space to send this SKB now, but we might have
	 * already applied SC_COMP_PROT compression, so we may need to undo it.
	 */
	if ((pvcc->flags & SC_COMP_PROT) && skb_headroom(skb) > 0 &&
	    skb->data[-1] == '\0')
		(void) skb_push(skb, 1);
	return 0;
}

/* This handles ioctls sent to the /dev/ppp interface */
static int pppoatm_devppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
	unsigned long arg)
{
	switch (cmd) {
	case PPPIOCGFLAGS:
		return put_user(chan_to_pvcc(chan)->flags, (int __user *) arg)
		    ? -EFAULT : 0;
	case PPPIOCSFLAGS:
		return get_user(chan_to_pvcc(chan)->flags, (int __user *) arg)
		    ? -EFAULT : 0;
	}
	return -ENOTTY;
}

static const struct ppp_channel_ops pppoatm_ops = {
	.start_xmit = pppoatm_send,
	.ioctl = pppoatm_devppp_ioctl,
};

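/*
 * Bind a PPP channel to this VCC.  Reached from pppoatm_ioctl() when
 * userspace issues ATM_SETBACKEND with ATM_BACKEND_PPP; the old
 * push/pop/release_cb handlers are saved so pppoatm_unassign_vcc()
 * can restore them later.
 */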
static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
{
	struct atm_backend_ppp be;
	struct pppoatm_vcc *pvcc;
	int err;

	if (copy_from_user(&be, arg, sizeof be))
		return -EFAULT;
	if (be.encaps != PPPOATM_ENCAPS_AUTODETECT &&
	    be.encaps != PPPOATM_ENCAPS_VC && be.encaps != PPPOATM_ENCAPS_LLC)
		return -EINVAL;
	pvcc = kzalloc(sizeof(*pvcc), GFP_KERNEL);
	if (pvcc == NULL)
		return -ENOMEM;
	pvcc->atmvcc = atmvcc;

	/* Maximum is zero, so that we can use atomic_inc_not_zero() */
	atomic_set(&pvcc->inflight, NONE_INFLIGHT);
	pvcc->old_push = atmvcc->push;
	pvcc->old_pop = atmvcc->pop;
	pvcc->old_owner = atmvcc->owner;
	pvcc->old_release_cb = atmvcc->release_cb;
	pvcc->encaps = (enum pppoatm_encaps) be.encaps;
	pvcc->chan.private = pvcc;
	pvcc->chan.ops = &pppoatm_ops;
	pvcc->chan.mtu = atmvcc->qos.txtp.max_sdu - PPP_HDRLEN -
	    (be.encaps == e_vc ? 0 : LLC_LEN);
	tasklet_setup(&pvcc->wakeup_tasklet, pppoatm_wakeup_sender);
	err = ppp_register_channel(&pvcc->chan);
	if (err != 0) {
		kfree(pvcc);
		return err;
	}
	atmvcc->user_back = pvcc;
	atmvcc->push = pppoatm_push;
	atmvcc->pop = pppoatm_pop;
	atmvcc->release_cb = pppoatm_release_cb;
	__module_get(THIS_MODULE);
	atmvcc->owner = THIS_MODULE;

	/* re-process everything received between connection setup and
	   backend setup */
	vcc_process_recv_queue(atmvcc);
	return 0;
}

/*
 * This handles ioctls actually performed on our vcc - we must return
 * -ENOIOCTLCMD for any unrecognized ioctl
 */
static int pppoatm_ioctl(struct socket *sock, unsigned int cmd,
	unsigned long arg)
{
	struct atm_vcc *atmvcc = ATM_SD(sock);
	void __user *argp = (void __user *)arg;

	if (cmd != ATM_SETBACKEND && atmvcc->push != pppoatm_push)
		return -ENOIOCTLCMD;
	switch (cmd) {
	case ATM_SETBACKEND: {
		atm_backend_t b;
		if (get_user(b, (atm_backend_t __user *) argp))
			return -EFAULT;
		if (b != ATM_BACKEND_PPP)
			return -ENOIOCTLCMD;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (sock->state != SS_CONNECTED)
			return -EINVAL;
		return pppoatm_assign_vcc(atmvcc, argp);
		}
	case PPPIOCGCHAN:
		return put_user(ppp_channel_index(&atmvcc_to_pvcc(atmvcc)->
		    chan), (int __user *) argp) ? -EFAULT : 0;
	case PPPIOCGUNIT:
		return put_user(ppp_unit_number(&atmvcc_to_pvcc(atmvcc)->
		    chan), (int __user *) argp) ? -EFAULT : 0;
	}
	return -ENOIOCTLCMD;
}
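/*
 * Illustrative userspace sketch of how a PPP daemon attaches this backend
 * (atm_fd is a placeholder for a connected ATM socket fd; error handling
 * omitted).  It sets the backend, reads back the channel index, and later
 * binds that index on /dev/ppp with PPPIOCATTCHAN:
 *
 *	struct atm_backend_ppp be = {
 *		.backend_num = ATM_BACKEND_PPP,
 *		.encaps = PPPOATM_ENCAPS_VC,
 *	};
 *	int chindex;
 *
 *	ioctl(atm_fd, ATM_SETBACKEND, &be);	// handled by pppoatm_ioctl() above
 *	ioctl(atm_fd, PPPIOCGCHAN, &chindex);	// channel index for /dev/ppp
 */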

static struct atm_ioctl pppoatm_ioctl_ops = {
	.owner	= THIS_MODULE,
	.ioctl	= pppoatm_ioctl,
};

static int __init pppoatm_init(void)
{
	register_atm_ioctl(&pppoatm_ioctl_ops);
	return 0;
}

static void __exit pppoatm_exit(void)
{
	deregister_atm_ioctl(&pppoatm_ioctl_ops);
}

module_init(pppoatm_init);
module_exit(pppoatm_exit);

MODULE_AUTHOR("Mitchell Blank Jr <mitch@sfgoth.com>");
MODULE_DESCRIPTION("RFC2364 PPP over ATM/AAL5");
MODULE_LICENSE("GPL");