int debug; /* debug flags 70 */
struct slcompress *vj; /* state for VJ header compression */
enum NPmode npmode[NUM_NP]; /* what to do with each net proto 78 */
- struct sk_buff *xmit_pending; /* a packet ready to go out 88 */
struct compressor *xcomp; /* transmit packet compressor 8c */
void *xc_state; /* its internal state 90 */
struct compressor *rcomp; /* receive decompressor 94 */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct file *file, unsigned int cmd, unsigned long arg);
static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
-static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
-static void ppp_push(struct ppp *ppp);
+static int ppp_prepare_tx_skb(struct ppp *ppp, struct sk_buff **pskb);
+static int ppp_push(struct ppp *ppp, struct sk_buff *skb);
static void ppp_channel_push(struct channel *pch);
static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
struct channel *pch);
*/
/* Called to do any work queued up on the transmit side that can now be done */
+static void ppp_xmit_flush(struct ppp *ppp)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&ppp->file.xq))) {
+ if (unlikely(!ppp_push(ppp, skb))) {
+ skb_queue_head(&ppp->file.xq, skb);
+ return;
+ }
+ }
+ /* The backlog is fully drained: tell the core net code that we can
+ * accept more packets.
+ */
+ netif_wake_queue(ppp->dev);
+}
+
static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
ppp_xmit_lock(ppp);
- if (!ppp->closing) {
- ppp_push(ppp);
-
- if (skb)
+ if (unlikely(ppp->closing)) {
+ kfree_skb(skb);
+ goto out;
+ }
+ if (unlikely(ppp_prepare_tx_skb(ppp, &skb)))
+ goto out;
+ /* Fastpath: No backlog, just send the new skb. */
+ if (likely(skb_queue_empty(&ppp->file.xq))) {
+ if (unlikely(!ppp_push(ppp, skb))) {
skb_queue_tail(&ppp->file.xq, skb);
- while (!ppp->xmit_pending &&
- (skb = skb_dequeue(&ppp->file.xq)))
- ppp_send_frame(ppp, skb);
- /* If there's no work left to do, tell the core net
- code that we can accept some more. */
- if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
- netif_wake_queue(ppp->dev);
- else
netif_stop_queue(ppp->dev);
- } else {
- kfree_skb(skb);
+ }
+ goto out;
}
+
+ /* Slowpath: Enqueue the new skb and process backlog */
+ skb_queue_tail(&ppp->file.xq, skb);
+ ppp_xmit_flush(ppp);
+out:
ppp_xmit_unlock(ppp);
}
}
/*
- * Compress and send a frame.
- * The caller should have locked the xmit path,
- * and xmit_pending should be 0.
+ * Compress and prepare to send a frame.
+ * The caller should have locked the xmit path.
+ * Returns 1 if the skb was consumed, 0 if it can be passed to ppp_push().
+ * @pskb is updated if a compressor is in use.
*/
-static void
-ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
+static int
+ppp_prepare_tx_skb(struct ppp *ppp, struct sk_buff **pskb)
{
+ struct sk_buff *skb = *pskb;
int proto = PPP_PROTO(skb);
struct sk_buff *new_skb;
int len;
"PPP: outbound frame "
"not passed\n");
kfree_skb(skb);
- return;
+ return 1;
}
/* if this packet passes the active filter, record the time */
if (!(ppp->active_filter &&
}
consume_skb(skb);
skb = new_skb;
+ *pskb = skb;
cp = skb_put(skb, len + 2);
cp[0] = 0;
cp[1] = proto;
if (!new_skb)
goto drop;
skb = new_skb;
+ *pskb = skb;
}
/*
goto drop;
skb_queue_tail(&ppp->file.rq, skb);
wake_up_interruptible(&ppp->file.rwait);
- return;
+ return 1;
}
- ppp->xmit_pending = skb;
- ppp_push(ppp);
- return;
+ return 0;
drop:
kfree_skb(skb);
++ppp->dev->stats.tx_errors;
+ return 1;
}
/*
- * Try to send the frame in xmit_pending.
+ * Try to send the frame.
* The caller should have the xmit path locked.
+ * Returns 1 if the skb was consumed (sent or freed), 0 if the caller
+ * must requeue it and retry later.
*/
-static void
-ppp_push(struct ppp *ppp)
+static int
+ppp_push(struct ppp *ppp, struct sk_buff *skb)
{
struct list_head *list;
struct channel *pch;
- struct sk_buff *skb = ppp->xmit_pending;
-
- if (!skb)
- return;
list = &ppp->channels;
if (list_empty(list)) {
/* nowhere to send the packet, just drop it */
- ppp->xmit_pending = NULL;
kfree_skb(skb);
- return;
+ return 1;
}
if ((ppp->flags & SC_MULTILINK) == 0) {
struct ppp_channel *chan;
+ int ret;
/* not doing multilink: send it down the first channel */
list = list->next;
pch = list_entry(list, struct channel, clist);
* skb but linearization failed
*/
kfree_skb(skb);
- ppp->xmit_pending = NULL;
+ ret = 1;
goto out;
}
- if (chan->ops->start_xmit(chan, skb))
- ppp->xmit_pending = NULL;
+ ret = chan->ops->start_xmit(chan, skb);
out:
spin_unlock(&pch->downl);
- return;
+ return ret;
}
#ifdef CONFIG_PPP_MULTILINK
/* Multilink: fragment the packet over as many links
as can take the packet at the moment. */
if (!ppp_mp_explode(ppp, skb))
- return;
+ return 0;
#endif /* CONFIG_PPP_MULTILINK */
- ppp->xmit_pending = NULL;
kfree_skb(skb);
+ return 1;
}
#ifdef CONFIG_PPP_MULTILINK
* performance if we have a lot of channels.
*/
if (nfree == 0 || nfree < navail / 2)
- return 0; /* can't take now, leave it in xmit_pending */
+ return 0; /* can't take now, leave it in transmit queue */
/* Do protocol field compression */
if (skb_linearize(skb))
spin_unlock(&pch->downl);
/* see if there is anything from the attached unit to be sent */
if (skb_queue_empty(&pch->file.xq)) {
- if (ppp)
- __ppp_xmit_process(ppp, NULL);
+ if (ppp) {
+ ppp_xmit_lock(ppp);
+ if (!ppp->closing)
+ ppp_xmit_flush(ppp);
+ ppp_xmit_unlock(ppp);
+ }
}
}
}
#endif /* CONFIG_PPP_FILTER */
- kfree_skb(ppp->xmit_pending);
free_percpu(ppp->xmit_recursion);
free_netdev(ppp->dev);