For any hardstart failure resulting from insufficient resources, set requeue.
author: mtaylor <mtaylor@0192ed92-7a03-0410-a25b-9323aeb14dbd>
Fri, 18 Jan 2008 21:32:17 +0000 (21:32 +0000)
committer: mtaylor <mtaylor@0192ed92-7a03-0410-a25b-9323aeb14dbd>
Fri, 18 Jan 2008 21:32:17 +0000 (21:32 +0000)
On requeue, stop the kernel queue and flag the fact that we stopped it in sc_dev_stopped so that it can be restarted automatically when resources become available.  This is already done in the case of taking a txbuf, but can/should be done for every code path leading to requeuing.

git-svn-id: http://madwifi-project.org/svn/madwifi/trunk@3208 0192ed92-7a03-0410-a25b-9323aeb14dbd

ath/if_ath.c

index ca15575d86350d658a77dbddc2dd41e4005e1f55..bd95a1c6abcfc115a3002fd134a61ec8a519cee1 100644 (file)
@@ -3102,10 +3102,7 @@ ath_hardstart(struct sk_buff *skb, struct net_device *dev)
 
        if (txq->axq_depth > TAIL_DROP_COUNT) {
                sc->sc_stats.ast_tx_discard++;
-               /* queue is full, let the kernel backlog the skb */
-               netif_stop_queue(dev);
                requeue = 1;
-               
                goto hardstart_fail;
        }
 #endif
@@ -3118,7 +3115,6 @@ ath_hardstart(struct sk_buff *skb, struct net_device *dev)
                skb = skb_copy(skb, GFP_ATOMIC);
                if (skb == NULL) {
                        requeue = 1;
-                       netif_stop_queue(dev);
                        goto hardstart_fail;
                }
                /* If the clone works, bump the reference count for our copy. */
@@ -3167,6 +3163,7 @@ ath_hardstart(struct sk_buff *skb, struct net_device *dev)
                        bf = ath_take_txbuf(sc);
                        if (bf == NULL) {
                                ATH_TXQ_UNLOCK_IRQ_EARLY(txq);
+                               requeue = 1;
                                goto hardstart_fail;
                        }
                        DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
@@ -3217,7 +3214,6 @@ ath_hardstart(struct sk_buff *skb, struct net_device *dev)
                        }
                        bf = ath_take_txbuf(sc);
                        if (bf == NULL) {
-                               netif_stop_queue(dev);
                                requeue = 1;
                                goto hardstart_fail;
                        }
@@ -3235,6 +3231,7 @@ ath_hardstart(struct sk_buff *skb, struct net_device *dev)
                bf = ath_take_txbuf(sc);
                if (bf == NULL) {
                        ATH_TXQ_UNLOCK_IRQ_EARLY(txq);
+                       requeue = 1;
                        goto hardstart_fail;
                }
        }
@@ -3248,6 +3245,7 @@ ff_bypass:
 
        bf = ath_take_txbuf(sc);
        if (bf == NULL) {
+               requeue = 1;
                goto hardstart_fail;
        }
 
@@ -3335,6 +3333,9 @@ hardstart_fail:
        /* Pass control of the skb to the caller (i.e., resources are their 
         * problem). */
        if (requeue) {
+               /* queue is full, let the kernel backlog the skb */
+               netif_stop_queue(dev);
+               sc->sc_devstopped = 1;
                /* Stop tracking again we are giving it back*/
                ieee80211_skb_untrack(skb);
                return NETDEV_TX_BUSY;