if (txq->axq_depth > TAIL_DROP_COUNT) {
sc->sc_stats.ast_tx_discard++;
- /* queue is full, let the kernel backlog the skb */
- netif_stop_queue(dev);
requeue = 1;
-
goto hardstart_fail;
}
#endif
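/* Make a private copy of the skb (GFP_ATOMIC: the tx path may run in
 * softirq context); if allocation fails, hand the skb back to the
 * kernel to retry rather than dropping it. */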
skb = skb_copy(skb, GFP_ATOMIC);
if (skb == NULL) {
requeue = 1;
- netif_stop_queue(dev);
goto hardstart_fail;
}
/* If the clone works, bump the reference count for our copy. */
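/* Reserve a driver tx buffer; ath_take_txbuf() returns NULL when none
 * are available, in which case the frame must be requeued. */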
bf = ath_take_txbuf(sc);
if (bf == NULL) {
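/* Release the txq lock before jumping to the failure path. */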
ATH_TXQ_UNLOCK_IRQ_EARLY(txq);
+ requeue = 1;
goto hardstart_fail;
}
DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
}
bf = ath_take_txbuf(sc);
if (bf == NULL) {
- netif_stop_queue(dev);
requeue = 1;
goto hardstart_fail;
}
bf = ath_take_txbuf(sc);
if (bf == NULL) {
ATH_TXQ_UNLOCK_IRQ_EARLY(txq);
+ requeue = 1;
goto hardstart_fail;
}
}
bf = ath_take_txbuf(sc);
if (bf == NULL) {
+ requeue = 1;
goto hardstart_fail;
}
/* Pass control of the skb to the caller (i.e., resources are their
* problem). */
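/* When requeuing, the queue must be stopped before returning
 * NETDEV_TX_BUSY; otherwise the kernel immediately retries the skb
 * and busy-loops. */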
if (requeue) {
+ /* queue is full, let the kernel backlog the skb */
+ netif_stop_queue(dev);
+ sc->sc_devstopped = 1;
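+ /* Record the stop so the queue can be woken once tx buffers free up. */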
/* Stop tracking the skb again; we are giving it back. */
ieee80211_skb_untrack(skb);
return NETDEV_TX_BUSY;