==============================================================================
First attempt:

/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only. */
static int tg3_start_xmit(PacketBuf_s *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	kerndbg( KERN_DEBUG, "%s(): ENTER\n", __FUNCTION__ );

	/* DMA length comes from the PacketBuf_s byte count; the Linux
	 * skb_headlen() path is compiled out in this port. */
#if 0
	len = skb_headlen(skb);
#else
	len = skb->pb_nSize;
#endif
	entry = tp->tx_prod;	/* next free slot in the TX ring */
	base_flags = 0;		/* no checksum/VLAN flags requested */
	mss = 0;		/* no TSO */

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->pb_pData, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	/* NOTE(review): unmap bookkeeping is disabled and nothing in this
	 * function ever unmaps `mapping` — confirm the port's
	 * pci_map_single() needs no teardown. */
	//pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	/* Final argument packs a flag word: bit 0 is set when this is the
	 * last buffer of the packet (no chained PacketBuf_s), upper bits
	 * carry the (here always zero) MSS. */
	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb->pb_psNext == NULL) | (mss << 1));

	entry = NEXT_TX(entry);

#if 0
	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev, frag->page,
					       frag->page_offset, len,
					       PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len, base_flags,
				    (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}
#endif
	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	/* NOTE(review): the tx_prod update below is compiled out, so the
	 * software producer index never advances — every transmit would
	 * reuse the same descriptor slot.  The second and final versions
	 * re-enable it. */
#if 0
	tp->tx_prod = entry;
	if (tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
			netif_wake_queue(tp->dev);
	}
#endif

	/* NOTE(review): no goto targets this label (unused-label warning),
	 * and an smp_wmb() placed *after* the mailbox write cannot order
	 * the descriptor stores ahead of the doorbell — presumably
	 * leftovers from the Linux original; confirm before relying on it. */
out_unlock:
	smp_wmb();

	dev->trans_start = jiffies;

	return 0;
}

==============================================================================
Second attempt:

/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
*/
static int tg3_start_xmit(PacketBuf_s *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	kerndbg( KERN_DEBUG, "%s(): ENTER\n", __FUNCTION__ );

	/* DMA length from the PacketBuf_s byte count; skb_headlen() path
	 * compiled out in this port. */
#if 0
	len = skb_headlen(skb);
#else
	len = skb->pb_nSize;
#endif
	entry = tp->tx_prod;	/* next free slot in the TX ring */
	base_flags = 0;		/* no checksum/VLAN flags requested */
	mss = 0;		/* no TSO */

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->pb_pData, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;

	/* NOTE(review): the live branch below hands the raw CPU pointer
	 * skb->pb_pData to the descriptor and hard-codes the end-of-packet
	 * bit to 1; the pci_map_single() result (`mapping`) above is left
	 * unused.  This is only correct if kernel virtual addresses equal
	 * bus addresses on this platform — confirm. */
#if 0
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb->pb_psNext == NULL) | (mss << 1));
#else
	tg3_set_txd(tp, entry, skb->pb_pData, len, base_flags,
		    1 | (mss << 1));
#endif

	entry = NEXT_TX(entry);

#if 0
	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev, frag->page,
					       frag->page_offset, len,
					       PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len, base_flags,
				    (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}
#endif
	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
	/* Producer-index update is live in this version (it was compiled
	 * out in the first attempt). */
	tp->tx_prod = entry;

#if 0
	if (tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
			netif_wake_queue(tp->dev);
	}
#endif

	/* NOTE(review): label is unreferenced, and smp_wmb() after the
	 * mailbox write orders nothing the hardware can still observe —
	 * leftovers from the Linux original; confirm. */
out_unlock:
	smp_wmb();

	dev->trans_start = jiffies;

	return 0;
}

==============================================================================
Final version:

/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
*/
static int tg3_start_xmit(PacketBuf_s *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry;

	kerndbg( KERN_DEBUG, "%s(): ENTER\n", __FUNCTION__ );

	len = skb->pb_nSize;	/* whole PacketBuf is one DMA buffer */
	entry = tp->tx_prod;	/* next free slot in the TX ring */

	/* Remember the PacketBuf so completion handling can find it. */
	tp->tx_buffers[entry].skb = skb;

	/* Queue the single buffer: base_flags = 0 (no checksum/VLAN/TSO),
	 * flag word = 1 (end-of-packet bit, MSS bits zero).
	 * NOTE(review): pb_pData is a CPU pointer cast straight to
	 * dma_addr_t with no pci_map_single() — this assumes kernel
	 * virtual == bus address and a physically contiguous buffer on
	 * this platform; confirm, otherwise the NIC will DMA from the
	 * wrong address. */
	tg3_set_txd(tp, entry, (dma_addr_t)skb->pb_pData, len, 0, 1 );

	entry = NEXT_TX(entry);

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
	tp->tx_prod = entry;

	/* NOTE(review): no goto targets this label (unused-label warning),
	 * and smp_wmb() after the doorbell write cannot order the
	 * descriptor stores ahead of it — presumably leftover from the
	 * Linux original's locked error paths; confirm before removing. */
out_unlock:
	smp_wmb();

	/* Record last-transmit time for the watchdog. */
	dev->trans_start = jiffies;

	return 0;
}