diff -Naur linux-2.6.16.orig/drivers/net/via-velocity.c linux-2.6.16/drivers/net/via-velocity.c --- linux-2.6.16.orig/drivers/net/via-velocity.c 2006-03-19 23:53:29.000000000 -0600 +++ linux-2.6.16/drivers/net/via-velocity.c 2006-06-12 12:10:33.000000000 -0500 @@ -98,6 +98,17 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver"); +//#define HAILON +#ifdef HAILON +#define HAIL(S) \ + printk(KERN_NOTICE "%s\n", (S)); +#define HAILS(S,T) \ + printk(KERN_NOTICE "%s -> status=0x%x\n", (S), (T)); +#else +#define HAIL(S) +#define HAILS(S,T) +#endif + #define VELOCITY_PARAM(N,D) \ static int N[MAX_UNITS]=OPTION_DEFAULT;\ module_param_array(N, int, NULL, 0); \ @@ -505,13 +516,15 @@ struct mac_regs __iomem * regs = vptr->mac_regs; int i; + HAIL("velocity_rx_reset"); vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0; /* * Init state, all RD entries belong to the NIC */ for (i = 0; i < vptr->options.numrx; ++i) - vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC; + /* vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC; BE */ + vptr->rd_ring[i].rdesc0 |= cpu_to_le32(BE_OWNED_BY_NIC); /* BE */ writew(vptr->options.numrx, ®s->RBRDU); writel(vptr->rd_pool_dma, ®s->RDBaseLo); @@ -534,6 +547,7 @@ struct mac_regs __iomem * regs = vptr->mac_regs; int i, mii_status; + HAIL("velocity_init_registers"); mac_wol_reset(regs); switch (type) { @@ -607,7 +621,8 @@ vptr->int_mask = INT_MASK_DEF; - writel(cpu_to_le32(vptr->rd_pool_dma), ®s->RDBaseLo); + /* writel(cpu_to_le32(vptr->rd_pool_dma), ®s->RDBaseLo); BE */ + writel((vptr->rd_pool_dma), ®s->RDBaseLo); /* BE */ writew(vptr->options.numrx - 1, ®s->RDCSize); mac_rx_queue_run(regs); mac_rx_queue_wake(regs); @@ -615,7 +630,8 @@ writew(vptr->options.numtx - 1, ®s->TDCSize); for (i = 0; i < vptr->num_txq; i++) { - writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i])); + /* writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i])); BE */ + writel((vptr->td_pool_dma[i]), 
&(regs->TDBaseLo[i])); /* BE */ mac_tx_queue_run(regs, i); } @@ -656,6 +672,7 @@ struct mac_regs __iomem * regs = vptr->mac_regs; int i = 0; + HAIL("velocity_soft_reset"); writel(CR0_SFRST, ®s->CR0Set); for (i = 0; i < W_MAX_TIMEOUT; i++) { @@ -717,6 +734,7 @@ VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION); printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n"); printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n"); + printk(KERN_INFO "(UNSTABLE! Big-endian hacks MJW June 11, 2006)\n"); /* BE */ first = 0; } @@ -931,6 +949,7 @@ dma_addr_t pool_dma; u8 *pool; + HAIL("velocity_init_rings"); /* * Allocate all RD/TD rings a single pool */ @@ -993,6 +1012,7 @@ static void velocity_free_rings(struct velocity_info *vptr) { int size; + HAIL("velocity_free_rings"); size = vptr->options.numrx * sizeof(struct rx_desc) + vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq; @@ -1009,6 +1029,7 @@ struct mac_regs __iomem *regs = vptr->mac_regs; int avail, dirty, unusable; + HAIL("velocity_give_many_rx_descs"); /* * RD number must be equal to 4X per hardware spec * (programming guide rev 1.20, p.13) @@ -1022,7 +1043,8 @@ dirty = vptr->rd_dirty - unusable; for (avail = vptr->rd_filled & 0xfffc; avail; avail--) { dirty = (dirty > 0) ? 
dirty - 1 : vptr->options.numrx - 1; - vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC; + /* vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC; BE */ + vptr->rd_ring[dirty].rdesc0 |= cpu_to_le32(BE_OWNED_BY_NIC); /* BE */ } writew(vptr->rd_filled & 0xfffc, ®s->RBRDU); @@ -1032,12 +1054,14 @@ static int velocity_rx_refill(struct velocity_info *vptr) { int dirty = vptr->rd_dirty, done = 0, ret = 0; + HAIL("velocity_rx_refill"); do { struct rx_desc *rd = vptr->rd_ring + dirty; /* Fine for an all zero Rx desc at init time as well */ - if (rd->rdesc0.owner == OWNED_BY_NIC) + /* if (rd->rdesc0.owner == OWNED_BY_NIC) BE */ + if (rd->rdesc0 & cpu_to_le32(BE_OWNED_BY_NIC)) /* BE */ break; if (!vptr->rd_info[dirty].skb) { @@ -1072,6 +1096,7 @@ unsigned int rsize = sizeof(struct velocity_rd_info) * vptr->options.numrx; + HAIL("velocity_init_rd_ring"); vptr->rd_info = kmalloc(rsize, GFP_KERNEL); if(vptr->rd_info == NULL) goto out; @@ -1101,6 +1126,7 @@ { int i; + HAIL("velocity_free_rd_ring"); if (vptr->rd_info == NULL) return; @@ -1142,6 +1168,7 @@ unsigned int tsize = sizeof(struct velocity_td_info) * vptr->options.numtx; + HAIL("velocity_init_td_ring"); /* Init the TD ring entries */ for (j = 0; j < vptr->num_txq; j++) { curr = vptr->td_pool_dma[j]; @@ -1178,6 +1205,7 @@ struct velocity_td_info * td_info = &(vptr->td_infos[q][n]); int i; + HAIL("velocity_free_rd_ring_entry"); if (td_info == NULL) return; @@ -1207,6 +1235,7 @@ { int i, j; + HAIL("velocity_free_td_ring"); for (j = 0; j < vptr->num_txq; j++) { if (vptr->td_infos[j] == NULL) continue; @@ -1234,34 +1263,42 @@ struct net_device_stats *stats = &vptr->stats; int rd_curr = vptr->rd_curr; int works = 0; + u16 wRSR; /* BE */ + HAILS("velocity_rx_srv", status); do { struct rx_desc *rd = vptr->rd_ring + rd_curr; if (!vptr->rd_info[rd_curr].skb) break; - if (rd->rdesc0.owner == OWNED_BY_NIC) + /* if (rd->rdesc0.owner == OWNED_BY_NIC) BE */ + if (rd->rdesc0 & cpu_to_le32(BE_OWNED_BY_NIC)) /* BE */ break; rmb(); + wRSR = 
(u16)(cpu_to_le32(rd->rdesc0)); /* BE */ /* * Don't drop CE or RL error frame although RXOK is off */ - if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) { + /* if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) { BE */ + if ((wRSR & RSR_RXOK) || (!(wRSR & RSR_RXOK) && (wRSR & (RSR_CE | RSR_RL)))) { /* BE */ if (velocity_receive_frame(vptr, rd_curr) < 0) stats->rx_dropped++; } else { - if (rd->rdesc0.RSR & RSR_CRC) + /* if (rd->rdesc0.RSR & RSR_CRC) BE */ + if (wRSR & RSR_CRC) /* BE */ stats->rx_crc_errors++; - if (rd->rdesc0.RSR & RSR_FAE) + /* if (rd->rdesc0.RSR & RSR_FAE) BE */ + if (wRSR & RSR_FAE) /* BE */ stats->rx_frame_errors++; stats->rx_dropped++; } - rd->inten = 1; + /* rd->inten = 1; BE */ + rd->ltwo |= cpu_to_le32(BE_INT_ENABLE); /* BE */ vptr->dev->last_rx = jiffies; @@ -1292,13 +1329,21 @@ static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb) { + u8 bCSM; + HAIL("velocity_rx_csum"); skb->ip_summed = CHECKSUM_NONE; - if (rd->rdesc1.CSM & CSM_IPKT) { + /* if (rd->rdesc1.CSM & CSM_IPKT) { if (rd->rdesc1.CSM & CSM_IPOK) { if ((rd->rdesc1.CSM & CSM_TCPKT) || (rd->rdesc1.CSM & CSM_UDPKT)) { - if (!(rd->rdesc1.CSM & CSM_TUPOK)) { + if (!(rd->rdesc1.CSM & CSM_TUPOK)) { BE */ + bCSM = (u8)(cpu_to_le32(rd->rdesc1) >> 16); /* BE */ + if (bCSM & CSM_IPKT) { + if (bCSM & CSM_IPOK) { + if ((bCSM & CSM_TCPKT) || + (bCSM & CSM_UDPKT)) { + if (!(bCSM & CSM_TUPOK)) { /* BE */ return; } } @@ -1324,9 +1369,11 @@ { int ret = -1; + HAIL("velocity_rx_copy"); if (pkt_size < rx_copybreak) { struct sk_buff *new_skb; + HAIL("velocity_rx_copy (working...)"); new_skb = dev_alloc_skb(pkt_size + 2); if (new_skb) { new_skb->dev = vptr->dev; @@ -1356,10 +1403,12 @@ static inline void velocity_iph_realign(struct velocity_info *vptr, struct sk_buff *skb, int pkt_size) { + HAIL("velocity_iph_realign"); /* FIXME - memmove ? 
*/ if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) { int i; + HAIL("velocity_iph_realign (working...)"); for (i = pkt_size; i >= 0; i--) *(skb->data + i + 2) = *(skb->data + i); skb_reserve(skb, 2); @@ -1381,16 +1430,21 @@ struct net_device_stats *stats = &vptr->stats; struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); struct rx_desc *rd = &(vptr->rd_ring[idx]); - int pkt_len = rd->rdesc0.len; + /* int pkt_len = rd->rdesc0.len BE */; + u16 pkt_len = ((cpu_to_le32(rd->rdesc0) >> 16) & 0x00003FFFUL); /* BE */ + u16 wRSR = (u16)(cpu_to_le32(rd->rdesc0)); /* BE */ struct sk_buff *skb; - if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { + HAIL("velocity_receive_frame"); + /* if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { BE */ + if (wRSR & (RSR_STP | RSR_EDP)) { /* BE */ VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->dev->name); stats->rx_length_errors++; return -EINVAL; } - if (rd->rdesc0.RSR & RSR_MAR) + /* if (rd->rdesc0.RSR & RSR_MAR) BE */ + if (wRSR & RSR_MAR) /* BE */ vptr->stats.multicast++; skb = rd_info->skb; @@ -1404,7 +1458,8 @@ */ if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) { - if (rd->rdesc0.RSR & RSR_RL) { + /* if (rd->rdesc0.RSR & RSR_RL) { BE */ + if (wRSR & RSR_RL) { /* BE */ stats->rx_length_errors++; return -EINVAL; } @@ -1448,6 +1503,7 @@ struct rx_desc *rd = &(vptr->rd_ring[idx]); struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); + HAIL("velocity_alloc_rx_buf"); rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64); if (rd_info->skb == NULL) return -ENOMEM; @@ -1465,10 +1521,14 @@ */ *((u32 *) & (rd->rdesc0)) = 0; - rd->len = cpu_to_le32(vptr->rx_buf_sz); - rd->inten = 1; + /* rd->len = cpu_to_le32(vptr->rx_buf_sz); BE */ + /* rd->inten = 1; BE */ rd->pa_low = cpu_to_le32(rd_info->skb_dma); - rd->pa_high = 0; + /* rd->pa_high = 0; BE */ + rd->ltwo &= cpu_to_le32(0xC000FFFFUL); /* BE */ + rd->ltwo |= cpu_to_le32((vptr->rx_buf_sz << 16)); /* BE */ + rd->ltwo |= cpu_to_le32(BE_INT_ENABLE); /* BE */ + 
rd->ltwo &= cpu_to_le32(0xFFFF0000UL); /* BE */ return 0; } @@ -1489,9 +1549,11 @@ int full = 0; int idx; int works = 0; + u16 wTSR; /* BE */ struct velocity_td_info *tdinfo; struct net_device_stats *stats = &vptr->stats; + HAILS("velocity_tx_srv", status); for (qnum = 0; qnum < vptr->num_txq; qnum++) { for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0; idx = (idx + 1) % vptr->options.numtx) { @@ -1502,22 +1564,29 @@ td = &(vptr->td_rings[qnum][idx]); tdinfo = &(vptr->td_infos[qnum][idx]); - if (td->tdesc0.owner == OWNED_BY_NIC) + /* if (td->tdesc0.owner == OWNED_BY_NIC) BE */ + if (td->tdesc0 & cpu_to_le32(BE_OWNED_BY_NIC)) /* BE */ break; if ((works++ > 15)) break; - if (td->tdesc0.TSR & TSR0_TERR) { + wTSR = (u16)cpu_to_le32(td->tdesc0); + /* if (td->tdesc0.TSR & TSR0_TERR) { BE */ + if (wTSR & TSR0_TERR) { /* BE */ stats->tx_errors++; stats->tx_dropped++; - if (td->tdesc0.TSR & TSR0_CDH) + /* if (td->tdesc0.TSR & TSR0_CDH) BE */ + if (wTSR & TSR0_CDH) /* BE */ stats->tx_heartbeat_errors++; - if (td->tdesc0.TSR & TSR0_CRS) + /* if (td->tdesc0.TSR & TSR0_CRS) BE */ + if (wTSR & TSR0_CRS) /* BE */ stats->tx_carrier_errors++; - if (td->tdesc0.TSR & TSR0_ABT) + /* if (td->tdesc0.TSR & TSR0_ABT) BE */ + if (wTSR & TSR0_ABT) /* BE */ stats->tx_aborted_errors++; - if (td->tdesc0.TSR & TSR0_OWC) + /* if (td->tdesc0.TSR & TSR0_OWC) BE */ + if (wTSR & TSR0_OWC) /* BE */ stats->tx_window_errors++; } else { stats->tx_packets++; @@ -1606,6 +1675,7 @@ static void velocity_error(struct velocity_info *vptr, int status) { + HAILS("velocity_error", status); if (status & ISR_TXSTLI) { struct mac_regs __iomem * regs = vptr->mac_regs; @@ -1693,6 +1763,7 @@ struct sk_buff *skb = tdinfo->skb; int i; + HAIL("velocity_free_tx_buf"); /* * Don't unmap the pre-allocated tx_bufs */ @@ -1896,6 +1967,7 @@ struct velocity_td_info *tdinfo; unsigned long flags; int index; + u32 lbufsz; /* BE */ int pktlen = skb->len; @@ -1905,9 +1977,18 @@ td_ptr = &(vptr->td_rings[qnum][index]); tdinfo = 
&(vptr->td_infos[qnum][index]); - td_ptr->tdesc1.TCPLS = TCPLS_NORMAL; - td_ptr->tdesc1.TCR = TCR0_TIC; - td_ptr->td_buf[0].queue = 0; + td_ptr->tdesc0 = 0x00000000UL; /* BE */ + td_ptr->tdesc1 = 0x00000000UL; /* BE */ + + /* td_ptr->tdesc1.TCPLS = TCPLS_NORMAL; BE */ + td_ptr->tdesc1 &= cpu_to_le32(0xfcffffffUL); /* BE */ + td_ptr->tdesc1 |= cpu_to_le32(((u32)TCPLS_NORMAL) << 24); /* BE */ + + /* td_ptr->tdesc1.TCR = TCR0_TIC; BE */ + td_ptr->tdesc1 |= cpu_to_le32(BE_TCR_TIC); /* BE */ + + /* td_ptr->td_buf[0].queue = 0; BE */ + td_ptr->td_buf[0].ltwo &= cpu_to_le32(~BE_QUEUE_ENABLE); /* BE */ /* * Pad short frames. @@ -1921,14 +2002,28 @@ memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len); tdinfo->skb = skb; tdinfo->skb_dma[0] = tdinfo->buf_dma; - td_ptr->tdesc0.pktsize = pktlen; + /* td_ptr->tdesc0.pktsize = pktlen; BE */ + td_ptr->tdesc0 &= cpu_to_le32(0xc000ffffUL); /* BE */ + lbufsz = pktlen; /* Assign, and make sure it's unsigned 32 bits - BE */ + lbufsz = lbufsz << 16; /* BE - shift over */ + td_ptr->tdesc0 |= cpu_to_le32(lbufsz); /* BE */ td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); - td_ptr->td_buf[0].pa_high = 0; - td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; + /* td_ptr->td_buf[0].pa_high = 0; BE */ + /* td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; BE */ + td_ptr->td_buf[0].ltwo = cpu_to_le32(lbufsz); /* BE */ tdinfo->nskb_dma = 1; - td_ptr->tdesc1.CMDZ = 2; + /* td_ptr->tdesc1.CMDZ = 2; BE */ + td_ptr->tdesc1 &= cpu_to_le32(0x0fffffffUL); /* BE */ + td_ptr->tdesc1 |= cpu_to_le32(((u32)0x2) << 28); /* BE */ } else #ifdef VELOCITY_ZERO_COPY_SUPPORT + /* + * BE - NOTE on the VELOCITY_ZERO_COPY_SUPPORT: + * This block of code has NOT been patched up for BE support, as + * it is certainly broken -- if it compiles at all. Since the BE + * fixes depend on the broken code, attempts to convert to BE support + * would almost certainly confuse more than help. 
+ */ if (skb_shinfo(skb)->nr_frags > 0) { int nfrags = skb_shinfo(skb)->nr_frags; tdinfo->skb = skb; @@ -1936,6 +2031,7 @@ skb_linearize(skb, GFP_ATOMIC); memcpy(tdinfo->buf, skb->data, skb->len); tdinfo->skb_dma[0] = tdinfo->buf_dma; + /* BE: Er, exactly what value are we assigning in this next line? */ td_ptr->tdesc0.pktsize = td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); td_ptr->td_buf[0].pa_high = 0; @@ -1952,6 +2048,7 @@ /* FIXME: support 48bit DMA later */ td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma); td_ptr->td_buf[i].pa_high = 0; + /* BE: This next line can't be right: */ td_ptr->td_buf[i].bufsize = skb->len->skb->data_len; for (i = 0; i < nfrags; i++) { @@ -1969,7 +2066,7 @@ } } else -#endif +#endif /* (broken) VELOCITY_ZERO_COPY_SUPPORT */ { /* * Map the linear network buffer into PCI space and @@ -1977,19 +2074,30 @@ */ tdinfo->skb = skb; tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE); - td_ptr->tdesc0.pktsize = pktlen; + /* td_ptr->tdesc0.pktsize = pktlen; BE */ + td_ptr->tdesc0 &= cpu_to_le32(0xc000ffffUL); /* BE */ + lbufsz = pktlen; /* Assign, and make sure it's unsigned 32 bits - BE */ + lbufsz = lbufsz << 16; /* BE */ + td_ptr->tdesc0 |= cpu_to_le32(lbufsz); /* BE */ td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); - td_ptr->td_buf[0].pa_high = 0; - td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; + /* td_ptr->td_buf[0].pa_high = 0; BE */ + /* td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; BE */ + td_ptr->td_buf[0].ltwo = cpu_to_le32(lbufsz); /* BE */ + tdinfo->nskb_dma = 1; - td_ptr->tdesc1.CMDZ = 2; + /* td_ptr->tdesc1.CMDZ = 2; BE */ + td_ptr->tdesc1 &= cpu_to_le32(0x0fffffffUL); /* BE */ + td_ptr->tdesc1 |= cpu_to_le32(((u32)0x2) << 28);/* BE */ } if (vptr->flags & VELOCITY_FLAGS_TAGGING) { - td_ptr->tdesc1.pqinf.VID = (vptr->options.vid & 0xfff); - td_ptr->tdesc1.pqinf.priority = 0; - td_ptr->tdesc1.pqinf.CFI = 0; - td_ptr->tdesc1.TCR |= TCR0_VETAG; + /* 
td_ptr->tdesc1.pqinf.priority = 0; BE */ + /* td_ptr->tdesc1.pqinf.CFI = 0; BE */ + td_ptr->tdesc1 &= cpu_to_le32(0xFFFF0000UL); /* BE */ + /* td_ptr->tdesc1.pqinf.VID = (vptr->options.vid & 0xfff); BE */ + td_ptr->tdesc1 |= cpu_to_le32((vptr->options.vid & 0xfff)); /* BE */ + /* td_ptr->tdesc1.TCR |= TCR0_VETAG; BE */ + td_ptr->tdesc1 |= cpu_to_le32(BE_TCR_VETAG); /* BE */ } /* @@ -1999,26 +2107,36 @@ && (skb->ip_summed == CHECKSUM_HW)) { struct iphdr *ip = skb->nh.iph; if (ip->protocol == IPPROTO_TCP) - td_ptr->tdesc1.TCR |= TCR0_TCPCK; + /* td_ptr->tdesc1.TCR |= TCR0_TCPCK; BE */ + td_ptr->tdesc1 |= cpu_to_le32(BE_TCR_TCPCK); /* BE */ else if (ip->protocol == IPPROTO_UDP) - td_ptr->tdesc1.TCR |= (TCR0_UDPCK); - td_ptr->tdesc1.TCR |= TCR0_IPCK; - } + /* td_ptr->tdesc1.TCR |= (TCR0_UDPCK); BE */ + td_ptr->tdesc1 |= cpu_to_le32(BE_TCR_UDPCK); /* BE */ + /* td_ptr->tdesc1.TCR |= TCR0_IPCK; BE */ + td_ptr->tdesc1 |= cpu_to_le32(BE_TCR_IPCK); /* BE */ + } { int prev = index - 1; if (prev < 0) prev = vptr->options.numtx - 1; - td_ptr->tdesc0.owner = OWNED_BY_NIC; + /* td_ptr->tdesc0.owner = OWNED_BY_NIC; BE */ + td_ptr->tdesc0 |= cpu_to_le32(BE_OWNED_BY_NIC); /* BE */ vptr->td_used[qnum]++; vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx; if (AVAIL_TD(vptr, qnum) < 1) netif_stop_queue(dev); - td_ptr = &(vptr->td_rings[qnum][prev]); - td_ptr->td_buf[0].queue = 1; + td_ptr = &(vptr->td_rings[qnum][prev]); + /* td_ptr->td_buf[0].queue = 1; BE */ + td_ptr->td_buf[0].ltwo |= cpu_to_le32(BE_QUEUE_ENABLE); /* BE */ +#ifdef HAILON + printk(KERN_NOTICE "velocity_xmit: (%s) len=%d idx=%d tdesc0=0x%x tdesc1=0x%x ltwo=0x%x\n", + dev->name, pktlen, index, td_ptr->tdesc0, td_ptr->tdesc1, td_ptr->td_buf[0].ltwo); +#endif mac_tx_queue_wake(vptr->mac_regs, qnum); } dev->trans_start = jiffies; @@ -2045,7 +2163,7 @@ u32 isr_status; int max_count = 0; - + HAIL("velocity_intr"); spin_lock(&vptr->lock); isr_status = mac_read_isr(vptr->mac_regs); @@ -2064,7 +2182,10 @@ while (isr_status != 0) { 
mac_write_isr(vptr->mac_regs, isr_status); - if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI))) + HAILS("velocity_intr",isr_status); + /* MJW - velocity_error is ALWAYS called; need to mask off some other flags */ + /* if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI))) */ + if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI | ISR_PTX0I | ISR_ISR0))) velocity_error(vptr, isr_status); if (isr_status & (ISR_PRXI | ISR_PPRXI)) max_count += velocity_rx_srv(vptr, isr_status); @@ -2102,6 +2223,7 @@ int i; struct dev_mc_list *mclist; + HAIL("velocity_set_multi"); if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ /* Unconditionally log net taps. */ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name); @@ -2147,6 +2269,7 @@ { struct velocity_info *vptr = dev->priv; + HAIL("net_device_stats"); /* If the hardware is down, don't touch MII */ if(!netif_running(dev)) return &vptr->stats; @@ -2191,6 +2314,7 @@ struct velocity_info *vptr = dev->priv; int ret; + HAIL("velocity_ioctl"); /* If we are asked for information and the device is power saving then we need to bring the device back up to talk to it */ @@ -2409,6 +2533,7 @@ { u16 ww; + HAIL("velocity_mii_read"); /* * Disable MIICR_MAUTO, so that mii addr can be set normally */ @@ -2445,6 +2570,7 @@ { u16 ww; + HAIL("velocity_mii_write"); /* * Disable MIICR_MAUTO, so that mii addr can be set normally */ diff -Naur linux-2.6.16.orig/drivers/net/via-velocity.h linux-2.6.16/drivers/net/via-velocity.h --- linux-2.6.16.orig/drivers/net/via-velocity.h 2006-03-19 23:53:29.000000000 -0600 +++ linux-2.6.16/drivers/net/via-velocity.h 2006-06-12 12:10:33.000000000 -0500 @@ -194,64 +194,70 @@ * Receive descriptor */ -struct rdesc0 { - u16 RSR; /* Receive status */ - u16 len:14; /* Received packet length */ - u16 reserved:1; - u16 owner:1; /* Who owns this buffer ? 
*/ -}; - -struct rdesc1 { - u16 PQTAG; - u8 CSM; - u8 IPKT; -}; +//struct rdesc0 { +// u16 RSR; /* Receive status */ +// u16 len:14; /* Received packet length */ +// u16 reserved:1; +// u16 owner:1; /* Who owns this buffer ? */ +//}; + +//struct rdesc1 { +// u16 PQTAG; +// u8 CSM; +// u8 IPKT; +//}; struct rx_desc { - struct rdesc0 rdesc0; - struct rdesc1 rdesc1; +// struct rdesc0 rdesc0; +// struct rdesc1 rdesc1; + u32 rdesc0; + u32 rdesc1; u32 pa_low; /* Low 32 bit PCI address */ - u16 pa_high; /* Next 16 bit PCI address (48 total) */ - u16 len:15; /* Frame size */ - u16 inten:1; /* Enable interrupt */ +// u16 pa_high; /* Next 16 bit PCI address (48 total) */ +// u16 len:15; /* Frame size */ +// u16 inten:1; /* Enable interrupt */ + u32 ltwo; } __attribute__ ((__packed__)); /* * Transmit descriptor */ -struct tdesc0 { - u16 TSR; /* Transmit status register */ - u16 pktsize:14; /* Size of frame */ - u16 reserved:1; - u16 owner:1; /* Who owns the buffer */ -}; - -struct pqinf { /* Priority queue info */ - u16 VID:12; - u16 CFI:1; - u16 priority:3; -} __attribute__ ((__packed__)); - -struct tdesc1 { - struct pqinf pqinf; - u8 TCR; - u8 TCPLS:2; - u8 reserved:2; - u8 CMDZ:4; -} __attribute__ ((__packed__)); +//struct tdesc0 { +// u16 TSR; /* Transmit status register */ +// u16 pktsize:14; /* Size of frame */ +// u16 reserved:1; +// u16 owner:1; /* Who owns the buffer */ +//}; + +//struct pqinf { /* Priority queue info */ +// u16 VID:12; +// u16 CFI:1; +// u16 priority:3; +//} __attribute__ ((__packed__)); + +//struct tdesc1 { +// struct pqinf pqinf; +// u8 TCR; +// u8 TCPLS:2; +// u8 reserved:2; +// u8 CMDZ:4; +//} __attribute__ ((__packed__)); struct td_buf { u32 pa_low; - u16 pa_high; - u16 bufsize:14; - u16 reserved:1; - u16 queue:1; +// u16 pa_high; +// u16 bufsize:14; +// u16 reserved:1; +// u16 queue:1; + u32 ltwo; } __attribute__ ((__packed__)); struct tx_desc { - struct tdesc0 tdesc0; - struct tdesc1 tdesc1; +// struct tdesc0 tdesc0; +// struct tdesc1 tdesc1; 
+ u32 tdesc0; + u32 tdesc1; struct td_buf td_buf[7]; }; @@ -296,6 +302,16 @@ OWNED_BY_NIC = 1 }; +/* Constants added for the BE fixes */ +#define BE_OWNED_BY_NIC 0x80000000UL +#define BE_INT_ENABLE 0x80000000UL +#define BE_QUEUE_ENABLE 0x80000000UL +#define BE_TCR_TIC 0x00800000UL +#define BE_TCR_VETAG 0x00200000UL +#define BE_TCR_TCPCK 0x00040000UL +#define BE_TCR_UDPCK 0x00080000UL +#define BE_TCR_IPCK 0x00100000UL + /* * MAC registers and macros.