/*
 * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/seeq.h>

#include "sgiseeq.h"

static char *sgiseeqstr = "SGI Seeq8003";

/*
 * If you want speed, you do something silly, it always has worked for me.  So,
 * with that in mind, I've decided to make this driver look completely like a
 * stupid Lance from a driver architecture perspective.  Only difference is that
 * here our "ring buffer" looks and acts like a real Lance one does but is
 * laid out like how the HPC DMA and the Seeq want it to.  You'd be surprised
 * how a stupid idea like this can pay off in performance, not to mention
 * making this driver 2,000 times easier to write. ;-)
 */

/* Tune these if we tend to run out often etc. */
#define SEEQ_RX_BUFFERS  16
#define SEEQ_TX_BUFFERS  16

#define PKT_BUF_SZ       1584

#define NEXT_RX(i)  (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
#define NEXT_TX(i)  (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
#define PREV_RX(i)  (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
#define PREV_TX(i)  (((i) - 1) & (SEEQ_TX_BUFFERS - 1))

#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
                            sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
                            sp->tx_old - sp->tx_new - 1)
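
/*
 * Worked example: the ring macros above rely on SEEQ_RX_BUFFERS and
 * SEEQ_TX_BUFFERS being powers of two, so the AND mask implements the
 * index wraparound.  One tx slot is sacrificed to tell "full" from
 * "empty": with tx_old == tx_new the ring is empty and TX_BUFFS_AVAIL
 * yields SEEQ_TX_BUFFERS - 1 == 15; once tx_new sits one slot behind
 * tx_old it yields 0 and the queue gets stopped.
 */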

#define VIRT_TO_DMA(sp, v) ((sp)->srings_dma +                                 \
                                  (dma_addr_t)((unsigned long)(v) -            \
                                               (unsigned long)((sp)->rx_desc)))
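
/*
 * VIRT_TO_DMA works for any descriptor in the init block: rx_desc
 * points at the very start of the block, so the byte offset of v from
 * rx_desc, added to srings_dma, gives the bus address the HPC uses for
 * that same descriptor.
 */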

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define PAD_SIZE    (128 - sizeof(struct hpc_dma_desc) - sizeof(void *))

struct sgiseeq_rx_desc {
        volatile struct hpc_dma_desc rdma;
        u8 padding[PAD_SIZE];
        struct sk_buff *skb;
};

struct sgiseeq_tx_desc {
        volatile struct hpc_dma_desc tdma;
        u8 padding[PAD_SIZE];
        struct sk_buff *skb;
};

/*
 * Warning: This structure is laid out in a certain way because HPC dma
 *          descriptors must be 8-byte aligned.  So don't touch this without
 *          some care.
 */
struct sgiseeq_init_block { /* Note the name ;-) */
        struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
        struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
};

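/*
 * PAD_SIZE makes both descriptor types exactly 128 bytes: the
 * hpc_dma_desc sits 8-byte aligned at offset 0 and each entry is
 * padded out so that individual descriptors can be cache-synced
 * independently (128 bytes presumably being a safe multiple of the
 * machine's cache line size).
 */
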
struct sgiseeq_private {
        struct sgiseeq_init_block *srings;
        dma_addr_t srings_dma;

        /* Ptrs to the descriptors in uncached space. */
        struct sgiseeq_rx_desc *rx_desc;
        struct sgiseeq_tx_desc *tx_desc;

        char *name;
        struct hpc3_ethregs *hregs;
        struct sgiseeq_regs *sregs;

        /* Ring entry counters. */
        unsigned int rx_new, tx_new;
        unsigned int rx_old, tx_old;

        int is_edlc;
        unsigned char control;
        unsigned char mode;

        spinlock_t tx_lock;
};

static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
        dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
                       DMA_FROM_DEVICE);
}

static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
        dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
                       DMA_TO_DEVICE);
}

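/*
 * The rings live in noncoherent DMA memory, so the convention through
 * the rest of the driver is: dma_sync_desc_cpu() before the CPU reads
 * a descriptor the HPC may have written, dma_sync_desc_dev() after the
 * CPU updates a descriptor and before the HPC may look at it.  Using
 * sizeof(struct sgiseeq_rx_desc) for tx descriptors too is fine since
 * both descriptor types are the same size.
 */
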
static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
{
        hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
        udelay(20);
        hregs->reset = 0;
}

static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
                                       struct sgiseeq_regs *sregs)
{
        hregs->rx_ctrl = hregs->tx_ctrl = 0;
        hpc3_eth_reset(hregs);
}

#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
                       SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)

static inline void seeq_go(struct sgiseeq_private *sp,
                           struct hpc3_ethregs *hregs,
                           struct sgiseeq_regs *sregs)
{
        sregs->rstat = sp->mode | RSTAT_GO_BITS;
        hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
}

static inline void __sgiseeq_set_mac_address(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        int i;

        sregs->tstat = SEEQ_TCMD_RB0;
        for (i = 0; i < 6; i++)
                sregs->rw.eth_addr[i] = dev->dev_addr[i];
}

static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sockaddr *sa = addr;

        memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

        spin_lock_irq(&sp->tx_lock);
        __sgiseeq_set_mac_address(dev);
        spin_unlock_irq(&sp->tx_lock);

        return 0;
}

#define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD)
#define RCNTCFG_INIT  (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))

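/*
 * A sketch of the cntinfo protocol as this driver uses it: rx
 * descriptors start out with HPCDMA_OWN set (owned by the HPC, which
 * clears the bit once it has written a frame) and HPCDMA_XIE asking
 * for an interrupt; tx descriptors start out "done" (HPCDMA_ETXD) and
 * only become live once sgiseeq_start_xmit() fills them in and sets
 * HPCDMA_XIU.
 */
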
static int seeq_init_ring(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        int i;

        netif_stop_queue(dev);
        sp->rx_new = sp->tx_new = 0;
        sp->rx_old = sp->tx_old = 0;

        __sgiseeq_set_mac_address(dev);

        /* Setup tx ring. */
        for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
                sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
                dma_sync_desc_dev(dev, &sp->tx_desc[i]);
        }

        /* And now the rx ring. */
        for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
                if (!sp->rx_desc[i].skb) {
                        dma_addr_t dma_addr;
                        struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);

                        if (skb == NULL)
                                return -ENOMEM;
                        skb_reserve(skb, 2);
                        dma_addr = dma_map_single(dev->dev.parent,
                                                  skb->data - 2,
                                                  PKT_BUF_SZ, DMA_FROM_DEVICE);
                        sp->rx_desc[i].skb = skb;
                        sp->rx_desc[i].rdma.pbuf = dma_addr;
                }
                sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
                dma_sync_desc_dev(dev, &sp->rx_desc[i]);
        }
        sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
        dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
        return 0;
}

static void seeq_purge_ring(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        int i;

        /* clear tx ring. */
        for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
                if (sp->tx_desc[i].skb) {
                        dev_kfree_skb(sp->tx_desc[i].skb);
                        sp->tx_desc[i].skb = NULL;
                }
        }

        /* And now the rx ring. */
        for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
                if (sp->rx_desc[i].skb) {
                        dev_kfree_skb(sp->rx_desc[i].skb);
                        sp->rx_desc[i].skb = NULL;
                }
        }
}

#ifdef DEBUG
static struct sgiseeq_private *gpriv;
static struct net_device *gdev;

static void sgiseeq_dump_rings(void)
{
        static int once;
        struct sgiseeq_rx_desc *r = gpriv->rx_desc;
        struct sgiseeq_tx_desc *t = gpriv->tx_desc;
        struct hpc3_ethregs *hregs = gpriv->hregs;
        int i;

        if (once)
                return;
        once++;
        printk("RING DUMP:\n");
        for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
                printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
                       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
                       r[i].rdma.pnext);
                i += 1;
                printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
                       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
                       r[i].rdma.pnext);
        }
        for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
                printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
                       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
                       t[i].tdma.pnext);
                i += 1;
                printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
                       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
                       t[i].tdma.pnext);
        }
        printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
               gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
        printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
               hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
        printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
               hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
}
#endif

#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)

static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
                     struct sgiseeq_regs *sregs)
{
        struct hpc3_ethregs *hregs = sp->hregs;
        int err;

        reset_hpc3_and_seeq(hregs, sregs);
        err = seeq_init_ring(dev);
        if (err)
                return err;

        /* Setup to field the proper interrupt types. */
        if (sp->is_edlc) {
                sregs->tstat = TSTAT_INIT_EDLC;
                sregs->rw.wregs.control = sp->control;
                sregs->rw.wregs.frame_gap = 0;
        } else {
                sregs->tstat = TSTAT_INIT_SEEQ;
        }

        hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc);
        hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc);

        seeq_go(sp, hregs, sregs);
        return 0;
}

static void record_rx_errors(struct net_device *dev, unsigned char status)
{
        if (status & (SEEQ_RSTAT_OVERF | SEEQ_RSTAT_SFRAME))
                dev->stats.rx_over_errors++;
        if (status & SEEQ_RSTAT_CERROR)
                dev->stats.rx_crc_errors++;
        if (status & SEEQ_RSTAT_DERROR)
                dev->stats.rx_frame_errors++;
        if (status & SEEQ_RSTAT_REOF)
                dev->stats.rx_errors++;
}

static inline void rx_maybe_restart(struct sgiseeq_private *sp,
                                    struct hpc3_ethregs *hregs,
                                    struct sgiseeq_regs *sregs)
{
        if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
                hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new);
                seeq_go(sp, hregs, sregs);
        }
}

static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
                              struct hpc3_ethregs *hregs,
                              struct sgiseeq_regs *sregs)
{
        struct sgiseeq_rx_desc *rd;
        struct sk_buff *skb = NULL;
        struct sk_buff *newskb;
        unsigned char pkt_status;
        int len = 0;
        unsigned int orig_end = PREV_RX(sp->rx_new);

        /* Service every received packet. */
        rd = &sp->rx_desc[sp->rx_new];
        dma_sync_desc_cpu(dev, rd);
        while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
                len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
                dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
                                 PKT_BUF_SZ, DMA_FROM_DEVICE);
                pkt_status = rd->skb->data[len];
                if (pkt_status & SEEQ_RSTAT_FIG) {
                        /* Packet is OK. */
                        /* We don't want to receive our own packets */
                        if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
                                if (len > rx_copybreak) {
                                        skb = rd->skb;
                                        newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
                                        if (!newskb) {
                                                newskb = skb;
                                                skb = NULL;
                                                goto memory_squeeze;
                                        }
                                        skb_reserve(newskb, 2);
                                } else {
                                        skb = netdev_alloc_skb(dev, len + 2);
                                        if (skb) {
                                                skb_reserve(skb, 2);
                                                skb_copy_to_linear_data(skb, rd->skb->data, len);
                                        }
                                        newskb = rd->skb;
                                }
memory_squeeze:
                                if (skb) {
                                        skb_put(skb, len);
                                        skb->protocol = eth_type_trans(skb, dev);
                                        netif_rx(skb);
                                        dev->last_rx = jiffies;
                                        dev->stats.rx_packets++;
                                        dev->stats.rx_bytes += len;
                                } else {
                                        printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
                                                dev->name);
                                        dev->stats.rx_dropped++;
                                }
                        } else {
                                /* Silently drop my own packets */
                                newskb = rd->skb;
                        }
                } else {
                        record_rx_errors(dev, pkt_status);
                        newskb = rd->skb;
                }
                rd->skb = newskb;
                rd->rdma.pbuf = dma_map_single(dev->dev.parent,
                                               newskb->data - 2,
                                               PKT_BUF_SZ, DMA_FROM_DEVICE);

                /* Return the entry to the ring pool. */
                rd->rdma.cntinfo = RCNTINFO_INIT;
                sp->rx_new = NEXT_RX(sp->rx_new);
                dma_sync_desc_dev(dev, rd);
                rd = &sp->rx_desc[sp->rx_new];
                dma_sync_desc_cpu(dev, rd);
        }
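
        /* Move the end-of-ring marker: clear HPCDMA_EOR on what used to
         * be the last descriptor and set it on the one just behind the
         * new rx_new, so the HPC always sees a full ring of refilled
         * buffers ahead of it but cannot lap entries the CPU has not
         * processed yet.
         */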
        dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
        sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
        dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
        dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
        sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
        dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
        rx_maybe_restart(sp, hregs, sregs);
}

static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
                                             struct sgiseeq_regs *sregs)
{
        if (sp->is_edlc) {
                sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
                sregs->rw.wregs.control = sp->control;
        }
}

static inline void kick_tx(struct net_device *dev,
                           struct sgiseeq_private *sp,
                           struct hpc3_ethregs *hregs)
{
        struct sgiseeq_tx_desc *td;
        int i = sp->tx_old;

        /* If the HPC aint doin nothin, and there are more packets
         * with ETXD cleared and XIU set we must make very certain
         * that we restart the HPC else we risk locking up the
         * adapter.  The following code is only safe iff the HPCDMA
         * is not active!
         */
        td = &sp->tx_desc[i];
        dma_sync_desc_cpu(dev, td);
        while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
              (HPCDMA_XIU | HPCDMA_ETXD)) {
                i = NEXT_TX(i);
                td = &sp->tx_desc[i];
                dma_sync_desc_cpu(dev, td);
        }
        if (td->tdma.cntinfo & HPCDMA_XIU) {
                hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
                hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
        }
}

static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
                              struct hpc3_ethregs *hregs,
                              struct sgiseeq_regs *sregs)
{
        struct sgiseeq_tx_desc *td;
        unsigned long status = hregs->tx_ctrl;
        int j;

        tx_maybe_reset_collisions(sp, sregs);

        if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
                /* Oops, HPC detected some sort of error. */
                if (status & SEEQ_TSTAT_R16)
                        dev->stats.tx_aborted_errors++;
                if (status & SEEQ_TSTAT_UFLOW)
                        dev->stats.tx_fifo_errors++;
                if (status & SEEQ_TSTAT_LCLS)
                        dev->stats.collisions++;
        }

        /* Ack 'em... */
        for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
                td = &sp->tx_desc[j];

                dma_sync_desc_cpu(dev, td);
                if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
                        break;
                if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
                        if (!(status & HPC3_ETXCTRL_ACTIVE)) {
                                hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
                                hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
                        }
                        break;
                }
                dev->stats.tx_packets++;
                sp->tx_old = NEXT_TX(sp->tx_old);
                td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
                td->tdma.cntinfo |= HPCDMA_EOX;
                if (td->skb) {
                        dev_kfree_skb_any(td->skb);
                        td->skb = NULL;
                }
                dma_sync_desc_dev(dev, td);
        }
}

static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct hpc3_ethregs *hregs = sp->hregs;
        struct sgiseeq_regs *sregs = sp->sregs;

        spin_lock(&sp->tx_lock);

        /* Ack the IRQ and set software state. */
        hregs->reset = HPC3_ERST_CLRIRQ;

        /* Always check for received packets. */
        sgiseeq_rx(dev, sp, hregs, sregs);

        /* Only check for tx acks if we have something queued. */
        if (sp->tx_old != sp->tx_new)
                sgiseeq_tx(dev, sp, hregs, sregs);

        if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
                netif_wake_queue(dev);
        }
        spin_unlock(&sp->tx_lock);

        return IRQ_HANDLED;
}

static int sgiseeq_open(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        unsigned int irq = dev->irq;
        int err;

        if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
                printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
                return -EAGAIN;
        }

        err = init_seeq(dev, sp, sregs);
        if (err)
                goto out_free_irq;

        netif_start_queue(dev);

        return 0;

out_free_irq:
        free_irq(irq, dev);

        return err;
}

static int sgiseeq_close(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        unsigned int irq = dev->irq;

        netif_stop_queue(dev);

        /* Shutdown the Seeq. */
        reset_hpc3_and_seeq(sp->hregs, sregs);
        free_irq(irq, dev);
        seeq_purge_ring(dev);

        return 0;
}

static inline int sgiseeq_reset(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        int err;

        err = init_seeq(dev, sp, sregs);
        if (err)
                return err;

        dev->trans_start = jiffies;
        netif_wake_queue(dev);

        return 0;
}

static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct hpc3_ethregs *hregs = sp->hregs;
        unsigned long flags;
        struct sgiseeq_tx_desc *td;
        int len, entry;

        /* Setup...  Pad short frames before taking the tx lock, so the
         * skb_padto() failure path cannot return with the lock held.
         */
        len = skb->len;
        if (len < ETH_ZLEN) {
                if (skb_padto(skb, ETH_ZLEN))
                        return 0;
                len = ETH_ZLEN;
        }

        spin_lock_irqsave(&sp->tx_lock, flags);

        dev->stats.tx_bytes += len;
        entry = sp->tx_new;
        td = &sp->tx_desc[entry];
        dma_sync_desc_cpu(dev, td);

        /* Create entry.  There are so many races with adding a new
         * descriptor to the chain:
         * 1) Assume that the HPC is off processing a DMA chain while
         *    we are changing all of the following.
         * 2) Do not allow the HPC to look at a new descriptor until
         *    we have completely set up its state.  This means, do
         *    not clear HPCDMA_EOX in the current last descriptor
         *    until the one we are adding looks consistent and could
         *    be processed right now.
         * 3) The tx interrupt code must notice when we've added a new
         *    entry and the HPC got to the end of the chain before we
         *    added this new entry and restarted it.
         */
        td->skb = skb;
        td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
                                       len, DMA_TO_DEVICE);
        td->tdma.cntinfo = (len & HPCDMA_BCNT) |
                           HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
        dma_sync_desc_dev(dev, td);
        if (sp->tx_old != sp->tx_new) {
                struct sgiseeq_tx_desc *backend;

                backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
                dma_sync_desc_cpu(dev, backend);
                backend->tdma.cntinfo &= ~HPCDMA_EOX;
                dma_sync_desc_dev(dev, backend);
        }
        sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */

        /* Maybe kick the HPC back into motion. */
        if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
                kick_tx(dev, sp, hregs);

        dev->trans_start = jiffies;

        if (!TX_BUFFS_AVAIL(sp))
                netif_stop_queue(dev);
        spin_unlock_irqrestore(&sp->tx_lock, flags);

        return 0;
}

static void timeout(struct net_device *dev)
{
        printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
        sgiseeq_reset(dev);

        dev->trans_start = jiffies;
        netif_wake_queue(dev);
}

static void sgiseeq_set_multicast(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        unsigned char oldmode = sp->mode;

        if (dev->flags & IFF_PROMISC)
                sp->mode = SEEQ_RCMD_RANY;
        else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count)
                sp->mode = SEEQ_RCMD_RBMCAST;
        else
                sp->mode = SEEQ_RCMD_RBCAST;

        /* XXX I know this sucks, but is there a better way to reprogram
         * XXX the receiver? At least, this shouldn't happen too often.
         */

        if (oldmode != sp->mode)
                sgiseeq_reset(dev);
}

static inline void setup_tx_ring(struct net_device *dev,
                                 struct sgiseeq_tx_desc *buf,
                                 int nbufs)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        int i = 0;

        while (i < (nbufs - 1)) {
                buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
                buf[i].tdma.pbuf = 0;
                dma_sync_desc_dev(dev, &buf[i]);
                i++;
        }
        buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
        dma_sync_desc_dev(dev, &buf[i]);
}

static inline void setup_rx_ring(struct net_device *dev,
                                 struct sgiseeq_rx_desc *buf,
                                 int nbufs)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        int i = 0;

        while (i < (nbufs - 1)) {
                buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
                buf[i].rdma.pbuf = 0;
                dma_sync_desc_dev(dev, &buf[i]);
                i++;
        }
        buf[i].rdma.pbuf = 0;
        buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
        dma_sync_desc_dev(dev, &buf[i]);
}

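/*
 * Both rings are circular: the last descriptor's pnext points back at
 * the first.  These links are written once here, at probe time, and
 * never change afterwards; seeq_init_ring() only refreshes the cntinfo
 * words and the rx buffer pointers.
 */
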
static int __init sgiseeq_probe(struct platform_device *pdev)
{
        struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
        struct hpc3_regs *hpcregs = pd->hpc;
        struct sgiseeq_init_block *sr;
        unsigned int irq = pd->irq;
        struct sgiseeq_private *sp;
        struct net_device *dev;
        int err;
        DECLARE_MAC_BUF(mac);

        dev = alloc_etherdev(sizeof (struct sgiseeq_private));
        if (!dev) {
                printk(KERN_ERR "Sgiseeq: Etherdev alloc failed, aborting.\n");
                err = -ENOMEM;
                goto err_out;
        }

        platform_set_drvdata(pdev, dev);
        sp = netdev_priv(dev);

        /* Make private data page aligned */
        sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
                                &sp->srings_dma, GFP_KERNEL);
        if (!sr) {
                printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }
        sp->srings = sr;
        sp->rx_desc = sp->srings->rxvector;
        sp->tx_desc = sp->srings->txvector;

        /* A couple calculations now, saves many cycles later. */
        setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
        setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);

        memcpy(dev->dev_addr, pd->mac, ETH_ALEN);

#ifdef DEBUG
        gpriv = sp;
        gdev = dev;
#endif
        sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
        sp->hregs = &hpcregs->ethregs;
        sp->name = sgiseeqstr;
        sp->mode = SEEQ_RCMD_RBCAST;

        /* Setup PIO and DMA transfer timing */
        sp->hregs->pconfig = 0x161;
        sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
                             HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;

        /* Reset the chip. */
        hpc3_eth_reset(sp->hregs);

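        /*
         * Chip detection, as the test below assumes: on the newer 80C03
         * "EDLC" part the low byte of the transmit collision counter
         * reads back as zero after a reset, while the original 8003
         * returns a nonzero value here.
         */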
        sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
        if (sp->is_edlc)
                sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
                              SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
                              SEEQ_CTRL_ENCARR;

        dev->open               = sgiseeq_open;
        dev->stop               = sgiseeq_close;
        dev->hard_start_xmit    = sgiseeq_start_xmit;
        dev->tx_timeout         = timeout;
        dev->watchdog_timeo     = (200 * HZ) / 1000;
        dev->set_multicast_list = sgiseeq_set_multicast;
        dev->set_mac_address    = sgiseeq_set_mac_address;
        dev->irq                = irq;

        if (register_netdev(dev)) {
                printk(KERN_ERR "Sgiseeq: Cannot register net device, "
                       "aborting.\n");
                err = -ENODEV;
                goto err_out_free_page;
        }

        printk(KERN_INFO "%s: %s %s\n",
               dev->name, sgiseeqstr, print_mac(mac, dev->dev_addr));

        return 0;

err_out_free_page:
        dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
                             sp->srings_dma);
err_out_free_dev:
        free_netdev(dev);

err_out:
        return err;
}

static int __exit sgiseeq_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct sgiseeq_private *sp = netdev_priv(dev);

        unregister_netdev(dev);
        dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
                             sp->srings_dma);
        free_netdev(dev);
        platform_set_drvdata(pdev, NULL);

        return 0;
}

static struct platform_driver sgiseeq_driver = {
        .probe  = sgiseeq_probe,
        .remove = __exit_p(sgiseeq_remove),
        .driver = {
                .name   = "sgiseeq"
        }
};

static int __init sgiseeq_module_init(void)
{
        if (platform_driver_register(&sgiseeq_driver)) {
                printk(KERN_ERR "Driver registration failed\n");
                return -ENODEV;
        }

        return 0;
}

static void __exit sgiseeq_module_exit(void)
{
        platform_driver_unregister(&sgiseeq_driver);
}

module_init(sgiseeq_module_init);
module_exit(sgiseeq_module_exit);

MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
MODULE_LICENSE("GPL");