/[dynamips]/upstream/dynamips-0.2.5/dev_mueslix.c
This is a repository of my old source code, which isn't updated any more. Go to git.rot13.org for current projects!
ViewVC logotype

Contents of /upstream/dynamips-0.2.5/dev_mueslix.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1 - (show annotations)
Sat Oct 6 16:01:44 2007 UTC (12 years, 1 month ago) by dpavlin
File MIME type: text/plain
File size: 26922 byte(s)
import 0.2.5 from upstream

1 /*
2 * Cisco C7200 (Predator) Simulation Platform.
3 * Copyright (C) 2005,2006 Christophe Fillot. All rights reserved.
4 *
5 * Serial Interfaces (Mueslix).
6 *
7 * Note: "debug serial mueslix" gives more technical info.
8 *
9 * Chip mode: Cisco models 36xx and 72xx don't seem to use the same microcode,
10 * so there are code variants to make things work properly.
11 *
12 * Chip mode 0 => 3600
13 * Chip mode 1 => 7200
14 *
15 * 2 points noticed until now:
16 * - RX/TX ring wrapping checks are done differently,
17 * - TX packet sizes are not specified in the same way.
18 *
19 * Test methodology:
20 * - Connect two virtual routers together ;
21 * - Do pings by sending 10 packets by 10 packets. If this stops working,
22 * count the number of transmitted packets and check with RX/TX rings
23 * sizes. This is probably a ring wrapping problem.
24 * - Do multiple pings with various sizes (padding checks);
25 * - Check if CDP is working, with various hostname sizes. Since CDP
26 * contains a checksum, it is a good way to determine if packets are
27 * sent/received correctly.
28 * - Do a Telnet from both virtual router to the other one, and do a
29 * "sh run".
30 */
31
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <string.h>
35 #include <unistd.h>
36 #include <errno.h>
37 #include <assert.h>
38
39 #include "mips64.h"
40 #include "dynamips.h"
41 #include "memory.h"
42 #include "device.h"
43 #include "net.h"
44 #include "net_io.h"
45 #include "ptask.h"
46 #include "dev_mueslix.h"
47
48 /* Debugging flags */
49 #define DEBUG_ACCESS 0
50 #define DEBUG_UNKNOWN 0
51 #define DEBUG_PCI_REGS 0
52 #define DEBUG_TRANSMIT 0
53 #define DEBUG_RECEIVE 0
54
55 /* Mueslix PCI vendor/product codes */
56 #define MUESLIX_PCI_VENDOR_ID 0x1137
57 #define MUESLIX_PCI_PRODUCT_ID 0x0001
58
59 /* Number of channels (4 interfaces) */
60 #define MUESLIX_NR_CHANNELS 4
61 #define MUESLIX_CHANNEL_LEN 0x100
62
63 /* RX/TX status for a channel */
64 #define MUESLIX_CHANNEL_STATUS_RX 0x01
65 #define MUESLIX_CHANNEL_STATUS_TX 0x02
66
67 /* RX/TX enable masks (XXX check if bit position is correct) */
68 #define MUESLIX_TX_ENABLE 0x01
69 #define MUESLIX_RX_ENABLE 0x02
70
71 /* RX/TX IRQ masks */
72 #define MUESLIX_TX_IRQ 0x01
73 #define MUESLIX_RX_IRQ 0x10
74
75 /* Addresses of ports */
76 #define MUESLIX_CHANNEL0_OFFSET 0x100
77 #define MUESLIX_CHANNEL1_OFFSET 0x200
78 #define MUESLIX_CHANNEL2_OFFSET 0x300
79 #define MUESLIX_CHANNEL3_OFFSET 0x400
80
81 /* TPU Registers */
82 #define MUESLIX_TPU_CMD_OFFSET 0x2c24
83 #define MUESLIX_TPU_CMD_RSP_OFFSET 0x2c2c
84
85 /* General and channels registers */
86 #define MUESLIX_GEN_CHAN_LEN 0x500
87
88 /* TPU microcode */
89 #define MUESLIX_UCODE_OFFSET 0x2000
90 #define MUESLIX_UCODE_LEN 0x800
91
92 /* TPU Xmem and YMem */
93 #define MUESLIX_XMEM_OFFSET 0x2a00
94 #define MUESLIX_YMEM_OFFSET 0x2b00
95 #define MUESLIX_XYMEM_LEN 0x100
96
97 /* Maximum packet size */
98 #define MUESLIX_MAX_PKT_SIZE 2048
99
100 /* Send up to 16 packets in a TX ring scan pass */
101 #define MUESLIX_TXRING_PASS_COUNT 16
102
103 /* RX descriptors */
104 #define MUESLIX_RXDESC_OWN 0x80000000 /* Ownership */
105 #define MUESLIX_RXDESC_FS 0x40000000 /* First Segment */
106 #define MUESLIX_RXDESC_LS 0x20000000 /* Last Segment */
107 #define MUESLIX_RXDESC_OVERRUN 0x10000000 /* Overrun */
108 #define MUESLIX_RXDESC_IGNORED 0x08000000 /* Ignored */
109 #define MUESLIX_RXDESC_ABORT 0x04000000 /* Abort */
110 #define MUESLIX_RXDESC_CRC 0x02000000 /* CRC error */
111 #define MUESLIX_RXDESC_LEN_MASK 0xfff
112
113 /* TX descriptors */
114 #define MUESLIX_TXDESC_OWN 0x80000000 /* Ownership */
115 #define MUESLIX_TXDESC_FS 0x40000000 /* First Segment */
116 #define MUESLIX_TXDESC_LS 0x20000000 /* Last Segment */
117 #define MUESLIX_TXDESC_SUB 0x00100000 /* Length substractor ? */
118 #define MUESLIX_TXDESC_SUB_LEN 0x03000000 /* Length substrator ? */
119 #define MUESLIX_TXDESC_SUB_SHIFT 24
120 #define MUESLIX_TXDESC_PAD 0x00c00000 /* Sort of padding info ? */
121 #define MUESLIX_TXDESC_PAD_SHIFT 22
122
123 #define MUESLIX_TXDESC_LEN_MASK 0xfff
124
/* RX Descriptor: rdes[0] = status/flags + buffer length (low 12 bits),
   rdes[1] = physical address of the data buffer in guest RAM */
struct rx_desc {
   m_uint32_t rdes[2];
};
129
/* TX Descriptor: tdes[0] = status/flags + fragment length (low 12 bits),
   tdes[1] = physical address of the data buffer in guest RAM */
struct tx_desc {
   m_uint32_t tdes[2];
};
134
135 /* Forward declaration of Mueslix data */
136 typedef struct mueslix_data mueslix_data_t;
137
/* Mueslix channel (one per serial interface) */
struct mueslix_channel {
   /* Channel ID (0 .. MUESLIX_NR_CHANNELS-1) */
   u_int id;

   /* RX/TX status (MUESLIX_CHANNEL_STATUS_* bits; read back as IRQ status) */
   u_int rx_tx_status;

   /* Channel status (0=disabled; toggled by the TPU command handlers) */
   u_int status;

   /* NetIO descriptor (NULL while no NIO is bound to this channel) */
   netio_desc_t *nio;

   /* TX ring scanners task id (registered in dev_mueslix_set_nio) */
   ptask_id_t tx_tid;

   /* physical addresses for start and end of RX/TX rings */
   m_uint32_t rx_start,rx_end,tx_start,tx_end;

   /* physical addresses of current RX and TX descriptors */
   m_uint32_t rx_current,tx_current;

   /* Parent mueslix structure */
   mueslix_data_t *parent;
};
164
/* Mueslix Data: per-chip state shared by the 4 channels */
struct mueslix_data {
   /* Device instance name (used for logging) */
   char *name;

   /* TPU options (guest read/write register at offset 0x2c00) */
   m_uint32_t tpu_options;

   /* Virtual machine */
   vm_instance_t *vm;

   /* Virtual device */
   struct vdevice *dev;

   /* PCI device information */
   struct pci_device *pci_dev;

   /* Chip mode:
    *
    * 0=increment ring pointers before check + direct TX size,
    * 1=increment ring pointers after check + "complex" TX size.
    */
   int chip_mode;

   /* Channels */
   struct mueslix_channel channel[MUESLIX_NR_CHANNELS];
   /* RX/TX enable bits, 2 per channel (guest-written at offset 0x14) */
   m_uint32_t channel_enable_mask;

   /* TPU microcode (memory-mapped at MUESLIX_UCODE_OFFSET) */
   u_char ucode[MUESLIX_UCODE_LEN];

   /* TPU Xmem and Ymem (memory-mapped at MUESLIX_XMEM/YMEM_OFFSET) */
   u_char xmem[MUESLIX_XYMEM_LEN];
   u_char ymem[MUESLIX_XYMEM_LEN];
};
199
/* Offsets of the 4 channels' register windows within the device space */
static m_uint32_t channel_offset[MUESLIX_NR_CHANNELS] = {
   MUESLIX_CHANNEL0_OFFSET, MUESLIX_CHANNEL1_OFFSET,
   MUESLIX_CHANNEL2_OFFSET, MUESLIX_CHANNEL3_OFFSET,
};
205
206 /* Log a Mueslix message */
207 #define MUESLIX_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg)
208
209 /* Returns TRUE if RX/TX is enabled for a channel */
210 static inline int dev_mueslix_is_rx_tx_enabled(struct mueslix_data *d,u_int id)
211 {
212 /* 2 bits for RX/TX, 4 channels max */
213 return((d->channel_enable_mask >> (id << 1)) & 0x03);
214 }
215
216 /*
217 * Access to channel registers.
218 */
219 void dev_mueslix_chan_access(cpu_mips_t *cpu,struct mueslix_channel *channel,
220 m_uint32_t offset,u_int op_size,u_int op_type,
221 m_uint64_t *data)
222 {
223 switch(offset) {
224 case 0x60: /* signals ? */
225 if ((op_type == MTS_READ) && (channel->nio != NULL))
226 *data = 0xFFFFFFFF;
227 break;
228
229 case 0x64: /* port status - cable type and probably other things */
230 if (op_type == MTS_READ)
231 *data = 0x7B;
232 break;
233
234 case 0x90: /* has influence on clock rate */
235 if (op_type == MTS_READ)
236 *data = 0x11111111;
237 break;
238
239 case 0x80: /* TX start */
240 if (op_type == MTS_WRITE)
241 channel->tx_start = channel->tx_current = *data;
242 else
243 *data = channel->tx_start;
244 break;
245
246 case 0x84: /* TX end */
247 if (op_type == MTS_WRITE)
248 channel->tx_end = *data;
249 else
250 *data = channel->tx_end;
251 break;
252
253 case 0x88: /* RX start */
254 if (op_type == MTS_WRITE)
255 channel->rx_start = channel->rx_current = *data;
256 else
257 *data = channel->rx_start;
258 break;
259
260 case 0x8c: /* RX end */
261 if (op_type == MTS_WRITE)
262 channel->rx_end = *data;
263 else
264 *data = channel->rx_end;
265 break;
266 }
267 }
268
269 /* Handle TPU commands for chip mode 0 (3600) */
270 static void tpu_cm0_handle_cmd(struct mueslix_data *d,u_int cmd)
271 {
272 struct mueslix_channel *channel;
273 u_int opcode,channel_id;
274
275 opcode = (cmd >> 12) & 0xFF;
276 channel_id = cmd & 0x03;
277 channel = &d->channel[channel_id];
278
279 switch(opcode) {
280 case 0x10:
281 MUESLIX_LOG(d,"channel %u disabled\n",channel_id);
282 channel->status = 0;
283 break;
284 case 0x00:
285 MUESLIX_LOG(d,"channel %u enabled\n",channel_id);
286 channel->status = 1;
287 break;
288 default:
289 MUESLIX_LOG(d,"unknown command 0x%5x\n",cmd);
290 }
291 }
292
293 /* Handle TPU commands for chip mode 1 (7200) */
294 static void tpu_cm1_handle_cmd(struct mueslix_data *d,u_int cmd)
295 {
296 struct mueslix_channel *channel;
297 u_int opcode,channel_id;
298
299 opcode = (cmd >> 12) & 0xFF;
300 channel_id = cmd & 0x03;
301 channel = &d->channel[channel_id];
302
303 switch(opcode) {
304 case 0x50:
305 case 0x30:
306 MUESLIX_LOG(d,"channel %u disabled\n",channel_id);
307 channel->status = 0;
308 break;
309 case 0x00:
310 MUESLIX_LOG(d,"channel %u enabled\n",channel_id);
311 channel->status = 1;
312 break;
313 default:
314 MUESLIX_LOG(d,"unknown command 0x%5x\n",cmd);
315 }
316 }
317
318 /*
319 * dev_mueslix_access()
320 */
321 void *dev_mueslix_access(cpu_mips_t *cpu,struct vdevice *dev,m_uint32_t offset,
322 u_int op_size,u_int op_type,m_uint64_t *data)
323 {
324 struct mueslix_data *d = dev->priv_data;
325 struct mueslix_channel *channel;
326 m_uint32_t irq_status;
327 int i;
328
329 #if DEBUG_ACCESS >= 2
330 if (op_type == MTS_READ) {
331 cpu_log(cpu,d->name,"read access to offset=0x%x, pc=0x%llx, size=%u\n",
332 offset,cpu->pc,op_size);
333 } else {
334 cpu_log(cpu,d->name,"write access to offset=0x%x, pc=0x%llx, "
335 "val=0x%llx, size=%u\n",offset,cpu->pc,*data,op_size);
336 }
337 #endif
338
339 /* Returns 0 if we don't know the offset */
340 if (op_type == MTS_READ)
341 *data = 0x00000000;
342
343 /* Handle microcode access */
344 if ((offset >= MUESLIX_UCODE_OFFSET) &&
345 (offset < (MUESLIX_UCODE_OFFSET + MUESLIX_UCODE_LEN)))
346 return(d->ucode + offset - MUESLIX_UCODE_OFFSET);
347
348 /* Handle TPU XMem access */
349 if ((offset >= MUESLIX_XMEM_OFFSET) &&
350 (offset < (MUESLIX_XMEM_OFFSET + MUESLIX_XYMEM_LEN)))
351 return(d->xmem + offset - MUESLIX_XMEM_OFFSET);
352
353 /* Handle TPU YMem access */
354 if ((offset >= MUESLIX_YMEM_OFFSET) &&
355 (offset < (MUESLIX_YMEM_OFFSET + MUESLIX_XYMEM_LEN)))
356 return(d->ymem + offset - MUESLIX_YMEM_OFFSET);
357
358 /* Handle channel access */
359 for(i=0;i<MUESLIX_NR_CHANNELS;i++)
360 if ((offset >= channel_offset[i]) &&
361 (offset < (channel_offset[i] + MUESLIX_CHANNEL_LEN)))
362 {
363 dev_mueslix_chan_access(cpu,&d->channel[i],
364 offset - channel_offset[i],
365 op_size,op_type,data);
366 return NULL;
367 }
368
369 /* Generic case */
370 switch(offset) {
371 /* this reg is accessed when an interrupt occurs */
372 case 0x0:
373 if (op_type == MTS_READ) {
374 irq_status = 0;
375
376 for(i=0;i<MUESLIX_NR_CHANNELS;i++) {
377 channel = &d->channel[i];
378
379 if ((dev_mueslix_is_rx_tx_enabled(d,i) & MUESLIX_TX_ENABLE) &&
380 (channel->rx_tx_status & MUESLIX_CHANNEL_STATUS_RX))
381 irq_status |= MUESLIX_RX_IRQ << i;
382
383 if ((dev_mueslix_is_rx_tx_enabled(d,i) & MUESLIX_TX_ENABLE) &&
384 (channel->rx_tx_status & MUESLIX_CHANNEL_STATUS_TX))
385 irq_status |= MUESLIX_TX_IRQ << i;
386 }
387
388 /*
389 * Hack: we re-trigger an interrupt here. This was necessary
390 * because the Mueslix driver was not working properly with
391 * a C3620 platform.
392 */
393 if (irq_status)
394 pci_dev_trigger_irq(d->vm,d->pci_dev);
395
396 *data = irq_status;
397 } else {
398 for(i=0;i<MUESLIX_NR_CHANNELS;i++) {
399 channel = &d->channel[i];
400 channel->rx_tx_status = 0;
401 }
402 }
403 break;
404
405 /* maybe interrupt mask */
406 case 0x10:
407 if (op_type == MTS_READ)
408 *data = 0x2FF;
409 break;
410
411 case 0x14:
412 if (op_type == MTS_READ)
413 *data = d->channel_enable_mask;
414 else {
415 #if DEBUG_ACCESS
416 cpu_log(cpu,d->name,
417 "channel_enable_mask = 0x%5.5llx at pc=0x%llx\n",
418 *data,cpu->pc);
419 #endif
420 d->channel_enable_mask = *data;
421 }
422 break;
423
424 case 0x18:
425 if (op_type == MTS_READ)
426 *data = 0x7F7F7F7F;
427 break;
428
429 case 0x48:
430 if (op_type == MTS_READ)
431 *data = 0x00000000;
432 break;
433
434 case 0x7c:
435 if (op_type == MTS_READ)
436 *data = 0x492;
437 break;
438
439 case 0x2c00:
440 if (op_type == MTS_READ)
441 *data = d->tpu_options;
442 else
443 d->tpu_options = *data;
444 break;
445
446 /* cmd reg */
447 case MUESLIX_TPU_CMD_OFFSET:
448 #if DEBUG_ACCESS
449 if (op_type == MTS_WRITE) {
450 cpu_log(cpu,d->name,"cmd_reg = 0x%5.5llx at pc=0x%llx\n",
451 *data,cpu->pc);
452 }
453 #endif
454 switch(d->chip_mode) {
455 case 0: /* 3600 */
456 tpu_cm0_handle_cmd(d,*data);
457 break;
458 case 1: /* 7200 */
459 tpu_cm1_handle_cmd(d,*data);
460 break;
461 }
462 break;
463
464 /*
465 * cmd_rsp reg, it seems that 0xFFFF means OK
466 * (seen on a "sh contr se1/0" with "debug serial mueslix" enabled).
467 */
468 case MUESLIX_TPU_CMD_RSP_OFFSET:
469 if (op_type == MTS_READ)
470 *data = 0xFFFF;
471 break;
472
473 #if DEBUG_UNKNOWN
474 default:
475 if (op_type == MTS_READ) {
476 cpu_log(cpu,d->name,
477 "read from unknown addr 0x%x, pc=0x%llx (size=%u)\n",
478 offset,cpu->pc,op_size);
479 } else {
480 cpu_log(cpu,d->name,
481 "write to unknown addr 0x%x, value=0x%llx, "
482 "pc=0x%llx (size=%u)\n",offset,*data,cpu->pc,op_size);
483 }
484 #endif
485 }
486
487 return NULL;
488 }
489
490 /*
491 * Get the address of the next RX descriptor.
492 */
493 static m_uint32_t rxdesc_get_next(struct mueslix_channel *channel,
494 m_uint32_t rxd_addr)
495 {
496 m_uint32_t nrxd_addr;
497
498 switch(channel->parent->chip_mode) {
499 case 0:
500 nrxd_addr = rxd_addr + sizeof(struct rx_desc);
501 if (nrxd_addr == channel->rx_end)
502 nrxd_addr = channel->rx_start;
503 break;
504
505 case 1:
506 default:
507 if (rxd_addr == channel->rx_end)
508 nrxd_addr = channel->rx_start;
509 else
510 nrxd_addr = rxd_addr + sizeof(struct rx_desc);
511 break;
512 }
513
514 return(nrxd_addr);
515 }
516
517 /* Read an RX descriptor */
518 static void rxdesc_read(struct mueslix_data *d,m_uint32_t rxd_addr,
519 struct rx_desc *rxd)
520 {
521 #if DEBUG_RECEIVE
522 MUESLIX_LOG(d,"reading RX descriptor at address 0x%x\n",rxd_addr);
523 #endif
524
525 /* get the next descriptor from VM physical RAM */
526 physmem_copy_from_vm(d->vm,rxd,rxd_addr,sizeof(struct rx_desc));
527
528 /* byte-swapping */
529 rxd->rdes[0] = vmtoh32(rxd->rdes[0]);
530 rxd->rdes[1] = vmtoh32(rxd->rdes[1]);
531 }
532
/*
 * Try to acquire the specified RX descriptor. Returns TRUE if we have it.
 * It assumes that the byte-swapping is done.
 */
static inline int rxdesc_acquire(m_uint32_t rdes0)
{
   /* non-zero (truthy) exactly when the OWN bit is set in rdes0 */
   return(rdes0 & MUESLIX_RXDESC_OWN);
}
541
542 /* Put a packet in buffer of a descriptor */
543 static ssize_t rxdesc_put_pkt(struct mueslix_data *d,struct rx_desc *rxd,
544 u_char **pkt,ssize_t *pkt_len)
545 {
546 ssize_t len,cp_len;
547
548 len = rxd->rdes[0] & MUESLIX_RXDESC_LEN_MASK;
549
550 /* compute the data length to copy */
551 cp_len = m_min(len,*pkt_len);
552
553 #if DEBUG_RECEIVE
554 MUESLIX_LOG(d,"copying %d bytes at 0x%x\n",cp_len,rxd->rdes[1]);
555 #endif
556
557 /* copy packet data to the VM physical RAM */
558 physmem_copy_to_vm(d->vm,*pkt,rxd->rdes[1],cp_len);
559
560 *pkt += cp_len;
561 *pkt_len -= cp_len;
562 return(cp_len);
563 }
564
/*
 * Put a packet in the RX ring of the Mueslix specified channel.
 *
 * Walks the RX descriptor ring starting at rx_current, copies the packet
 * into the guest buffers, updates the descriptor status words (LS/FS and
 * length), then raises the channel RX status and triggers the PCI IRQ.
 */
static void dev_mueslix_receive_pkt(struct mueslix_channel *channel,
                                    u_char *pkt,ssize_t pkt_len)
{
   struct mueslix_data *d = channel->parent;
   m_uint32_t rx_start,rxdn_addr,rxdn_rdes0;
   struct rx_desc rxd0,rxdn,*rxdc;
   ssize_t cp_len,tot_len = pkt_len;
   u_char *pkt_ptr = pkt;
   int i;

   /* Ring must be set up, channel enabled by the TPU, and a NIO bound */
   if ((channel->rx_start == 0) || (channel->status == 0) ||
       (channel->nio == NULL))
      return;

   /* Don't make anything if RX is not enabled for this channel */
   if (!(dev_mueslix_is_rx_tx_enabled(d,channel->id) & MUESLIX_RX_ENABLE))
      return;

   /* Truncate the packet if it is too big */
   pkt_len = m_min(pkt_len,MUESLIX_MAX_PKT_SIZE);

   /* Copy the current rxring descriptor */
   rxdesc_read(d,channel->rx_current,&rxd0);

   /* We must have the first descriptor... */
   if (!rxdesc_acquire(rxd0.rdes[0]))
      return;

   /* Remember the first RX descriptor address */
   rx_start = channel->rx_current;

   for(i=0,rxdc=&rxd0;tot_len>0;i++)
   {
      /* Put data into the descriptor buffers */
      cp_len = rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len);

      /* Get address of the next descriptor */
      rxdn_addr = rxdesc_get_next(channel,channel->rx_current);

      /* We have finished if the complete packet has been stored */
      if (tot_len == 0) {
         rxdc->rdes[0] = MUESLIX_RXDESC_LS;
         rxdc->rdes[0] |= cp_len;

         /* the first descriptor status is written back later (with FS) */
         if (i != 0)
            physmem_copy_u32_to_vm(d->vm,channel->rx_current,rxdc->rdes[0]);

         channel->rx_current = rxdn_addr;
         break;
      }

#if DEBUG_RECEIVE
      MUESLIX_LOG(d,"trying to acquire new descriptor at 0x%x\n",rxdn_addr);
#endif

      /* Get status of the next descriptor to see if we can acquire it */
      rxdn_rdes0 = physmem_copy_u32_from_vm(d->vm,rxdn_addr);

      /* no next descriptor available: flag an overrun on this one */
      if (!rxdesc_acquire(rxdn_rdes0))
         rxdc->rdes[0] = MUESLIX_RXDESC_LS | MUESLIX_RXDESC_OVERRUN;
      else
         rxdc->rdes[0] = 0x00000000;   /* ok, no special flag */

      rxdc->rdes[0] |= cp_len;

      /* Update the new status (only if we are not on the first desc) */
      if (i != 0)
         physmem_copy_u32_to_vm(d->vm,channel->rx_current,rxdc->rdes[0]);

      /* Update the RX pointer */
      channel->rx_current = rxdn_addr;

      /* stop on overrun (LS was forced above) */
      if (rxdc->rdes[0] & MUESLIX_RXDESC_LS)
         break;

      /* Read the next descriptor from VM physical RAM */
      rxdesc_read(d,rxdn_addr,&rxdn);
      rxdc = &rxdn;
   }

   /* Update the first RX descriptor (mark it as First Segment) */
   rxd0.rdes[0] |= MUESLIX_RXDESC_FS;
   physmem_copy_u32_to_vm(d->vm,rx_start,rxd0.rdes[0]);

   /* Indicate that we have a frame ready (XXX something to do ?) */

   /* Generate IRQ on CPU */
   channel->rx_tx_status |= MUESLIX_CHANNEL_STATUS_RX;
   pci_dev_trigger_irq(d->vm,d->pci_dev);
}
658
659 /* Handle the Mueslix RX ring of the specified channel */
660 static int dev_mueslix_handle_rxring(netio_desc_t *nio,
661 u_char *pkt,ssize_t pkt_len,
662 struct mueslix_channel *channel)
663 {
664 #if DEBUG_RECEIVE
665 struct mueslix_data *d = channel->parent;
666
667 MUESLIX_LOG(d,"channel %u: receiving a packet of %d bytes\n",
668 channel->id,pkt_len);
669 mem_dump(log_file,pkt,pkt_len);
670 #endif
671
672 dev_mueslix_receive_pkt(channel,pkt,pkt_len);
673 return(TRUE);
674 }
675
676 /* Read a TX descriptor */
677 static void txdesc_read(struct mueslix_data *d,m_uint32_t txd_addr,
678 struct tx_desc *txd)
679 {
680 /* get the next descriptor from VM physical RAM */
681 physmem_copy_from_vm(d->vm,txd,txd_addr,sizeof(struct tx_desc));
682
683 /* byte-swapping */
684 txd->tdes[0] = vmtoh32(txd->tdes[0]);
685 txd->tdes[1] = vmtoh32(txd->tdes[1]);
686 }
687
688 /* Set the address of the next TX descriptor */
689 static void txdesc_set_next(struct mueslix_channel *channel)
690 {
691 switch(channel->parent->chip_mode) {
692 case 0:
693 channel->tx_current += sizeof(struct tx_desc);
694
695 if (channel->tx_current == channel->tx_end)
696 channel->tx_current = channel->tx_start;
697 break;
698
699 case 1:
700 default:
701 if (channel->tx_current == channel->tx_end)
702 channel->tx_current = channel->tx_start;
703 else
704 channel->tx_current += sizeof(struct tx_desc);
705 }
706 }
707
708 /* Handle the TX ring of a specific channel (single packet) */
709 static int dev_mueslix_handle_txring_single(struct mueslix_channel *channel)
710 {
711 struct mueslix_data *d = channel->parent;
712 u_char pkt[MUESLIX_MAX_PKT_SIZE],*pkt_ptr;
713 m_uint32_t tx_start,clen,sub_len,tot_len,pad;
714 struct tx_desc txd0,ctxd,*ptxd;
715 int done = FALSE;
716
717 if ((channel->tx_start == 0) || (channel->status == 0))
718 return(FALSE);
719
720 /* Copy the current txring descriptor */
721 tx_start = channel->tx_current;
722 ptxd = &txd0;
723 txdesc_read(d,channel->tx_current,ptxd);
724
725 /* If we don't own the descriptor, we cannot transmit */
726 if (!(txd0.tdes[0] & MUESLIX_TXDESC_OWN))
727 return(FALSE);
728
729 #if DEBUG_TRANSMIT
730 MUESLIX_LOG(d,"mueslix_handle_txring: 1st desc: "
731 "tdes[0]=0x%x, tdes[1]=0x%x\n",
732 ptxd->tdes[0],ptxd->tdes[1]);
733 #endif
734
735 pkt_ptr = pkt;
736 tot_len = 0;
737
738 do {
739 #if DEBUG_TRANSMIT
740 MUESLIX_LOG(d,"mueslix_handle_txring: loop: "
741 "tdes[0]=0x%x, tdes[1]=0x%x\n",
742 ptxd->tdes[0],ptxd->tdes[1]);
743 #endif
744
745 if (!(ptxd->tdes[0] & MUESLIX_TXDESC_OWN)) {
746 MUESLIX_LOG(d,"mueslix_handle_txring: descriptor not owned!\n");
747 return(FALSE);
748 }
749
750 switch(channel->parent->chip_mode) {
751 case 0:
752 clen = ptxd->tdes[0] & MUESLIX_TXDESC_LEN_MASK;
753 break;
754
755 case 1:
756 default:
757 clen = (ptxd->tdes[0] & MUESLIX_TXDESC_LEN_MASK) << 2;
758
759 if (ptxd->tdes[0] & MUESLIX_TXDESC_SUB) {
760 sub_len = ptxd->tdes[0] & MUESLIX_TXDESC_SUB_LEN;
761 sub_len >>= MUESLIX_TXDESC_SUB_SHIFT;
762 clen -= sub_len;
763 }
764 }
765
766 /* Be sure that we have length not null */
767 if (clen != 0) {
768 //printf("pkt_ptr = %p, ptxd->tdes[1] = 0x%x, clen = %d\n",
769 //pkt_ptr, ptxd->tdes[1], clen);
770 physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->tdes[1],clen);
771 }
772
773 pkt_ptr += clen;
774 tot_len += clen;
775
776 /* Clear the OWN bit if this is not the first descriptor */
777 if (!(ptxd->tdes[0] & MUESLIX_TXDESC_FS))
778 physmem_copy_u32_to_vm(d->vm,channel->tx_current,0);
779
780 /* Go to the next descriptor */
781 txdesc_set_next(channel);
782
783 /* Copy the next txring descriptor */
784 if (!(ptxd->tdes[0] & MUESLIX_TXDESC_LS)) {
785 txdesc_read(d,channel->tx_current,&ctxd);
786 ptxd = &ctxd;
787 } else
788 done = TRUE;
789 }while(!done);
790
791 if (tot_len != 0) {
792 #if DEBUG_TRANSMIT
793 MUESLIX_LOG(d,"sending packet of %u bytes (flags=0x%4.4x)\n",
794 tot_len,txd0.tdes[0]);
795 mem_dump(log_file,pkt,tot_len);
796 #endif
797
798 pad = ptxd->tdes[0] & MUESLIX_TXDESC_PAD;
799 pad >>= MUESLIX_TXDESC_PAD_SHIFT;
800 tot_len += (pad - 1) & 0x03;
801
802 /* send it on wire */
803 netio_send(channel->nio,pkt,tot_len);
804 }
805
806 /* Clear the OWN flag of the first descriptor */
807 physmem_copy_u32_to_vm(d->vm,tx_start,0);
808
809 /* Interrupt on completion ? */
810 channel->rx_tx_status |= MUESLIX_CHANNEL_STATUS_TX;
811 pci_dev_trigger_irq(d->vm,d->pci_dev);
812 return(TRUE);
813 }
814
815 /* Handle the TX ring of a specific channel */
816 static int dev_mueslix_handle_txring(struct mueslix_channel *channel)
817 {
818 int i;
819
820 for(i=0;i<MUESLIX_TXRING_PASS_COUNT;i++)
821 if (!dev_mueslix_handle_txring_single(channel))
822 break;
823
824 return(TRUE);
825 }
826
827 /* pci_mueslix_read() */
828 static m_uint32_t pci_mueslix_read(cpu_mips_t *cpu,struct pci_device *dev,
829 int reg)
830 {
831 struct mueslix_data *d = dev->priv_data;
832
833 switch(reg) {
834 case 0x08: /* Rev ID */
835 return(0x2800001);
836 case PCI_REG_BAR0:
837 return(d->dev->phys_addr);
838 default:
839 return(0);
840 }
841 }
842
843 /* pci_mueslix_write() */
844 static void pci_mueslix_write(cpu_mips_t *cpu,struct pci_device *dev,
845 int reg,m_uint32_t value)
846 {
847 struct mueslix_data *d = dev->priv_data;
848
849 switch(reg) {
850 case PCI_REG_BAR0:
851 vm_map_device(cpu->vm,d->dev,(m_uint64_t)value);
852 MUESLIX_LOG(d,"registers are mapped at 0x%x\n",value);
853 break;
854 }
855 }
856
857 /* Initialize a Mueslix chip */
858 struct mueslix_data *
859 dev_mueslix_init(vm_instance_t *vm,char *name,int chip_mode,
860 struct pci_bus *pci_bus,int pci_device,int irq)
861 {
862 struct pci_device *pci_dev;
863 struct mueslix_data *d;
864 struct vdevice *dev;
865 int i;
866
867 /* Allocate the private data structure for Mueslix chip */
868 if (!(d = malloc(sizeof(*d)))) {
869 fprintf(stderr,"%s (Mueslix): out of memory\n",name);
870 return NULL;
871 }
872
873 memset(d,0,sizeof(*d));
874 d->chip_mode = chip_mode;
875
876 for(i=0;i<MUESLIX_NR_CHANNELS;i++)
877 d->channel[i].id = i;
878
879 /* Add as PCI device */
880 pci_dev = pci_dev_add(pci_bus,name,
881 MUESLIX_PCI_VENDOR_ID,MUESLIX_PCI_PRODUCT_ID,
882 pci_device,0,irq,
883 d,NULL,pci_mueslix_read,pci_mueslix_write);
884
885 if (!pci_dev) {
886 fprintf(stderr,"%s (Mueslix): unable to create PCI device.\n",name);
887 return NULL;
888 }
889
890 /* Create the device itself */
891 if (!(dev = dev_create(name))) {
892 fprintf(stderr,"%s (Mueslix): unable to create device.\n",name);
893 return NULL;
894 }
895
896 d->name = name;
897 d->pci_dev = pci_dev;
898 d->vm = vm;
899
900 dev->phys_addr = 0;
901 dev->phys_len = 0x4000;
902 dev->handler = dev_mueslix_access;
903 dev->priv_data = d;
904
905 /* Store device info */
906 dev->priv_data = d;
907 d->dev = dev;
908 return(d);
909 }
910
911 /* Remove a Mueslix device */
912 void dev_mueslix_remove(struct mueslix_data *d)
913 {
914 if (d != NULL) {
915 pci_dev_remove(d->pci_dev);
916 vm_unbind_device(d->vm,d->dev);
917 cpu_group_rebuild_mts(d->vm->cpu_group);
918 free(d->dev);
919 free(d);
920 }
921 }
922
923 /* Bind a NIO to a Mueslix channel */
924 int dev_mueslix_set_nio(struct mueslix_data *d,u_int channel_id,
925 netio_desc_t *nio)
926 {
927 struct mueslix_channel *channel;
928
929 if (channel_id >= MUESLIX_NR_CHANNELS)
930 return(-1);
931
932 channel = &d->channel[channel_id];
933
934 /* check that a NIO is not already bound */
935 if (channel->nio != NULL)
936 return(-1);
937
938 /* define the new NIO */
939 channel->nio = nio;
940 channel->parent = d;
941 channel->tx_tid = ptask_add((ptask_callback)dev_mueslix_handle_txring,
942 channel,NULL);
943 netio_rxl_add(nio,(netio_rx_handler_t)dev_mueslix_handle_rxring,
944 channel,NULL);
945 return(0);
946 }
947
948 /* Unbind a NIO from a Mueslix channel */
949 int dev_mueslix_unset_nio(struct mueslix_data *d,u_int channel_id)
950 {
951 struct mueslix_channel *channel;
952
953 if (channel_id >= MUESLIX_NR_CHANNELS)
954 return(-1);
955
956 channel = &d->channel[channel_id];
957
958 if (channel->nio) {
959 ptask_remove(channel->tx_tid);
960 netio_rxl_remove(channel->nio);
961 channel->nio = NULL;
962 }
963 return(0);
964 }

  ViewVC Help
Powered by ViewVC 1.1.26