
Contents of /upstream/dynamips-0.2.7-RC1/dev_mueslix.c



Revision 7
Sat Oct 6 16:23:47 2007 UTC (16 years, 6 months ago) by dpavlin
File MIME type: text/plain
File size: 26990 byte(s)
dynamips-0.2.7-RC1

/*
 * Cisco router simulation platform.
 * Copyright (C) 2005,2006 Christophe Fillot. All rights reserved.
 *
 * Serial Interfaces (Mueslix).
 *
 * Note: "debug serial mueslix" gives more technical info.
 *
 * Chip mode: Cisco 36xx and 72xx models don't seem to use the same microcode,
 * so there are code variants to make things work properly.
 *
 * Chip mode 0 => 3600
 * Chip mode 1 => 7200
 *
 * Two differences noticed so far:
 *    - RX/TX ring wrapping checks are done differently;
 *    - TX packet sizes are not specified in the same way.
 *
 * Test methodology:
 *    - Connect two virtual routers together;
 *    - Do pings, sending packets 10 at a time. If this stops working,
 *      count the number of transmitted packets and compare it with the
 *      RX/TX ring sizes. This is probably a ring wrapping problem.
 *    - Do multiple pings with various sizes (padding checks);
 *    - Check that CDP is working, with various hostname sizes. Since CDP
 *      carries a checksum, it is a good way to determine whether packets
 *      are sent/received correctly.
 *    - Telnet from each virtual router to the other one, and do a
 *      "sh run".
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>

#include "cpu.h"
#include "vm.h"
#include "dynamips.h"
#include "memory.h"
#include "device.h"
#include "net.h"
#include "net_io.h"
#include "ptask.h"
#include "dev_mueslix.h"

/* Debugging flags */
#define DEBUG_ACCESS    0
#define DEBUG_UNKNOWN   0
#define DEBUG_PCI_REGS  0
#define DEBUG_TRANSMIT  0
#define DEBUG_RECEIVE   0

/* Mueslix PCI vendor/product codes */
#define MUESLIX_PCI_VENDOR_ID   0x1137
#define MUESLIX_PCI_PRODUCT_ID  0x0001

/* Number of channels (4 interfaces) */
#define MUESLIX_NR_CHANNELS   4
#define MUESLIX_CHANNEL_LEN   0x100

/* RX/TX status for a channel */
#define MUESLIX_CHANNEL_STATUS_RX  0x01
#define MUESLIX_CHANNEL_STATUS_TX  0x02

/* RX/TX enable masks (XXX check if bit position is correct) */
#define MUESLIX_TX_ENABLE  0x01
#define MUESLIX_RX_ENABLE  0x02

/* RX/TX IRQ masks */
#define MUESLIX_TX_IRQ  0x01
#define MUESLIX_RX_IRQ  0x10

/* Addresses of ports */
#define MUESLIX_CHANNEL0_OFFSET  0x100
#define MUESLIX_CHANNEL1_OFFSET  0x200
#define MUESLIX_CHANNEL2_OFFSET  0x300
#define MUESLIX_CHANNEL3_OFFSET  0x400

/* TPU Registers */
#define MUESLIX_TPU_CMD_OFFSET      0x2c24
#define MUESLIX_TPU_CMD_RSP_OFFSET  0x2c2c

/* General and channels registers */
#define MUESLIX_GEN_CHAN_LEN  0x500

/* TPU microcode */
#define MUESLIX_UCODE_OFFSET  0x2000
#define MUESLIX_UCODE_LEN     0x800

/* TPU Xmem and YMem */
#define MUESLIX_XMEM_OFFSET  0x2a00
#define MUESLIX_YMEM_OFFSET  0x2b00
#define MUESLIX_XYMEM_LEN    0x100

/* Maximum packet size */
#define MUESLIX_MAX_PKT_SIZE  2048

/* Send up to 16 packets in a TX ring scan pass */
#define MUESLIX_TXRING_PASS_COUNT  16

/* RX descriptors */
#define MUESLIX_RXDESC_OWN       0x80000000   /* Ownership */
#define MUESLIX_RXDESC_FS        0x40000000   /* First Segment */
#define MUESLIX_RXDESC_LS        0x20000000   /* Last Segment */
#define MUESLIX_RXDESC_OVERRUN   0x10000000   /* Overrun */
#define MUESLIX_RXDESC_IGNORED   0x08000000   /* Ignored */
#define MUESLIX_RXDESC_ABORT     0x04000000   /* Abort */
#define MUESLIX_RXDESC_CRC       0x02000000   /* CRC error */
#define MUESLIX_RXDESC_LEN_MASK  0xfff

/* TX descriptors */
#define MUESLIX_TXDESC_OWN        0x80000000   /* Ownership */
#define MUESLIX_TXDESC_FS         0x40000000   /* First Segment */
#define MUESLIX_TXDESC_LS         0x20000000   /* Last Segment */
#define MUESLIX_TXDESC_SUB        0x00100000   /* Length subtractor ? */
#define MUESLIX_TXDESC_SUB_LEN    0x03000000   /* Length subtractor ? */
#define MUESLIX_TXDESC_SUB_SHIFT  24
#define MUESLIX_TXDESC_PAD        0x00c00000   /* Sort of padding info ? */
#define MUESLIX_TXDESC_PAD_SHIFT  22

#define MUESLIX_TXDESC_LEN_MASK   0xfff

/* RX Descriptor */
struct rx_desc {
   m_uint32_t rdes[2];
};

/* TX Descriptor */
struct tx_desc {
   m_uint32_t tdes[2];
};

/* Forward declaration of Mueslix data */
typedef struct mueslix_data mueslix_data_t;

/* Mueslix channel */
struct mueslix_channel {
   /* Channel ID */
   u_int id;

   /* RX/TX status */
   u_int rx_tx_status;

   /* Channel status (0=disabled) */
   u_int status;

   /* NetIO descriptor */
   netio_desc_t *nio;

   /* TX ring scanner task id */
   ptask_id_t tx_tid;

   /* Physical addresses of the start and end of the RX/TX rings */
   m_uint32_t rx_start,rx_end,tx_start,tx_end;

   /* Physical addresses of the current RX and TX descriptors */
   m_uint32_t rx_current,tx_current;

   /* Parent mueslix structure */
   mueslix_data_t *parent;
};

/* Mueslix Data */
struct mueslix_data {
   char *name;

   /* TPU options */
   m_uint32_t tpu_options;

   /* Virtual machine */
   vm_instance_t *vm;

   /* Virtual device */
   struct vdevice *dev;

   /* PCI device information */
   struct pci_device *pci_dev;

   /* Chip mode:
    *
    * 0=increment ring pointers before check + direct TX size,
    * 1=increment ring pointers after check + "complex" TX size.
    */
   int chip_mode;

   /* Channels */
   struct mueslix_channel channel[MUESLIX_NR_CHANNELS];
   m_uint32_t channel_enable_mask;

   /* TPU microcode */
   u_char ucode[MUESLIX_UCODE_LEN];

   /* TPU Xmem and Ymem */
   u_char xmem[MUESLIX_XYMEM_LEN];
   u_char ymem[MUESLIX_XYMEM_LEN];
};

/* Offsets of the 4 channels */
static m_uint32_t channel_offset[MUESLIX_NR_CHANNELS] = {
   MUESLIX_CHANNEL0_OFFSET, MUESLIX_CHANNEL1_OFFSET,
   MUESLIX_CHANNEL2_OFFSET, MUESLIX_CHANNEL3_OFFSET,
};

/* Log a Mueslix message */
#define MUESLIX_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg)

/* Returns TRUE if RX/TX is enabled for a channel */
static inline int dev_mueslix_is_rx_tx_enabled(struct mueslix_data *d,u_int id)
{
   /* 2 bits for RX/TX, 4 channels max */
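   /* Channel i uses bits 2i and 2i+1 of the enable mask; going by the
      MUESLIX_TX_ENABLE/MUESLIX_RX_ENABLE defines above, TX enable is the
      low bit of each pair and RX enable the high bit. */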
   return((d->channel_enable_mask >> (id << 1)) & 0x03);
}

/*
 * Access to channel registers.
 */
void dev_mueslix_chan_access(cpu_gen_t *cpu,struct mueslix_channel *channel,
                             m_uint32_t offset,u_int op_size,u_int op_type,
                             m_uint64_t *data)
{
   switch(offset) {
      case 0x60: /* signals ? */
         if ((op_type == MTS_READ) && (channel->nio != NULL))
            *data = 0xFFFFFFFF;
         break;

      case 0x64: /* port status - cable type and probably other things */
         if (op_type == MTS_READ)
            *data = 0x7B;
         break;

      case 0x90: /* has influence on clock rate */
         if (op_type == MTS_READ)
            *data = 0x11111111;
         break;

      case 0x80: /* TX start */
         if (op_type == MTS_WRITE)
            channel->tx_start = channel->tx_current = *data;
         else
            *data = channel->tx_start;
         break;

      case 0x84: /* TX end */
         if (op_type == MTS_WRITE)
            channel->tx_end = *data;
         else
            *data = channel->tx_end;
         break;

      case 0x88: /* RX start */
         if (op_type == MTS_WRITE)
            channel->rx_start = channel->rx_current = *data;
         else
            *data = channel->rx_start;
         break;

      case 0x8c: /* RX end */
         if (op_type == MTS_WRITE)
            channel->rx_end = *data;
         else
            *data = channel->rx_end;
         break;
   }
}

/* Handle TPU commands for chip mode 0 (3600) */
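/* Command word layout, as decoded below: opcode in bits 12-19,
   channel id in bits 0-1. */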
static void tpu_cm0_handle_cmd(struct mueslix_data *d,u_int cmd)
{
   struct mueslix_channel *channel;
   u_int opcode,channel_id;

   opcode = (cmd >> 12) & 0xFF;
   channel_id = cmd & 0x03;
   channel = &d->channel[channel_id];

   switch(opcode) {
      case 0x10:
         MUESLIX_LOG(d,"channel %u disabled\n",channel_id);
         channel->status = 0;
         break;
      case 0x00:
         MUESLIX_LOG(d,"channel %u enabled\n",channel_id);
         channel->status = 1;
         break;
      default:
         MUESLIX_LOG(d,"unknown command 0x%5x\n",cmd);
   }
}

/* Handle TPU commands for chip mode 1 (7200) */
static void tpu_cm1_handle_cmd(struct mueslix_data *d,u_int cmd)
{
   struct mueslix_channel *channel;
   u_int opcode,channel_id;

   opcode = (cmd >> 12) & 0xFF;
   channel_id = cmd & 0x03;
   channel = &d->channel[channel_id];

   switch(opcode) {
      case 0x50:
      case 0x30:
         MUESLIX_LOG(d,"channel %u disabled\n",channel_id);
         channel->status = 0;
         break;
      case 0x00:
         MUESLIX_LOG(d,"channel %u enabled\n",channel_id);
         channel->status = 1;
         break;
      default:
         MUESLIX_LOG(d,"unknown command 0x%5x\n",cmd);
   }
}

/*
 * dev_mueslix_access()
 */
void *dev_mueslix_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
                         u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct mueslix_data *d = dev->priv_data;
   struct mueslix_channel *channel;
   m_uint32_t irq_status;
   int i;

#if DEBUG_ACCESS >= 2
   if (op_type == MTS_READ) {
      cpu_log(cpu,d->name,"read access to offset=0x%x, pc=0x%llx, size=%u\n",
              offset,cpu_get_pc(cpu),op_size);
   } else {
      cpu_log(cpu,d->name,"write access to offset=0x%x, pc=0x%llx, "
              "val=0x%llx, size=%u\n",offset,cpu_get_pc(cpu),*data,op_size);
   }
#endif

   /* Returns 0 if we don't know the offset */
   if (op_type == MTS_READ)
      *data = 0x00000000;

   /* Handle microcode access */
   if ((offset >= MUESLIX_UCODE_OFFSET) &&
       (offset < (MUESLIX_UCODE_OFFSET + MUESLIX_UCODE_LEN)))
      return(d->ucode + offset - MUESLIX_UCODE_OFFSET);

   /* Handle TPU XMem access */
   if ((offset >= MUESLIX_XMEM_OFFSET) &&
       (offset < (MUESLIX_XMEM_OFFSET + MUESLIX_XYMEM_LEN)))
      return(d->xmem + offset - MUESLIX_XMEM_OFFSET);

   /* Handle TPU YMem access */
   if ((offset >= MUESLIX_YMEM_OFFSET) &&
       (offset < (MUESLIX_YMEM_OFFSET + MUESLIX_XYMEM_LEN)))
      return(d->ymem + offset - MUESLIX_YMEM_OFFSET);

   /* Handle channel access */
   for(i=0;i<MUESLIX_NR_CHANNELS;i++)
      if ((offset >= channel_offset[i]) &&
          (offset < (channel_offset[i] + MUESLIX_CHANNEL_LEN)))
      {
         dev_mueslix_chan_access(cpu,&d->channel[i],
                                 offset - channel_offset[i],
                                 op_size,op_type,data);
         return NULL;
      }

   /* Generic case */
   switch(offset) {
      /* this reg is accessed when an interrupt occurs */
      case 0x0:
         if (op_type == MTS_READ) {
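            /* The IRQ status word reports TX IRQs in bits 0-3 and RX IRQs
               in bits 4-7, one bit per channel (MUESLIX_TX_IRQ and
               MUESLIX_RX_IRQ shifted by the channel index). */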
            irq_status = 0;

            for(i=0;i<MUESLIX_NR_CHANNELS;i++) {
               channel = &d->channel[i];

               if ((dev_mueslix_is_rx_tx_enabled(d,i) & MUESLIX_TX_ENABLE) &&
                   (channel->rx_tx_status & MUESLIX_CHANNEL_STATUS_RX))
                  irq_status |= MUESLIX_RX_IRQ << i;

               if ((dev_mueslix_is_rx_tx_enabled(d,i) & MUESLIX_TX_ENABLE) &&
                   (channel->rx_tx_status & MUESLIX_CHANNEL_STATUS_TX))
                  irq_status |= MUESLIX_TX_IRQ << i;
            }

            /*
             * Hack: we re-trigger an interrupt here. This was necessary
             * because the Mueslix driver was not working properly with
             * a C3620 platform.
             */
            if (irq_status)
               pci_dev_trigger_irq(d->vm,d->pci_dev);

            *data = irq_status;
         } else {
            for(i=0;i<MUESLIX_NR_CHANNELS;i++) {
               channel = &d->channel[i];
               channel->rx_tx_status = 0;
            }
         }
         break;

      /* maybe interrupt mask */
      case 0x10:
         if (op_type == MTS_READ)
            *data = 0x2FF;
         break;

      case 0x14:
         if (op_type == MTS_READ)
            *data = d->channel_enable_mask;
         else {
#if DEBUG_ACCESS
            cpu_log(cpu,d->name,
                    "channel_enable_mask = 0x%5.5llx at pc=0x%llx\n",
                    *data,cpu_get_pc(cpu));
#endif
            d->channel_enable_mask = *data;
         }
         break;

      case 0x18:
         if (op_type == MTS_READ)
            *data = 0x7F7F7F7F;
         break;

      case 0x48:
         if (op_type == MTS_READ)
            *data = 0x00000000;
         break;

      case 0x7c:
         if (op_type == MTS_READ)
            *data = 0x492;
         break;

      case 0x2c00:
         if (op_type == MTS_READ)
            *data = d->tpu_options;
         else
            d->tpu_options = *data;
         break;

      /* cmd reg */
      case MUESLIX_TPU_CMD_OFFSET:
#if DEBUG_ACCESS
         if (op_type == MTS_WRITE) {
            cpu_log(cpu,d->name,"cmd_reg = 0x%5.5llx at pc=0x%llx\n",
                    *data,cpu_get_pc(cpu));
         }
#endif
         switch(d->chip_mode) {
            case 0:   /* 3600 */
               tpu_cm0_handle_cmd(d,*data);
               break;
            case 1:   /* 7200 */
               tpu_cm1_handle_cmd(d,*data);
               break;
         }
         break;

      /*
       * cmd_rsp reg, it seems that 0xFFFF means OK
       * (seen on a "sh contr se1/0" with "debug serial mueslix" enabled).
       */
      case MUESLIX_TPU_CMD_RSP_OFFSET:
         if (op_type == MTS_READ)
            *data = 0xFFFF;
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,d->name,
                    "read from unknown addr 0x%x, pc=0x%llx (size=%u)\n",
                    offset,cpu_get_pc(cpu),op_size);
         } else {
            cpu_log(cpu,d->name,
                    "write to unknown addr 0x%x, value=0x%llx, "
                    "pc=0x%llx (size=%u)\n",
                    offset,*data,cpu_get_pc(cpu),op_size);
         }
#endif
   }

   return NULL;
}

/*
 * Get the address of the next RX descriptor.
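 *
 * Chip mode 0 increments first and wraps when the new address reaches
 * rx_end; chip mode 1 checks the current address against rx_end before
 * incrementing (see the ring-wrapping note in the file header).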
 */
static m_uint32_t rxdesc_get_next(struct mueslix_channel *channel,
                                  m_uint32_t rxd_addr)
{
   m_uint32_t nrxd_addr;

   switch(channel->parent->chip_mode) {
      case 0:
         nrxd_addr = rxd_addr + sizeof(struct rx_desc);
         if (nrxd_addr == channel->rx_end)
            nrxd_addr = channel->rx_start;
         break;

      case 1:
      default:
         if (rxd_addr == channel->rx_end)
            nrxd_addr = channel->rx_start;
         else
            nrxd_addr = rxd_addr + sizeof(struct rx_desc);
         break;
   }

   return(nrxd_addr);
}

/* Read an RX descriptor */
static void rxdesc_read(struct mueslix_data *d,m_uint32_t rxd_addr,
                        struct rx_desc *rxd)
{
#if DEBUG_RECEIVE
   MUESLIX_LOG(d,"reading RX descriptor at address 0x%x\n",rxd_addr);
#endif

   /* get the next descriptor from VM physical RAM */
   physmem_copy_from_vm(d->vm,rxd,rxd_addr,sizeof(struct rx_desc));

   /* byte-swapping */
   rxd->rdes[0] = vmtoh32(rxd->rdes[0]);
   rxd->rdes[1] = vmtoh32(rxd->rdes[1]);
}

/*
 * Try to acquire the specified RX descriptor. Returns TRUE if we have it.
 * It assumes that byte-swapping has already been done.
 */
static inline int rxdesc_acquire(m_uint32_t rdes0)
{
   return(rdes0 & MUESLIX_RXDESC_OWN);
}

/* Put a packet in the buffer of a descriptor */
static ssize_t rxdesc_put_pkt(struct mueslix_data *d,struct rx_desc *rxd,
                              u_char **pkt,ssize_t *pkt_len)
{
   ssize_t len,cp_len;

   len = rxd->rdes[0] & MUESLIX_RXDESC_LEN_MASK;

   /* compute the data length to copy */
   cp_len = m_min(len,*pkt_len);

#if DEBUG_RECEIVE
   MUESLIX_LOG(d,"copying %d bytes at 0x%x\n",cp_len,rxd->rdes[1]);
#endif

   /* copy packet data to the VM physical RAM */
   physmem_copy_to_vm(d->vm,*pkt,rxd->rdes[1],cp_len);

   *pkt += cp_len;
   *pkt_len -= cp_len;
   return(cp_len);
}

/*
 * Put a packet in the RX ring of the specified Mueslix channel.
 */
static void dev_mueslix_receive_pkt(struct mueslix_channel *channel,
                                    u_char *pkt,ssize_t pkt_len)
{
   struct mueslix_data *d = channel->parent;
   m_uint32_t rx_start,rxdn_addr,rxdn_rdes0;
   struct rx_desc rxd0,rxdn,*rxdc;
   ssize_t cp_len,tot_len = pkt_len;
   u_char *pkt_ptr = pkt;
   int i;

   if ((channel->rx_start == 0) || (channel->status == 0) ||
       (channel->nio == NULL))
      return;

   /* Don't do anything if RX is not enabled for this channel */
   if (!(dev_mueslix_is_rx_tx_enabled(d,channel->id) & MUESLIX_RX_ENABLE))
      return;

   /* Truncate the packet if it is too big */
   pkt_len = m_min(pkt_len,MUESLIX_MAX_PKT_SIZE);

   /* Copy the current rxring descriptor */
   rxdesc_read(d,channel->rx_current,&rxd0);

   /* We must have the first descriptor... */
   if (!rxdesc_acquire(rxd0.rdes[0]))
      return;

   /* Remember the first RX descriptor address */
   rx_start = channel->rx_current;

   for(i=0,rxdc=&rxd0;tot_len>0;i++)
   {
      /* Put data into the descriptor buffers */
      cp_len = rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len);

      /* Get address of the next descriptor */
      rxdn_addr = rxdesc_get_next(channel,channel->rx_current);

      /* We have finished if the complete packet has been stored */
      if (tot_len == 0) {
         rxdc->rdes[0] = MUESLIX_RXDESC_LS;
         rxdc->rdes[0] |= cp_len;

         if (i != 0)
            physmem_copy_u32_to_vm(d->vm,channel->rx_current,rxdc->rdes[0]);

         channel->rx_current = rxdn_addr;
         break;
      }

#if DEBUG_RECEIVE
      MUESLIX_LOG(d,"trying to acquire new descriptor at 0x%x\n",rxdn_addr);
#endif

      /* Get status of the next descriptor to see if we can acquire it */
      rxdn_rdes0 = physmem_copy_u32_from_vm(d->vm,rxdn_addr);

      if (!rxdesc_acquire(rxdn_rdes0))
         rxdc->rdes[0] = MUESLIX_RXDESC_LS | MUESLIX_RXDESC_OVERRUN;
      else
         rxdc->rdes[0] = 0x00000000;   /* ok, no special flag */

      rxdc->rdes[0] |= cp_len;

      /* Update the new status (only if we are not on the first desc) */
      if (i != 0)
         physmem_copy_u32_to_vm(d->vm,channel->rx_current,rxdc->rdes[0]);

      /* Update the RX pointer */
      channel->rx_current = rxdn_addr;

      if (rxdc->rdes[0] & MUESLIX_RXDESC_LS)
         break;

      /* Read the next descriptor from VM physical RAM */
      rxdesc_read(d,rxdn_addr,&rxdn);
      rxdc = &rxdn;
   }

   /* Update the first RX descriptor */
   rxd0.rdes[0] |= MUESLIX_RXDESC_FS;
   physmem_copy_u32_to_vm(d->vm,rx_start,rxd0.rdes[0]);

   /* Indicate that we have a frame ready (XXX something to do ?) */

   /* Generate IRQ on CPU */
   channel->rx_tx_status |= MUESLIX_CHANNEL_STATUS_RX;
   pci_dev_trigger_irq(d->vm,d->pci_dev);
}

/* Handle the Mueslix RX ring of the specified channel */
static int dev_mueslix_handle_rxring(netio_desc_t *nio,
                                     u_char *pkt,ssize_t pkt_len,
                                     struct mueslix_channel *channel)
{
#if DEBUG_RECEIVE
   struct mueslix_data *d = channel->parent;

   MUESLIX_LOG(d,"channel %u: receiving a packet of %d bytes\n",
               channel->id,pkt_len);
   mem_dump(log_file,pkt,pkt_len);
#endif

   dev_mueslix_receive_pkt(channel,pkt,pkt_len);
   return(TRUE);
}

/* Read a TX descriptor */
static void txdesc_read(struct mueslix_data *d,m_uint32_t txd_addr,
                        struct tx_desc *txd)
{
   /* get the next descriptor from VM physical RAM */
   physmem_copy_from_vm(d->vm,txd,txd_addr,sizeof(struct tx_desc));

   /* byte-swapping */
   txd->tdes[0] = vmtoh32(txd->tdes[0]);
   txd->tdes[1] = vmtoh32(txd->tdes[1]);
}

/* Set the address of the next TX descriptor */
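/* Ring wrapping follows the same chip-mode convention as rxdesc_get_next(). */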
static void txdesc_set_next(struct mueslix_channel *channel)
{
   switch(channel->parent->chip_mode) {
      case 0:
         channel->tx_current += sizeof(struct tx_desc);

         if (channel->tx_current == channel->tx_end)
            channel->tx_current = channel->tx_start;
         break;

      case 1:
      default:
         if (channel->tx_current == channel->tx_end)
            channel->tx_current = channel->tx_start;
         else
            channel->tx_current += sizeof(struct tx_desc);
   }
}

/* Handle the TX ring of a specific channel (single packet) */
static int dev_mueslix_handle_txring_single(struct mueslix_channel *channel)
{
   struct mueslix_data *d = channel->parent;
   u_char pkt[MUESLIX_MAX_PKT_SIZE],*pkt_ptr;
   m_uint32_t tx_start,clen,sub_len,tot_len,pad;
   struct tx_desc txd0,ctxd,*ptxd;
   int done = FALSE;

   if ((channel->tx_start == 0) || (channel->status == 0))
      return(FALSE);

   /* Copy the current txring descriptor */
   tx_start = channel->tx_current;
   ptxd = &txd0;
   txdesc_read(d,channel->tx_current,ptxd);

   /* If we don't own the descriptor, we cannot transmit */
   if (!(txd0.tdes[0] & MUESLIX_TXDESC_OWN))
      return(FALSE);

#if DEBUG_TRANSMIT
   MUESLIX_LOG(d,"mueslix_handle_txring: 1st desc: "
               "tdes[0]=0x%x, tdes[1]=0x%x\n",
               ptxd->tdes[0],ptxd->tdes[1]);
#endif

   pkt_ptr = pkt;
   tot_len = 0;

   do {
#if DEBUG_TRANSMIT
      MUESLIX_LOG(d,"mueslix_handle_txring: loop: "
                  "tdes[0]=0x%x, tdes[1]=0x%x\n",
                  ptxd->tdes[0],ptxd->tdes[1]);
#endif

      if (!(ptxd->tdes[0] & MUESLIX_TXDESC_OWN)) {
         MUESLIX_LOG(d,"mueslix_handle_txring: descriptor not owned!\n");
         return(FALSE);
      }

      switch(channel->parent->chip_mode) {
         case 0:
            clen = ptxd->tdes[0] & MUESLIX_TXDESC_LEN_MASK;
            break;

         case 1:
         default:
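            /* Chip mode 1 apparently encodes the TX length in 32-bit words;
               the SUB field gives a byte count to subtract from it (see the
               notes on the MUESLIX_TXDESC_SUB defines). */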
            clen = (ptxd->tdes[0] & MUESLIX_TXDESC_LEN_MASK) << 2;

            if (ptxd->tdes[0] & MUESLIX_TXDESC_SUB) {
               sub_len = ptxd->tdes[0] & MUESLIX_TXDESC_SUB_LEN;
               sub_len >>= MUESLIX_TXDESC_SUB_SHIFT;
               clen -= sub_len;
            }
      }

      /* Make sure the length is non-zero */
      if (clen != 0) {
         //printf("pkt_ptr = %p, ptxd->tdes[1] = 0x%x, clen = %d\n",
         //pkt_ptr, ptxd->tdes[1], clen);
         physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->tdes[1],clen);
      }

      pkt_ptr += clen;
      tot_len += clen;

      /* Clear the OWN bit if this is not the first descriptor */
      if (!(ptxd->tdes[0] & MUESLIX_TXDESC_FS))
         physmem_copy_u32_to_vm(d->vm,channel->tx_current,0);

      /* Go to the next descriptor */
      txdesc_set_next(channel);

      /* Copy the next txring descriptor */
      if (!(ptxd->tdes[0] & MUESLIX_TXDESC_LS)) {
         txdesc_read(d,channel->tx_current,&ctxd);
         ptxd = &ctxd;
      } else
         done = TRUE;
   } while(!done);

   if (tot_len != 0) {
#if DEBUG_TRANSMIT
      MUESLIX_LOG(d,"sending packet of %u bytes (flags=0x%4.4x)\n",
                  tot_len,txd0.tdes[0]);
      mem_dump(log_file,pkt,tot_len);
#endif

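      /* The 2-bit PAD field apparently adjusts the frame length by up to
         3 extra bytes (see the note on MUESLIX_TXDESC_PAD). */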
      pad = ptxd->tdes[0] & MUESLIX_TXDESC_PAD;
      pad >>= MUESLIX_TXDESC_PAD_SHIFT;
      tot_len += (pad - 1) & 0x03;

      /* send it on wire */
      netio_send(channel->nio,pkt,tot_len);
   }

   /* Clear the OWN flag of the first descriptor */
   physmem_copy_u32_to_vm(d->vm,tx_start,0);

   /* Interrupt on completion ? */
   channel->rx_tx_status |= MUESLIX_CHANNEL_STATUS_TX;
   pci_dev_trigger_irq(d->vm,d->pci_dev);
   return(TRUE);
}

/* Handle the TX ring of a specific channel */
static int dev_mueslix_handle_txring(struct mueslix_channel *channel)
{
   int i;

   for(i=0;i<MUESLIX_TXRING_PASS_COUNT;i++)
      if (!dev_mueslix_handle_txring_single(channel))
         break;

   return(TRUE);
}

/* pci_mueslix_read() */
static m_uint32_t pci_mueslix_read(cpu_gen_t *cpu,struct pci_device *dev,
                                   int reg)
{
   struct mueslix_data *d = dev->priv_data;

   switch(reg) {
      case 0x08:   /* Rev ID */
         return(0x2800001);
      case PCI_REG_BAR0:
         return(d->dev->phys_addr);
      default:
         return(0);
   }
}

/* pci_mueslix_write() */
static void pci_mueslix_write(cpu_gen_t *cpu,struct pci_device *dev,
                              int reg,m_uint32_t value)
{
   struct mueslix_data *d = dev->priv_data;

   switch(reg) {
      case PCI_REG_BAR0:
         vm_map_device(cpu->vm,d->dev,(m_uint64_t)value);
         MUESLIX_LOG(d,"registers are mapped at 0x%x\n",value);
         break;
   }
}

/* Initialize a Mueslix chip */
struct mueslix_data *
dev_mueslix_init(vm_instance_t *vm,char *name,int chip_mode,
                 struct pci_bus *pci_bus,int pci_device,int irq)
{
   struct pci_device *pci_dev;
   struct mueslix_data *d;
   struct vdevice *dev;
   int i;

   /* Allocate the private data structure for the Mueslix chip */
   if (!(d = malloc(sizeof(*d)))) {
      fprintf(stderr,"%s (Mueslix): out of memory\n",name);
      return NULL;
   }

   memset(d,0,sizeof(*d));
   d->chip_mode = chip_mode;

   for(i=0;i<MUESLIX_NR_CHANNELS;i++)
      d->channel[i].id = i;

   /* Add as PCI device */
   pci_dev = pci_dev_add(pci_bus,name,
                         MUESLIX_PCI_VENDOR_ID,MUESLIX_PCI_PRODUCT_ID,
                         pci_device,0,irq,
                         d,NULL,pci_mueslix_read,pci_mueslix_write);

   if (!pci_dev) {
      fprintf(stderr,"%s (Mueslix): unable to create PCI device.\n",name);
      return NULL;
   }

   /* Create the device itself */
   if (!(dev = dev_create(name))) {
      fprintf(stderr,"%s (Mueslix): unable to create device.\n",name);
      return NULL;
   }

   d->name    = name;
   d->pci_dev = pci_dev;
   d->vm      = vm;

   dev->phys_addr = 0;
   dev->phys_len  = 0x4000;
   dev->handler   = dev_mueslix_access;
   dev->priv_data = d;

   /* Store device info */
   d->dev = dev;
   return(d);
}
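
/*
 * Typical usage by platform code (illustrative sketch only, not taken from
 * this file): create the chip, then bind a NetIO descriptor to a channel.
 *
 *    struct mueslix_data *data;
 *
 *    data = dev_mueslix_init(vm,"serial",1,pci_bus,pci_device,irq);
 *    if (data != NULL)
 *       dev_mueslix_set_nio(data,0,nio);
 */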

/* Remove a Mueslix device */
void dev_mueslix_remove(struct mueslix_data *d)
{
   if (d != NULL) {
      pci_dev_remove(d->pci_dev);
      vm_unbind_device(d->vm,d->dev);
      cpu_group_rebuild_mts(d->vm->cpu_group);
      free(d->dev);
      free(d);
   }
}

/* Bind a NIO to a Mueslix channel */
int dev_mueslix_set_nio(struct mueslix_data *d,u_int channel_id,
                        netio_desc_t *nio)
{
   struct mueslix_channel *channel;

   if (channel_id >= MUESLIX_NR_CHANNELS)
      return(-1);

   channel = &d->channel[channel_id];

   /* check that a NIO is not already bound */
   if (channel->nio != NULL)
      return(-1);

   /* define the new NIO */
   channel->nio = nio;
   channel->parent = d;
   channel->tx_tid = ptask_add((ptask_callback)dev_mueslix_handle_txring,
                               channel,NULL);
   netio_rxl_add(nio,(netio_rx_handler_t)dev_mueslix_handle_rxring,
                 channel,NULL);
   return(0);
}

/* Unbind a NIO from a Mueslix channel */
int dev_mueslix_unset_nio(struct mueslix_data *d,u_int channel_id)
{
   struct mueslix_channel *channel;

   if (channel_id >= MUESLIX_NR_CHANNELS)
      return(-1);

   channel = &d->channel[channel_id];

   if (channel->nio) {
      ptask_remove(channel->tx_tid);
      netio_rxl_remove(channel->nio);
      channel->nio = NULL;
   }
   return(0);
}
