/[dynamips]/upstream/dynamips-0.2.8-RC1/dev_mueslix.c
This is a repository of my old source code, which is no longer updated. Go to git.rot13.org for current projects!
ViewVC logotype

Contents of /upstream/dynamips-0.2.8-RC1/dev_mueslix.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 11 - (show annotations)
Sat Oct 6 16:33:40 2007 UTC (16 years, 5 months ago) by dpavlin
File MIME type: text/plain
File size: 28160 byte(s)
dynamips-0.2.8-RC1

1 /*
2 * Cisco router simulation platform.
3 * Copyright (C) 2005,2006 Christophe Fillot. All rights reserved.
4 *
5 * Serial Interfaces (Mueslix).
6 *
7 * Note: "debug serial mueslix" gives more technical info.
8 *
9 * Chip mode: Cisco models 36xx and 72xx don't seem to use the same microcode,
10 * so there are code variants to make things work properly.
11 *
12 * Chip mode 0 => 3600
13 * Chip mode 1 => 7200
14 *
15 * 2 points noticed until now:
16 * - RX/TX ring wrapping checks are done differently,
17 * - TX packet sizes are not specified in the same way.
18 *
19 * Test methodology:
20 * - Connect two virtual routers together ;
21 * - Do pings by sending 10 packets by 10 packets. If this stops working,
22 * count the number of transmitted packets and check with RX/TX rings
 * sizes. This is probably a ring wrapping problem.
24 * - Do multiple pings with various sizes (padding checks);
25 * - Check if CDP is working, with various hostname sizes. Since CDP
26 * contains a checksum, it is a good way to determine if packets are
27 * sent/received correctly.
28 * - Do a Telnet from both virtual router to the other one, and do a
29 * "sh run".
30 */
31
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <string.h>
35 #include <unistd.h>
36 #include <errno.h>
37 #include <assert.h>
38
39 #include "cpu.h"
40 #include "vm.h"
41 #include "dynamips.h"
42 #include "memory.h"
43 #include "device.h"
44 #include "net.h"
45 #include "net_io.h"
46 #include "ptask.h"
47 #include "dev_mueslix.h"
48
49 /* Debugging flags */
50 #define DEBUG_ACCESS 0
51 #define DEBUG_UNKNOWN 0
52 #define DEBUG_PCI_REGS 0
53 #define DEBUG_TRANSMIT 0
54 #define DEBUG_RECEIVE 0
55
56 /* Mueslix PCI vendor/product codes */
57 #define MUESLIX_PCI_VENDOR_ID 0x1137
58 #define MUESLIX_PCI_PRODUCT_ID 0x0001
59
60 /* Number of channels (4 interfaces) */
61 #define MUESLIX_NR_CHANNELS 4
62 #define MUESLIX_CHANNEL_LEN 0x100
63
64 /* RX/TX status for a channel */
65 #define MUESLIX_CHANNEL_STATUS_RX 0x01
66 #define MUESLIX_CHANNEL_STATUS_TX 0x02
67
68 /* RX/TX enable masks (XXX check if bit position is correct) */
69 #define MUESLIX_TX_ENABLE 0x01
70 #define MUESLIX_RX_ENABLE 0x02
71
72 /* RX/TX IRQ masks */
73 #define MUESLIX_TX_IRQ 0x01
74 #define MUESLIX_RX_IRQ 0x10
75
76 /* Addresses of ports */
77 #define MUESLIX_CHANNEL0_OFFSET 0x100
78 #define MUESLIX_CHANNEL1_OFFSET 0x200
79 #define MUESLIX_CHANNEL2_OFFSET 0x300
80 #define MUESLIX_CHANNEL3_OFFSET 0x400
81
82 /* TPU Registers */
83 #define MUESLIX_TPU_CMD_OFFSET 0x2c24
84 #define MUESLIX_TPU_CMD_RSP_OFFSET 0x2c2c
85
86 /* General and channels registers */
87 #define MUESLIX_GEN_CHAN_LEN 0x500
88
89 /* TPU microcode */
90 #define MUESLIX_UCODE_OFFSET 0x2000
91 #define MUESLIX_UCODE_LEN 0x800
92
93 /* TPU Xmem and YMem */
94 #define MUESLIX_XMEM_OFFSET 0x2a00
95 #define MUESLIX_YMEM_OFFSET 0x2b00
96 #define MUESLIX_XYMEM_LEN 0x100
97
98 /* Maximum packet size */
99 #define MUESLIX_MAX_PKT_SIZE 18000
100
101 /* Send up to 16 packets in a TX ring scan pass */
102 #define MUESLIX_TXRING_PASS_COUNT 16
103
104 /* RX descriptors */
105 #define MUESLIX_RXDESC_OWN 0x80000000 /* Ownership */
106 #define MUESLIX_RXDESC_FS 0x40000000 /* First Segment */
107 #define MUESLIX_RXDESC_LS 0x20000000 /* Last Segment */
108 #define MUESLIX_RXDESC_OVERRUN 0x10000000 /* Overrun */
109 #define MUESLIX_RXDESC_IGNORED 0x08000000 /* Ignored */
110 #define MUESLIX_RXDESC_ABORT 0x04000000 /* Abort */
111 #define MUESLIX_RXDESC_CRC 0x02000000 /* CRC error */
112 #define MUESLIX_RXDESC_LEN_MASK 0xffff
113
114 /* TX descriptors */
115 #define MUESLIX_TXDESC_OWN 0x80000000 /* Ownership */
116 #define MUESLIX_TXDESC_FS 0x40000000 /* First Segment */
117 #define MUESLIX_TXDESC_LS 0x20000000 /* Last Segment */
#define MUESLIX_TXDESC_SUB 0x00100000 /* Length subtractor ? */
#define MUESLIX_TXDESC_SUB_LEN 0x03000000 /* Length subtractor ? */
120 #define MUESLIX_TXDESC_SUB_SHIFT 24
121 #define MUESLIX_TXDESC_PAD 0x00c00000 /* Sort of padding info ? */
122 #define MUESLIX_TXDESC_PAD_SHIFT 22
123
124 #define MUESLIX_TXDESC_LEN_MASK 0xffff
125
/* RX Descriptor: two 32-bit words read from the ring in VM memory.
 * rdes[0] = status flags (MUESLIX_RXDESC_*) + frame length (low 16 bits),
 * rdes[1] = physical address of the data buffer. */
struct rx_desc {
   m_uint32_t rdes[2];
};
130
/* TX Descriptor: two 32-bit words read from the ring in VM memory.
 * tdes[0] = status flags (MUESLIX_TXDESC_*) + segment length (low 16 bits),
 * tdes[1] = physical address of the data buffer. */
struct tx_desc {
   m_uint32_t tdes[2];
};
135
136 /* Forward declaration of Mueslix data */
137 typedef struct mueslix_data mueslix_data_t;
138
/* Mueslix channel (one serial interface) */
struct mueslix_channel {
   /* Channel ID (0 to MUESLIX_NR_CHANNELS-1) */
   u_int id;

   /* Channel status (0=disabled; toggled by TPU enable/disable commands) */
   u_int status;

   /* CRC control register (written at channel register offset 0x00) */
   u_int crc_ctrl_reg;

   /* CRC size in bytes (2 or 4, derived from crc_ctrl_reg) */
   u_int crc_size;

   /* NetIO descriptor (NULL when no NIO is bound to this channel) */
   netio_desc_t *nio;

   /* TX ring scanners task id */
   ptask_id_t tx_tid;

   /* physical addresses for start and end of RX/TX rings */
   m_uint32_t rx_start,rx_end,tx_start,tx_end;

   /* physical addresses of current RX and TX descriptors */
   m_uint32_t rx_current,tx_current;

   /* Parent mueslix structure */
   mueslix_data_t *parent;
};
168
/* Mueslix Data (one chip, up to 4 channels) */
struct mueslix_data {
   /* Device instance name */
   char *name;

   /* Lock protecting register and ring state */
   pthread_mutex_t lock;

   /* IRQ status and mask */
   m_uint32_t irq_status,irq_mask;
   /* number of consecutive "idle" IRQ updates before the line is cleared */
   u_int irq_clearing_count;

   /* TPU options (register 0x2c00) */
   m_uint32_t tpu_options;

   /* Virtual machine */
   vm_instance_t *vm;

   /* Virtual device */
   struct vdevice *dev;

   /* PCI device information */
   struct pci_device *pci_dev;

   /* Chip mode:
    *
    * 0=increment ring pointers before check + direct TX size,
    * 1=increment ring pointers after check + "complex" TX size.
    */
   int chip_mode;

   /* Channels */
   struct mueslix_channel channel[MUESLIX_NR_CHANNELS];
   /* 2 bits per channel: see dev_mueslix_is_rx_tx_enabled() */
   m_uint32_t channel_enable_mask;

   /* TPU microcode */
   u_char ucode[MUESLIX_UCODE_LEN];

   /* TPU Xmem and Ymem */
   u_char xmem[MUESLIX_XYMEM_LEN];
   u_char ymem[MUESLIX_XYMEM_LEN];
};
210
/* Offsets of the 4 channel register banks inside the register window */
static m_uint32_t channel_offset[MUESLIX_NR_CHANNELS] = {
   MUESLIX_CHANNEL0_OFFSET, MUESLIX_CHANNEL1_OFFSET,
   MUESLIX_CHANNEL2_OFFSET, MUESLIX_CHANNEL3_OFFSET,
};
216
217 /* Lock/Unlock primitives */
218 #define MUESLIX_LOCK(d) pthread_mutex_lock(&(d)->lock)
219 #define MUESLIX_UNLOCK(d) pthread_mutex_unlock(&(d)->lock)
220
221 /* Log a Mueslix message */
222 #define MUESLIX_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg)
223
224 /* Returns TRUE if RX/TX is enabled for a channel */
225 static inline int dev_mueslix_is_rx_tx_enabled(struct mueslix_data *d,u_int id)
226 {
227 /* 2 bits for RX/TX, 4 channels max */
228 return((d->channel_enable_mask >> (id << 1)) & 0x03);
229 }
230
231 /* Update IRQ status */
232 static inline void dev_mueslix_update_irq_status(struct mueslix_data *d)
233 {
234 if (d->irq_status & d->irq_mask)
235 pci_dev_trigger_irq(d->vm,d->pci_dev);
236 else {
237 if (++d->irq_clearing_count == 3) {
238 pci_dev_clear_irq(d->vm,d->pci_dev);
239 d->irq_clearing_count = 0;
240 }
241 }
242 }
243
/*
 * Access to channel registers.
 *
 * 'offset' is relative to the channel register bank; only the registers
 * the emulation needs are decoded, other offsets are silently ignored.
 * Caller (dev_mueslix_access) holds the device lock.
 */
void dev_mueslix_chan_access(cpu_gen_t *cpu,struct mueslix_channel *channel,
                             m_uint32_t offset,u_int op_size,u_int op_type,
                             m_uint64_t *data)
{
   switch(offset) {
      case 0x00: /* CRC control register ? */
         if (op_type == MTS_READ) {
            *data = channel->crc_ctrl_reg;
         } else {
            channel->crc_ctrl_reg = *data;

            switch(channel->crc_ctrl_reg) {
               case 0x08:
               case 0x0a:
                  /* 0x08 -> 2-byte CRC, 0x0a -> 4-byte CRC */
                  channel->crc_size = channel->crc_ctrl_reg - 0x06;
                  break;

               default:
                  MUESLIX_LOG(channel->parent,"channel %u: unknown value "
                              "for CRC ctrl reg 0x%4.4x\n",
                              channel->id,channel->crc_ctrl_reg);

                  /* fall back to a 2-byte CRC */
                  channel->crc_size = 2;
            }
            MUESLIX_LOG(channel->parent,
                        "channel %u: CRC size set to 0x%4.4x\n",
                        channel->id,channel->crc_size);
         }
         break;

      case 0x60: /* signals ? (report all-ones only when a NIO is bound) */
         if ((op_type == MTS_READ) && (channel->nio != NULL))
            *data = 0xFFFFFFFF;
         break;

      case 0x64: /* port status - cable type and probably other things */
         if (op_type == MTS_READ)
            *data = 0x7B;
         break;

      case 0x90: /* has influence on clock rate */
         if (op_type == MTS_READ)
            *data = 0x11111111;
         break;

      case 0x80: /* TX start: a write also resets the current TX pointer */
         if (op_type == MTS_WRITE)
            channel->tx_start = channel->tx_current = *data;
         else
            *data = channel->tx_start;
         break;

      case 0x84: /* TX end */
         if (op_type == MTS_WRITE)
            channel->tx_end = *data;
         else
            *data = channel->tx_end;
         break;

      case 0x88: /* RX start: a write also resets the current RX pointer */
         if (op_type == MTS_WRITE)
            channel->rx_start = channel->rx_current = *data;
         else
            *data = channel->rx_start;
         break;

      case 0x8c: /* RX end */
         if (op_type == MTS_WRITE)
            channel->rx_end = *data;
         else
            *data = channel->rx_end;
         break;
   }
}
321
322 /* Handle TPU commands for chip mode 0 (3600) */
323 static void tpu_cm0_handle_cmd(struct mueslix_data *d,u_int cmd)
324 {
325 struct mueslix_channel *channel;
326 u_int opcode,channel_id;
327
328 opcode = (cmd >> 12) & 0xFF;
329 channel_id = cmd & 0x03;
330 channel = &d->channel[channel_id];
331
332 switch(opcode) {
333 case 0x10:
334 MUESLIX_LOG(d,"channel %u disabled\n",channel_id);
335 channel->status = 0;
336 break;
337 case 0x00:
338 MUESLIX_LOG(d,"channel %u enabled\n",channel_id);
339 channel->status = 1;
340 break;
341 default:
342 MUESLIX_LOG(d,"unknown command 0x%5x\n",cmd);
343 }
344 }
345
346 /* Handle TPU commands for chip mode 1 (7200) */
347 static void tpu_cm1_handle_cmd(struct mueslix_data *d,u_int cmd)
348 {
349 struct mueslix_channel *channel;
350 u_int opcode,channel_id;
351
352 opcode = (cmd >> 12) & 0xFF;
353 channel_id = cmd & 0x03;
354 channel = &d->channel[channel_id];
355
356 switch(opcode) {
357 case 0x50:
358 case 0x30:
359 MUESLIX_LOG(d,"channel %u disabled\n",channel_id);
360 channel->status = 0;
361 break;
362 case 0x00:
363 MUESLIX_LOG(d,"channel %u enabled\n",channel_id);
364 channel->status = 1;
365 break;
366 default:
367 MUESLIX_LOG(d,"unknown command 0x%5x\n",cmd);
368 }
369 }
370
/*
 * dev_mueslix_access()
 *
 * Device handler for the whole register window. Direct-mapped areas
 * (microcode, XMem, YMem) return a host pointer so the MTS can access
 * them without further callbacks; everything else is decoded here or
 * dispatched to dev_mueslix_chan_access() and NULL is returned.
 */
void *dev_mueslix_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
                         u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct mueslix_data *d = dev->priv_data;
   int i;

#if DEBUG_ACCESS >= 2
   if (op_type == MTS_READ) {
      cpu_log(cpu,d->name,"read access to offset=0x%x, pc=0x%llx, size=%u\n",
              offset,cpu_get_pc(cpu),op_size);
   } else {
      cpu_log(cpu,d->name,"write access to offset=0x%x, pc=0x%llx, "
              "val=0x%llx, size=%u\n",offset,cpu_get_pc(cpu),*data,op_size);
   }
#endif

   /* Returns 0 if we don't know the offset */
   if (op_type == MTS_READ)
      *data = 0x00000000;

   /* Handle microcode access (direct-mapped) */
   if ((offset >= MUESLIX_UCODE_OFFSET) &&
       (offset < (MUESLIX_UCODE_OFFSET + MUESLIX_UCODE_LEN)))
      return(d->ucode + offset - MUESLIX_UCODE_OFFSET);

   /* Handle TPU XMem access (direct-mapped) */
   if ((offset >= MUESLIX_XMEM_OFFSET) &&
       (offset < (MUESLIX_XMEM_OFFSET + MUESLIX_XYMEM_LEN)))
      return(d->xmem + offset - MUESLIX_XMEM_OFFSET);

   /* Handle TPU YMem access (direct-mapped) */
   if ((offset >= MUESLIX_YMEM_OFFSET) &&
       (offset < (MUESLIX_YMEM_OFFSET + MUESLIX_XYMEM_LEN)))
      return(d->ymem + offset - MUESLIX_YMEM_OFFSET);

   /* Handle channel access (dispatch to the matching channel bank) */
   for(i=0;i<MUESLIX_NR_CHANNELS;i++)
      if ((offset >= channel_offset[i]) &&
          (offset < (channel_offset[i] + MUESLIX_CHANNEL_LEN)))
      {
         MUESLIX_LOCK(d);
         dev_mueslix_chan_access(cpu,&d->channel[i],
                                 offset - channel_offset[i],
                                 op_size,op_type,data);
         MUESLIX_UNLOCK(d);
         return NULL;
      }

   MUESLIX_LOCK(d);

   /* Generic case */
   switch(offset) {
      /* this reg is accessed when an interrupt occurs
       * (reads return the status; written bits clear it, write-1-to-clear) */
      case 0x0:
         if (op_type == MTS_READ) {
            *data = d->irq_status;
         } else {
            d->irq_status &= ~(*data);
            dev_mueslix_update_irq_status(d);
         }
         break;

      /* Maybe interrupt mask */
      case 0x10:
         if (op_type == MTS_READ) {
            *data = d->irq_mask;
         } else {
            d->irq_mask = *data;
            dev_mueslix_update_irq_status(d);
         }
         break;

      /* per-channel RX/TX enable bits (2 bits per channel) */
      case 0x14:
         if (op_type == MTS_READ)
            *data = d->channel_enable_mask;
         else {
#if DEBUG_ACCESS
            cpu_log(cpu,d->name,
                    "channel_enable_mask = 0x%5.5llx at pc=0x%llx\n",
                    *data,cpu_get_pc(cpu));
#endif
            d->channel_enable_mask = *data;
         }
         break;

      /* unknown register: constant value expected by IOS */
      case 0x18:
         if (op_type == MTS_READ)
            *data = 0x7F7F7F7F;
         break;

      /* unknown register: reads as zero */
      case 0x48:
         if (op_type == MTS_READ)
            *data = 0x00000000;
         break;

      /* unknown register: constant value expected by IOS */
      case 0x7c:
         if (op_type == MTS_READ)
            *data = 0x492;
         break;

      /* TPU options register */
      case 0x2c00:
         if (op_type == MTS_READ)
            *data = d->tpu_options;
         else
            d->tpu_options = *data;
         break;

      /* cmd reg */
      case MUESLIX_TPU_CMD_OFFSET:
#if DEBUG_ACCESS
         if (op_type == MTS_WRITE) {
            cpu_log(cpu,d->name,"cmd_reg = 0x%5.5llx at pc=0x%llx\n",
                    *data,cpu_get_pc(cpu));
         }
#endif
         switch(d->chip_mode) {
            case 0: /* 3600 */
               tpu_cm0_handle_cmd(d,*data);
               break;
            case 1: /* 7200 */
               tpu_cm1_handle_cmd(d,*data);
               break;
         }
         break;

      /*
       * cmd_rsp reg, it seems that 0xFFFF means OK
       * (seen on a "sh contr se1/0" with "debug serial mueslix" enabled).
       */
      case MUESLIX_TPU_CMD_RSP_OFFSET:
         if (op_type == MTS_READ)
            *data = 0xFFFF;
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,d->name,
                    "read from unknown addr 0x%x, pc=0x%llx (size=%u)\n",
                    offset,cpu_get_pc(cpu),op_size);
         } else {
            cpu_log(cpu,d->name,
                    "write to unknown addr 0x%x, value=0x%llx, "
                    "pc=0x%llx (size=%u)\n",
                    offset,*data,cpu_get_pc(cpu),op_size);
         }
#endif
   }

   MUESLIX_UNLOCK(d);
   return NULL;
}
526
527 /*
528 * Get the address of the next RX descriptor.
529 */
530 static m_uint32_t rxdesc_get_next(struct mueslix_channel *channel,
531 m_uint32_t rxd_addr)
532 {
533 m_uint32_t nrxd_addr;
534
535 switch(channel->parent->chip_mode) {
536 case 0:
537 nrxd_addr = rxd_addr + sizeof(struct rx_desc);
538 if (nrxd_addr == channel->rx_end)
539 nrxd_addr = channel->rx_start;
540 break;
541
542 case 1:
543 default:
544 if (rxd_addr == channel->rx_end)
545 nrxd_addr = channel->rx_start;
546 else
547 nrxd_addr = rxd_addr + sizeof(struct rx_desc);
548 break;
549 }
550
551 return(nrxd_addr);
552 }
553
554 /* Read an RX descriptor */
555 static void rxdesc_read(struct mueslix_data *d,m_uint32_t rxd_addr,
556 struct rx_desc *rxd)
557 {
558 #if DEBUG_RECEIVE
559 MUESLIX_LOG(d,"reading RX descriptor at address 0x%x\n",rxd_addr);
560 #endif
561
562 /* get the next descriptor from VM physical RAM */
563 physmem_copy_from_vm(d->vm,rxd,rxd_addr,sizeof(struct rx_desc));
564
565 /* byte-swapping */
566 rxd->rdes[0] = vmtoh32(rxd->rdes[0]);
567 rxd->rdes[1] = vmtoh32(rxd->rdes[1]);
568 }
569
570 /*
571 * Try to acquire the specified RX descriptor. Returns TRUE if we have it.
572 * It assumes that the byte-swapping is done.
573 */
574 static inline int rxdesc_acquire(m_uint32_t rdes0)
575 {
576 return(rdes0 & MUESLIX_RXDESC_OWN);
577 }
578
579 /* Put a packet in buffer of a descriptor */
580 static ssize_t rxdesc_put_pkt(struct mueslix_data *d,struct rx_desc *rxd,
581 u_char **pkt,ssize_t *pkt_len)
582 {
583 ssize_t len,cp_len;
584
585 len = rxd->rdes[0] & MUESLIX_RXDESC_LEN_MASK;
586
587 /* compute the data length to copy */
588 cp_len = m_min(len,*pkt_len);
589
590 #if DEBUG_RECEIVE
591 MUESLIX_LOG(d,"copying %d bytes at 0x%x\n",cp_len,rxd->rdes[1]);
592 #endif
593
594 /* copy packet data to the VM physical RAM */
595 physmem_copy_to_vm(d->vm,*pkt,rxd->rdes[1],cp_len);
596
597 *pkt += cp_len;
598 *pkt_len -= cp_len;
599 return(cp_len);
600 }
601
/*
 * Put a packet in the RX ring of the Mueslix specified channel.
 *
 * The packet is spread over as many RX descriptors as needed; segment
 * flags and lengths are written back to the descriptors in VM memory,
 * then an RX IRQ is raised for the channel. The first descriptor is
 * updated last so the VM never sees a partially-filled chain.
 * Caller must hold the device lock.
 */
static void dev_mueslix_receive_pkt(struct mueslix_channel *channel,
                                    u_char *pkt,ssize_t pkt_len)
{
   struct mueslix_data *d = channel->parent;
   m_uint32_t rx_start,rxdn_addr,rxdn_rdes0;
   struct rx_desc rxd0,rxdn,*rxdc;
   ssize_t cp_len,tot_len = pkt_len;
   u_char *pkt_ptr = pkt;
   int i;

   /* no ring programmed, channel disabled or no NIO bound: drop */
   if ((channel->rx_start == 0) || (channel->status == 0) ||
       (channel->nio == NULL))
      return;

   /* Don't make anything if RX is not enabled for this channel */
   if (!(dev_mueslix_is_rx_tx_enabled(d,channel->id) & MUESLIX_RX_ENABLE))
      return;

   /* Truncate the packet if it is too big */
   pkt_len = m_min(pkt_len,MUESLIX_MAX_PKT_SIZE);

   /* Copy the current rxring descriptor */
   rxdesc_read(d,channel->rx_current,&rxd0);

   /* We must have the first descriptor... */
   if (!rxdesc_acquire(rxd0.rdes[0]))
      return;

   /* Remember the first RX descriptor address */
   rx_start = channel->rx_current;

   for(i=0,rxdc=&rxd0;tot_len>0;i++)
   {
      /* Put data into the descriptor buffers */
      cp_len = rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len);

      /* Get address of the next descriptor */
      rxdn_addr = rxdesc_get_next(channel,channel->rx_current);

      /* We have finished if the complete packet has been stored */
      if (tot_len == 0) {
         rxdc->rdes[0] = MUESLIX_RXDESC_LS;
         /* NOTE(review): the "+ 1" looks like an extra byte expected by
          * the microcode on top of data + CRC -- origin unconfirmed */
         rxdc->rdes[0] |= cp_len + channel->crc_size + 1;

         if (i != 0)
            physmem_copy_u32_to_vm(d->vm,channel->rx_current,rxdc->rdes[0]);

         channel->rx_current = rxdn_addr;
         break;
      }

#if DEBUG_RECEIVE
      MUESLIX_LOG(d,"trying to acquire new descriptor at 0x%x\n",rxdn_addr);
#endif

      /* Get status of the next descriptor to see if we can acquire it */
      rxdn_rdes0 = physmem_copy_u32_from_vm(d->vm,rxdn_addr);

      if (!rxdesc_acquire(rxdn_rdes0))
         rxdc->rdes[0] = MUESLIX_RXDESC_LS | MUESLIX_RXDESC_OVERRUN;
      else
         rxdc->rdes[0] = 0x00000000; /* ok, no special flag */

      rxdc->rdes[0] |= cp_len;

      /* Update the new status (only if we are not on the first desc) */
      if (i != 0)
         physmem_copy_u32_to_vm(d->vm,channel->rx_current,rxdc->rdes[0]);

      /* Update the RX pointer */
      channel->rx_current = rxdn_addr;

      /* stop on overrun: LS was set just above when acquisition failed */
      if (rxdc->rdes[0] & MUESLIX_RXDESC_LS)
         break;

      /* Read the next descriptor from VM physical RAM */
      rxdesc_read(d,rxdn_addr,&rxdn);
      rxdc = &rxdn;
   }

   /* Update the first RX descriptor (sets FS, releases the OWN bit) */
   rxd0.rdes[0] |= MUESLIX_RXDESC_FS;
   physmem_copy_u32_to_vm(d->vm,rx_start,rxd0.rdes[0]);

   /* Indicate that we have a frame ready (XXX something to do ?) */

   /* Generate IRQ on CPU */
   d->irq_status |= MUESLIX_RX_IRQ << channel->id;
   dev_mueslix_update_irq_status(d);
}
695
696 /* Handle the Mueslix RX ring of the specified channel */
697 static int dev_mueslix_handle_rxring(netio_desc_t *nio,
698 u_char *pkt,ssize_t pkt_len,
699 struct mueslix_channel *channel)
700 {
701 struct mueslix_data *d = channel->parent;
702
703 #if DEBUG_RECEIVE
704 MUESLIX_LOG(d,"channel %u: receiving a packet of %d bytes\n",
705 channel->id,pkt_len);
706 mem_dump(log_file,pkt,pkt_len);
707 #endif
708
709 MUESLIX_LOCK(d);
710 if (dev_mueslix_is_rx_tx_enabled(d,channel->id) & MUESLIX_RX_ENABLE)
711 dev_mueslix_receive_pkt(channel,pkt,pkt_len);
712 MUESLIX_UNLOCK(d);
713 return(TRUE);
714 }
715
716 /* Read a TX descriptor */
717 static void txdesc_read(struct mueslix_data *d,m_uint32_t txd_addr,
718 struct tx_desc *txd)
719 {
720 /* get the next descriptor from VM physical RAM */
721 physmem_copy_from_vm(d->vm,txd,txd_addr,sizeof(struct tx_desc));
722
723 /* byte-swapping */
724 txd->tdes[0] = vmtoh32(txd->tdes[0]);
725 txd->tdes[1] = vmtoh32(txd->tdes[1]);
726 }
727
728 /* Set the address of the next TX descriptor */
729 static void txdesc_set_next(struct mueslix_channel *channel)
730 {
731 switch(channel->parent->chip_mode) {
732 case 0:
733 channel->tx_current += sizeof(struct tx_desc);
734
735 if (channel->tx_current == channel->tx_end)
736 channel->tx_current = channel->tx_start;
737 break;
738
739 case 1:
740 default:
741 if (channel->tx_current == channel->tx_end)
742 channel->tx_current = channel->tx_start;
743 else
744 channel->tx_current += sizeof(struct tx_desc);
745 }
746 }
747
/* Handle the TX ring of a specific channel (single packet).
 *
 * Gathers one frame (possibly spread over several descriptors), sends it
 * on the NIO and releases the descriptors back to the VM. Returns TRUE if
 * a packet was processed, FALSE if the ring is idle or not owned.
 * Caller must hold the device lock. */
static int dev_mueslix_handle_txring_single(struct mueslix_channel *channel)
{
   struct mueslix_data *d = channel->parent;
   u_char pkt[MUESLIX_MAX_PKT_SIZE],*pkt_ptr;
   m_uint32_t tx_start,clen,sub_len,tot_len,pad;
   struct tx_desc txd0,ctxd,*ptxd;
   int done = FALSE;

   /* no ring programmed or channel disabled: nothing to send */
   if ((channel->tx_start == 0) || (channel->status == 0))
      return(FALSE);

   /* Copy the current txring descriptor */
   tx_start = channel->tx_current;
   ptxd = &txd0;
   txdesc_read(d,channel->tx_current,ptxd);

   /* If we don't own the descriptor, we cannot transmit */
   if (!(txd0.tdes[0] & MUESLIX_TXDESC_OWN))
      return(FALSE);

#if DEBUG_TRANSMIT
   MUESLIX_LOG(d,"mueslix_handle_txring: 1st desc: "
               "tdes[0]=0x%x, tdes[1]=0x%x\n",
               ptxd->tdes[0],ptxd->tdes[1]);
#endif

   pkt_ptr = pkt;
   tot_len = 0;

   /* Gather every segment of the frame until the Last Segment bit */
   do {
#if DEBUG_TRANSMIT
      MUESLIX_LOG(d,"mueslix_handle_txring: loop: "
                  "tdes[0]=0x%x, tdes[1]=0x%x\n",
                  ptxd->tdes[0],ptxd->tdes[1]);
#endif

      if (!(ptxd->tdes[0] & MUESLIX_TXDESC_OWN)) {
         MUESLIX_LOG(d,"mueslix_handle_txring: descriptor not owned!\n");
         return(FALSE);
      }

      /* Segment length encoding depends on the chip mode */
      switch(channel->parent->chip_mode) {
         case 0:
            /* 3600: byte count directly in the low 16 bits */
            clen = ptxd->tdes[0] & MUESLIX_TXDESC_LEN_MASK;
            break;

         case 1:
         default:
            /* 7200: count of 32-bit words, minus an optional
             * subtracted byte count encoded in the SUB_LEN field */
            clen = (ptxd->tdes[0] & MUESLIX_TXDESC_LEN_MASK) << 2;

            if (ptxd->tdes[0] & MUESLIX_TXDESC_SUB) {
               sub_len = ptxd->tdes[0] & MUESLIX_TXDESC_SUB_LEN;
               sub_len >>= MUESLIX_TXDESC_SUB_SHIFT;
               clen -= sub_len;
            }
      }

      /* Be sure that we have length not null */
      if (clen != 0) {
         physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->tdes[1],clen);
      }

      pkt_ptr += clen;
      tot_len += clen;

      /* Clear the OWN bit if this is not the first descriptor */
      if (!(ptxd->tdes[0] & MUESLIX_TXDESC_FS))
         physmem_copy_u32_to_vm(d->vm,channel->tx_current,0);

      /* Go to the next descriptor */
      txdesc_set_next(channel);

      /* Copy the next txring descriptor */
      if (!(ptxd->tdes[0] & MUESLIX_TXDESC_LS)) {
         txdesc_read(d,channel->tx_current,&ctxd);
         ptxd = &ctxd;
      } else
         done = TRUE;
   }while(!done);

   if (tot_len != 0) {
#if DEBUG_TRANSMIT
      MUESLIX_LOG(d,"sending packet of %u bytes (flags=0x%4.4x)\n",
                  tot_len,txd0.tdes[0]);
      mem_dump(log_file,pkt,tot_len);
#endif

      /* NOTE(review): the PAD field of the last descriptor appears to
       * give the number of valid bytes in the final 32-bit word; trim
       * the unused tail bytes -- exact semantics unconfirmed */
      pad = ptxd->tdes[0] & MUESLIX_TXDESC_PAD;
      pad >>= MUESLIX_TXDESC_PAD_SHIFT;
      tot_len -= (4 - pad) & 0x03;

      /* send it on wire */
      netio_send(channel->nio,pkt,tot_len);
   }

   /* Clear the OWN flag of the first descriptor (releases the frame) */
   physmem_copy_u32_to_vm(d->vm,tx_start,0);

   /* Interrupt on completion ? */
   d->irq_status |= MUESLIX_TX_IRQ << channel->id;
   dev_mueslix_update_irq_status(d);
   return(TRUE);
}
854
855 /* Handle the TX ring of a specific channel */
856 static int dev_mueslix_handle_txring(struct mueslix_channel *channel)
857 {
858 struct mueslix_data *d = channel->parent;
859 int res,i;
860
861 if (!dev_mueslix_is_rx_tx_enabled(d,channel->id) & MUESLIX_TX_ENABLE)
862 return(FALSE);
863
864 for(i=0;i<MUESLIX_TXRING_PASS_COUNT;i++) {
865 MUESLIX_LOCK(d);
866 res = dev_mueslix_handle_txring_single(channel);
867 MUESLIX_UNLOCK(d);
868
869 if (!res)
870 break;
871 }
872
873 return(TRUE);
874 }
875
876 /* pci_mueslix_read() */
877 static m_uint32_t pci_mueslix_read(cpu_gen_t *cpu,struct pci_device *dev,
878 int reg)
879 {
880 struct mueslix_data *d = dev->priv_data;
881
882 switch(reg) {
883 case 0x08: /* Rev ID */
884 return(0x2800001);
885 case PCI_REG_BAR0:
886 return(d->dev->phys_addr);
887 default:
888 return(0);
889 }
890 }
891
892 /* pci_mueslix_write() */
893 static void pci_mueslix_write(cpu_gen_t *cpu,struct pci_device *dev,
894 int reg,m_uint32_t value)
895 {
896 struct mueslix_data *d = dev->priv_data;
897
898 switch(reg) {
899 case PCI_REG_BAR0:
900 vm_map_device(cpu->vm,d->dev,(m_uint64_t)value);
901 MUESLIX_LOG(d,"registers are mapped at 0x%x\n",value);
902 break;
903 }
904 }
905
906 /* Initialize a Mueslix chip */
907 struct mueslix_data *
908 dev_mueslix_init(vm_instance_t *vm,char *name,int chip_mode,
909 struct pci_bus *pci_bus,int pci_device,int irq)
910 {
911 struct pci_device *pci_dev;
912 struct mueslix_data *d;
913 struct vdevice *dev;
914 int i;
915
916 /* Allocate the private data structure for Mueslix chip */
917 if (!(d = malloc(sizeof(*d)))) {
918 fprintf(stderr,"%s (Mueslix): out of memory\n",name);
919 return NULL;
920 }
921
922 memset(d,0,sizeof(*d));
923 pthread_mutex_init(&d->lock,NULL);
924 d->chip_mode = chip_mode;
925
926 for(i=0;i<MUESLIX_NR_CHANNELS;i++) {
927 d->channel[i].id = i;
928 d->channel[i].parent = d;
929 }
930
931 /* Add as PCI device */
932 pci_dev = pci_dev_add(pci_bus,name,
933 MUESLIX_PCI_VENDOR_ID,MUESLIX_PCI_PRODUCT_ID,
934 pci_device,0,irq,
935 d,NULL,pci_mueslix_read,pci_mueslix_write);
936
937 if (!pci_dev) {
938 fprintf(stderr,"%s (Mueslix): unable to create PCI device.\n",name);
939 return NULL;
940 }
941
942 /* Create the device itself */
943 if (!(dev = dev_create(name))) {
944 fprintf(stderr,"%s (Mueslix): unable to create device.\n",name);
945 return NULL;
946 }
947
948 d->name = name;
949 d->pci_dev = pci_dev;
950 d->vm = vm;
951
952 dev->phys_addr = 0;
953 dev->phys_len = 0x4000;
954 dev->handler = dev_mueslix_access;
955 dev->priv_data = d;
956
957 /* Store device info */
958 dev->priv_data = d;
959 d->dev = dev;
960 return(d);
961 }
962
963 /* Remove a Mueslix device */
964 void dev_mueslix_remove(struct mueslix_data *d)
965 {
966 if (d != NULL) {
967 pci_dev_remove(d->pci_dev);
968 vm_unbind_device(d->vm,d->dev);
969 cpu_group_rebuild_mts(d->vm->cpu_group);
970 free(d->dev);
971 free(d);
972 }
973 }
974
975 /* Bind a NIO to a Mueslix channel */
976 int dev_mueslix_set_nio(struct mueslix_data *d,u_int channel_id,
977 netio_desc_t *nio)
978 {
979 struct mueslix_channel *channel;
980
981 if (channel_id >= MUESLIX_NR_CHANNELS)
982 return(-1);
983
984 channel = &d->channel[channel_id];
985
986 /* check that a NIO is not already bound */
987 if (channel->nio != NULL)
988 return(-1);
989
990 /* define the new NIO */
991 channel->nio = nio;
992 channel->tx_tid = ptask_add((ptask_callback)dev_mueslix_handle_txring,
993 channel,NULL);
994 netio_rxl_add(nio,(netio_rx_handler_t)dev_mueslix_handle_rxring,
995 channel,NULL);
996 return(0);
997 }
998
999 /* Unbind a NIO from a Mueslix channel */
1000 int dev_mueslix_unset_nio(struct mueslix_data *d,u_int channel_id)
1001 {
1002 struct mueslix_channel *channel;
1003
1004 if (channel_id >= MUESLIX_NR_CHANNELS)
1005 return(-1);
1006
1007 channel = &d->channel[channel_id];
1008
1009 if (channel->nio) {
1010 ptask_remove(channel->tx_tid);
1011 netio_rxl_remove(channel->nio);
1012 channel->nio = NULL;
1013 }
1014 return(0);
1015 }

  ViewVC Help
Powered by ViewVC 1.1.26