30 |
/* Debugging flags */ |
/* Debugging flags */ |
31 |
#define DEBUG_UNKNOWN 0 |
#define DEBUG_UNKNOWN 0 |
32 |
#define DEBUG_DMA 0 |
#define DEBUG_DMA 0 |
33 |
|
#define DEBUG_SDMA 0 |
34 |
|
#define DEBUG_MPSC 0 |
35 |
#define DEBUG_MII 0 |
#define DEBUG_MII 0 |
36 |
#define DEBUG_ETH_TX 0 |
#define DEBUG_ETH_TX 0 |
37 |
#define DEBUG_ETH_RX 0 |
#define DEBUG_ETH_RX 0 |
44 |
#define PCI_PRODUCT_GALILEO_GT64120 0x4620 /* GT-64120 */ |
#define PCI_PRODUCT_GALILEO_GT64120 0x4620 /* GT-64120 */ |
45 |
#define PCI_PRODUCT_GALILEO_GT96100 0x9653 /* GT-96100 */ |
#define PCI_PRODUCT_GALILEO_GT96100 0x9653 /* GT-96100 */ |
46 |
|
|
47 |
/* === Global definitions === */ |
/* === Global definitions ================================================= */ |
48 |
|
|
49 |
/* Interrupt High Cause Register */ |
/* Interrupt High Cause Register */ |
50 |
#define GT_IHCR_ETH0_SUM 0x00000001 |
#define GT_IHCR_ETH0_SUM 0x00000001 |
55 |
#define GT_SCR_ETH0_SUM 0x00000001 |
#define GT_SCR_ETH0_SUM 0x00000001 |
56 |
#define GT_SCR_ETH1_SUM 0x00000002 |
#define GT_SCR_ETH1_SUM 0x00000002 |
57 |
#define GT_SCR_SDMA_SUM 0x00000010 |
#define GT_SCR_SDMA_SUM 0x00000010 |
58 |
|
#define GT_SCR_SDMA0_SUM 0x00000100 |
59 |
|
#define GT_SCR_MPSC0_SUM 0x00000200 |
60 |
|
|
61 |
/* === DMA definitions === */ |
/* === DMA definitions ==================================================== */ |
62 |
#define GT_DMA_CHANNELS 4 |
#define GT_DMA_CHANNELS 4 |
63 |
|
|
64 |
#define GT_DMA_FLYBY_ENABLE 0x00000001 /* FlyBy Enable */ |
#define GT_DMA_FLYBY_ENABLE 0x00000001 /* FlyBy Enable */ |
93 |
m_uint32_t ctrl; |
m_uint32_t ctrl; |
94 |
}; |
}; |
95 |
|
|
96 |
/* === Ethernet definitions === */ |
/* === Serial DMA (SDMA) ================================================== */ |
97 |
|
|
98 |
|
/* SDMA: 2 groups of 8 channels */ |
99 |
|
#define GT_SDMA_CHANNELS 8 |
100 |
|
#define GT_SDMA_GROUPS 2 |
101 |
|
|
102 |
|
/* SDMA channel */ |
103 |
|
struct sdma_channel { |
104 |
|
u_int id; |
105 |
|
|
106 |
|
m_uint32_t sdc; |
107 |
|
m_uint32_t sdcm; |
108 |
|
m_uint32_t rx_desc; |
109 |
|
m_uint32_t rx_buf_ptr; |
110 |
|
m_uint32_t scrdp; |
111 |
|
m_uint32_t tx_desc; |
112 |
|
m_uint32_t sctdp; |
113 |
|
m_uint32_t sftdp; |
114 |
|
}; |
115 |
|
|
116 |
|
/* SGCR: SDMA Group Register */ |
117 |
|
#define GT_REG_SGC 0x101af0 |
118 |
|
|
119 |
|
/* SDMA cause register: 8 fields (1 for each channel) of 4 bits */ |
120 |
|
#define GT_SDMA_CAUSE_RXBUF0 0x01 |
121 |
|
#define GT_SDMA_CAUSE_RXERR0 0x02 |
122 |
|
#define GT_SDMA_CAUSE_TXBUF0 0x04 |
123 |
|
#define GT_SDMA_CAUSE_TXEND0 0x08 |
124 |
|
|
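Reviewer note: the four GT_SDMA_CAUSE_* values above are the channel-0 positions; each of the 8 channels owns one 4-bit field in the cause register. A minimal sketch of the shift arithmetic, matching gt_sdma_set_cause() and the per-channel mask in gt_sdma_update_channel_int_status() further down in this diff (the helper names here are illustrative):

    #include <stdint.h>

    /* Position a channel-0 cause value into channel "chan_id"'s 4-bit field. */
    static inline uint32_t sdma_cause_set(uint32_t cause_reg, unsigned chan_id,
                                          uint32_t value)
    {
       return cause_reg | (value << (chan_id << 2));   /* chan_id * 4 */
    }

    /* Extract the 4-bit cause field of channel "chan_id". */
    static inline uint32_t sdma_cause_get(uint32_t cause_reg, unsigned chan_id)
    {
       return (cause_reg >> (chan_id << 2)) & 0x0F;
    }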
125 |
|
/* SDMA channel register offsets */ |
126 |
|
#define GT_SDMA_SDC 0x000900 /* Configuration Register */ |
127 |
|
#define GT_SDMA_SDCM 0x000908 /* Command Register */ |
128 |
|
#define GT_SDMA_RX_DESC 0x008900 /* RX descriptor */ |
129 |
|
#define GT_SDMA_SCRDP 0x008910 /* Current RX descriptor */ |
130 |
|
#define GT_SDMA_TX_DESC 0x00c900 /* TX descriptor */ |
131 |
|
#define GT_SDMA_SCTDP 0x00c910 /* Current TX desc. pointer */ |
132 |
|
#define GT_SDMA_SFTDP 0x00c914 /* First TX desc. pointer */ |
133 |
|
|
134 |
|
/* SDMA RX/TX descriptor */ |
135 |
|
struct sdma_desc { |
136 |
|
m_uint32_t buf_size; |
137 |
|
m_uint32_t cmd_stat; |
138 |
|
m_uint32_t next_ptr; |
139 |
|
m_uint32_t buf_ptr; |
140 |
|
}; |
141 |
|
|
142 |
|
/* SDMA Descriptor Command/Status word */ |
143 |
|
#define GT_SDMA_CMD_O 0x80000000 /* Owner bit */ |
144 |
|
#define GT_SDMA_CMD_AM 0x40000000 /* Auto-mode */ |
145 |
|
#define GT_SDMA_CMD_EI 0x00800000 /* Enable Interrupt */ |
146 |
|
#define GT_SDMA_CMD_F 0x00020000 /* First buffer */ |
147 |
|
#define GT_SDMA_CMD_L 0x00010000 /* Last buffer */ |
148 |
|
|
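Reviewer note: a quick sketch of how a guest driver is expected to hand a single-buffer frame to the SDMA engine, using the sdma_desc layout and the command/status bits above. Illustrative only; the TX byte count is assumed to sit in the upper 16 bits of buf_size, consistent with the GT_TXDESC_BC_SHIFT usage in gt_sdma_tx_start() later in this diff.

    #include <stdint.h>

    struct sdma_desc_sketch {
       uint32_t buf_size;   /* TX byte count in bits 31:16 (assumed) */
       uint32_t cmd_stat;   /* GT_SDMA_CMD_* bits */
       uint32_t next_ptr;   /* physical address of the next descriptor */
       uint32_t buf_ptr;    /* physical address of the data buffer */
    };

    #define CMD_O   0x80000000   /* owned by the SDMA engine */
    #define CMD_EI  0x00800000   /* interrupt when the buffer is closed */
    #define CMD_F   0x00020000   /* first buffer of the frame */
    #define CMD_L   0x00010000   /* last buffer of the frame */

    /* Describe a frame that fits in one buffer: F, L and O are all set, and
       ownership stays with the engine until it clears CMD_O on completion. */
    static void build_single_txd(struct sdma_desc_sketch *d, uint32_t buf,
                                 uint32_t len, uint32_t next)
    {
       d->buf_ptr  = buf;
       d->buf_size = len << 16;
       d->next_ptr = next;
       d->cmd_stat = CMD_O | CMD_F | CMD_L | CMD_EI;
    }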
149 |
|
/* SDCR: SDMA Configuration Register */ |
150 |
|
#define GT_SDCR_RFT 0x00000001 /* Receive FIFO Threshold */ |
151 |
|
#define GT_SDCR_SFM 0x00000002 /* Single Frame Mode */ |
152 |
|
#define GT_SDCR_RC 0x0000003c /* Retransmit count */ |
153 |
|
#define GT_SDCR_BLMR 0x00000040 /* Big/Little Endian RX mode */ |
154 |
|
#define GT_SDCR_BLMT 0x00000080 /* Big/Little Endian TX mode */ |
155 |
|
#define GT_SDCR_POVR 0x00000100 /* PCI override */ |
156 |
|
#define GT_SDCR_RIFB 0x00000200 /* RX IRQ on frame boundary */ |
157 |
|
#define GT_SDCR_BSZ 0x00003000 /* Burst size */ |
158 |
|
|
159 |
|
/* SDCMR: SDMA Command Register */ |
160 |
|
#define GT_SDCMR_ERD 0x00000080 /* Enable RX DMA */ |
161 |
|
#define GT_SDCMR_AR 0x00008000 /* Abort Receive */ |
162 |
|
#define GT_SDCMR_STD 0x00010000 /* Stop TX */ |
163 |
|
#define GT_SDCMR_STDH GT_SDCMR_STD /* Stop TX High */ |
164 |
|
#define GT_SDCMR_STDL 0x00020000 /* Stop TX Low */ |
165 |
|
#define GT_SDCMR_TXD 0x00800000 /* TX Demand */ |
166 |
|
#define GT_SDCMR_TXDH GT_SDCMR_TXD /* Start TX High */ |
167 |
|
#define GT_SDCMR_TXDL 0x01000000 /* Start TX Low */ |
168 |
|
#define GT_SDCMR_AT 0x80000000 /* Abort Transmit */ |
169 |
|
|
170 |
|
/* === MultiProtocol Serial Controller (MPSC) ============================= */ |
171 |
|
|
172 |
|
/* 8 MPSC channels */ |
173 |
|
#define GT_MPSC_CHANNELS 8 |
174 |
|
|
175 |
|
/* MPSC channel */ |
176 |
|
struct mpsc_channel { |
177 |
|
m_uint32_t mmcrl; |
178 |
|
m_uint32_t mmcrh; |
179 |
|
m_uint32_t mpcr; |
180 |
|
m_uint32_t chr[10]; |
181 |
|
|
182 |
|
vtty_t *vtty; |
183 |
|
netio_desc_t *nio; |
184 |
|
}; |
185 |
|
|
186 |
|
#define GT_MPSC_MMCRL 0x000A00 /* Main Config Register Low */ |
187 |
|
#define GT_MPSC_MMCRH 0x000A04 /* Main Config Register High */ |
188 |
|
#define GT_MPSC_MPCR 0x000A08 /* Protocol Config Register */ |
189 |
|
#define GT_MPSC_CHR1 0x000A0C |
190 |
|
#define GT_MPSC_CHR2 0x000A10 |
191 |
|
#define GT_MPSC_CHR3 0x000A14 |
192 |
|
#define GT_MPSC_CHR4 0x000A18 |
193 |
|
#define GT_MPSC_CHR5 0x000A1C |
194 |
|
#define GT_MPSC_CHR6 0x000A20 |
195 |
|
#define GT_MPSC_CHR7 0x000A24 |
196 |
|
#define GT_MPSC_CHR8 0x000A28 |
197 |
|
#define GT_MPSC_CHR9 0x000A2C |
198 |
|
#define GT_MPSC_CHR10 0x000A30 |
199 |
|
|
200 |
|
#define GT_MMCRL_MODE_MASK 0x00000007 |
201 |
|
|
202 |
|
#define GT_MPSC_MODE_HDLC 0 |
203 |
|
#define GT_MPSC_MODE_UART 4 |
204 |
|
#define GT_MPSC_MODE_BISYNC 5 |
205 |
|
|
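Reviewer note: GT_MMCRL_MODE_MASK selects the channel protocol in the low bits of MMCRL; the mapping below mirrors the dispatch gt_sdma_send_buffer() performs later in this diff (HDLC frames go to the NIO, UART bytes to the VTTY). Sketch only:

    #include <stdint.h>

    /* Map the MMCRL mode field to a printable name. */
    static const char *mpsc_mode_name(uint32_t mmcrl)
    {
       switch (mmcrl & 0x07) {         /* GT_MMCRL_MODE_MASK */
          case 0:  return "HDLC";      /* GT_MPSC_MODE_HDLC */
          case 4:  return "UART";      /* GT_MPSC_MODE_UART */
          case 5:  return "BiSync";    /* GT_MPSC_MODE_BISYNC */
          default: return "unknown";
       }
    }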
206 |
|
/* === Ethernet definitions =============================================== */ |
207 |
#define GT_ETH_PORTS 2 |
#define GT_ETH_PORTS 2 |
208 |
#define GT_MAX_PKT_SIZE 2048 |
#define GT_MAX_PKT_SIZE 2048 |
209 |
|
|
263 |
#define GT_PSR_TXHIGH 0x00000040 /* TX High priority status */ |
#define GT_PSR_TXHIGH 0x00000040 /* TX High priority status */ |
264 |
#define GT_PSR_TXINP 0x00000080 /* TX in Progress */ |
#define GT_PSR_TXINP 0x00000080 /* TX in Progress */ |
265 |
|
|
|
/* SDCR: SDMA Configuration Register */ |
|
|
#define GT_SDCR_RC 0x0000003c /* Retransmit count */ |
|
|
#define GT_SDCR_BLMR 0x00000040 /* Big/Little Endian RX mode */ |
|
|
#define GT_SDCR_BLMT 0x00000080 /* Big/Little Endian TX mode */ |
|
|
#define GT_SDCR_POVR 0x00000100 /* PCI override */ |
|
|
#define GT_SDCR_RIFB 0x00000200 /* RX IRQ on frame boundary */ |
|
|
#define GT_SDCR_BSZ 0x00003000 /* Burst size */ |
|
|
|
|
|
/* SDCMR: SDMA Command Register */ |
|
|
#define GT_SDCMR_ERD 0x00000080 /* Enable RX DMA */ |
|
|
#define GT_SDCMR_AR 0x00008000 /* Abort Receive */ |
|
|
#define GT_SDCMR_STDH 0x00010000 /* Stop TX High */ |
|
|
#define GT_SDCMR_STDL 0x00020000 /* Stop TX Low */ |
|
|
#define GT_SDCMR_TXDH 0x00800000 /* Start TX High */ |
|
|
#define GT_SDCMR_TXDL 0x01000000 /* Start TX Low */ |
|
|
#define GT_SDCMR_AT 0x80000000 /* Abort Transmit */ |
|
|
|
|
266 |
/* ICR: Interrupt Cause Register */ |
/* ICR: Interrupt Cause Register */ |
267 |
#define GT_ICR_RXBUF 0x00000001 /* RX Buffer returned to host */ |
#define GT_ICR_RXBUF 0x00000001 /* RX Buffer returned to host */ |
268 |
#define GT_ICR_TXBUFH 0x00000004 /* TX Buffer High */ |
#define GT_ICR_TXBUFH 0x00000004 /* TX Buffer High */ |
340 |
#define GT_RXDESC_BS_MASK 0xFFFF0000 /* Buffer size */ |
#define GT_RXDESC_BS_MASK 0xFFFF0000 /* Buffer size */ |
341 |
#define GT_RXDESC_BS_SHIFT 16 |
#define GT_RXDESC_BS_SHIFT 16 |
342 |
|
|
|
/* RX/TX descriptor */ |
|
|
struct eth_desc { |
|
|
m_uint32_t buf_size; |
|
|
m_uint32_t cmd_stat; |
|
|
m_uint32_t next_ptr; |
|
|
m_uint32_t buf_ptr; |
|
|
}; |
|
|
|
|
343 |
/* Galileo Ethernet port */ |
/* Galileo Ethernet port */ |
344 |
struct eth_port { |
struct eth_port { |
345 |
netio_desc_t *nio; |
netio_desc_t *nio; |
366 |
m_uint32_t rx_bytes,tx_bytes,rx_frames,tx_frames; |
m_uint32_t rx_bytes,tx_bytes,rx_frames,tx_frames; |
367 |
}; |
}; |
368 |
|
|
369 |
|
/* ======================================================================== */ |
370 |
|
|
371 |
/* Galileo GT64xxx/GT96xxx system controller */ |
/* Galileo GT64xxx/GT96xxx system controller */ |
372 |
struct gt_data { |
struct gt_data { |
373 |
char *name; |
char *name; |
375 |
struct vdevice dev; |
struct vdevice dev; |
376 |
struct pci_device *pci_dev; |
struct pci_device *pci_dev; |
377 |
vm_instance_t *vm; |
vm_instance_t *vm; |
378 |
|
pthread_mutex_t lock; |
379 |
|
|
380 |
struct pci_bus *bus[2]; |
struct pci_bus *bus[2]; |
381 |
struct dma_channel dma[GT_DMA_CHANNELS]; |
struct dma_channel dma[GT_DMA_CHANNELS]; |
382 |
|
|
383 |
|
/* Interrupts (common) */ |
384 |
m_uint32_t int_cause_reg; |
m_uint32_t int_cause_reg; |
385 |
|
m_uint32_t int_high_cause_reg; |
386 |
m_uint32_t int_mask_reg; |
m_uint32_t int_mask_reg; |
387 |
|
|
388 |
/* Ethernet ports (GT-96100) */ |
/* Interrupts (GT96100) */ |
389 |
|
m_uint32_t int0_main_mask_reg,int0_high_mask_reg; |
390 |
|
m_uint32_t int1_main_mask_reg,int1_high_mask_reg; |
391 |
|
m_uint32_t ser_cause_reg; |
392 |
|
m_uint32_t serint0_mask_reg,serint1_mask_reg; |
393 |
|
u_int int0_irq,int1_irq,serint0_irq,serint1_irq; |
394 |
|
|
395 |
|
/* SDMA - Serial DMA (GT96100) */ |
396 |
|
m_uint32_t sgcr; |
397 |
|
m_uint32_t sdma_cause_reg,sdma_mask_reg; |
398 |
|
struct sdma_channel sdma[GT_SDMA_GROUPS][GT_SDMA_CHANNELS]; |
399 |
|
|
400 |
|
/* MPSC - MultiProtocol Serial Controller (GT96100) */ |
401 |
|
struct mpsc_channel mpsc[GT_MPSC_CHANNELS]; |
402 |
|
|
403 |
|
/* Ethernet ports (GT96100) */ |
404 |
u_int eth_irq; |
u_int eth_irq; |
405 |
ptask_id_t eth_tx_tid; |
ptask_id_t eth_tx_tid; |
406 |
struct eth_port eth_ports[GT_ETH_PORTS]; |
struct eth_port eth_ports[GT_ETH_PORTS]; |
407 |
m_uint32_t smi_reg; |
m_uint32_t smi_reg; |
408 |
m_uint16_t mii_regs[32][32]; |
m_uint16_t mii_regs[32][32]; |
409 |
|
|
410 |
|
/* IRQ status update */ |
411 |
|
void (*gt_update_irq_status)(struct gt_data *gt_data); |
412 |
}; |
}; |
413 |
|
|
414 |
|
#define GT_LOCK(d) pthread_mutex_lock(&(d)->lock) |
415 |
|
#define GT_UNLOCK(d) pthread_mutex_unlock(&(d)->lock) |
416 |
|
|
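Reviewer note: the new lock plus the swap32() moves seen throughout this change follow one pattern: convert once at the handler boundary, run the whole register switch() in host byte order under the device lock, and leave through a shared exit path. A schematic of that skeleton (it leans on the dynamips MTS_READ/MTS_WRITE, swap32() and m_uint64_t definitions, so this is a sketch, not a drop-in):

    static void *gt_reg_access_skeleton(struct gt_data *gt_data, u_int op_type,
                                        u_int op_size, m_uint64_t *data)
    {
       GT_LOCK(gt_data);

       if (op_type == MTS_READ)
          *data = 0;                 /* default value for unhandled reads */
       else if (op_size == 4)
          *data = swap32(*data);     /* guest -> host order once, on entry */

       /* ... DMA / SDMA / MPSC / Ethernet sub-handlers and the register
          switch() all run here in host byte order, jumping to the exit
          path ("goto done" in the real code) once the access is handled ... */

       GT_UNLOCK(gt_data);

       if ((op_type == MTS_READ) && (op_size == 4))
          *data = swap32(*data);     /* host -> guest order once, on exit */

       return NULL;
    }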
417 |
/* Log a GT message */ |
/* Log a GT message */ |
418 |
#define GT_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg) |
#define GT_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg) |
419 |
|
|
420 |
/* Update the interrupt status */ |
/* Update the interrupt status */ |
421 |
static void gt_update_irq_status(struct gt_data *gt_data) |
static void gt64k_update_irq_status(struct gt_data *gt_data) |
422 |
{ |
{ |
423 |
if (gt_data->pci_dev) { |
if (gt_data->pci_dev) { |
424 |
if (gt_data->int_cause_reg & gt_data->int_mask_reg) |
if (gt_data->int_cause_reg & gt_data->int_mask_reg) |
492 |
#endif |
#endif |
493 |
/* Trigger DMA interrupt */ |
/* Trigger DMA interrupt */ |
494 |
gt_data->int_cause_reg |= 1 << (4 + chan_id); |
gt_data->int_cause_reg |= 1 << (4 + chan_id); |
495 |
gt_update_irq_status(gt_data); |
gt_data->gt_update_irq_status(gt_data); |
496 |
} |
} |
497 |
} |
} |
498 |
|
|
499 |
#define DMA_REG(ch,reg_name) \ |
#define DMA_REG(ch,reg_name) \ |
500 |
if (op_type == MTS_WRITE) \ |
if (op_type == MTS_WRITE) \ |
501 |
gt_data->dma[ch].reg_name = swap32(*data); \ |
gt_data->dma[ch].reg_name = *data; \ |
502 |
else \ |
else \ |
503 |
*data = swap32(gt_data->dma[ch].reg_name); |
*data = gt_data->dma[ch].reg_name; |
504 |
|
|
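Reviewer note: with the swap32() calls removed from DMA_REG, the byte-order conversion now happens once in the caller (see the handler entry/exit pattern). For reference, DMA_REG(0,ctrl) now expands to roughly:

    if (op_type == MTS_WRITE)
       gt_data->dma[0].ctrl = *data;    /* value already in host byte order */
    else
       *data = gt_data->dma[0].ctrl;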
505 |
/* Handle a DMA channel */ |
/* Handle a DMA channel */ |
506 |
static int gt_dma_access(cpu_gen_t *cpu,struct vdevice *dev, |
static int gt_dma_access(cpu_gen_t *cpu,struct vdevice *dev, |
580 |
{ |
{ |
581 |
struct gt_data *gt_data = dev->priv_data; |
struct gt_data *gt_data = dev->priv_data; |
582 |
|
|
583 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) { |
584 |
*data = 0; |
*data = 0; |
585 |
|
} else { |
586 |
|
*data = swap32(*data); |
587 |
|
} |
588 |
|
|
589 |
if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0) |
if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0) |
590 |
return NULL; |
goto done; |
591 |
|
|
592 |
switch(offset) { |
switch(offset) { |
593 |
/* ===== DRAM Settings (completely faked, 128 Mb) ===== */ |
/* ===== DRAM Settings (completely faked, 128 Mb) ===== */ |
594 |
case 0x008: /* ras10_low */ |
case 0x008: /* ras10_low */ |
595 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
596 |
*data = swap32(0x000); |
*data = 0x000; |
597 |
break; |
break; |
598 |
case 0x010: /* ras10_high */ |
case 0x010: /* ras10_high */ |
599 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
600 |
*data = swap32(0x7F); |
*data = 0x7F; |
601 |
break; |
break; |
602 |
case 0x018: /* ras32_low */ |
case 0x018: /* ras32_low */ |
603 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
604 |
*data = swap32(0x080); |
*data = 0x080; |
605 |
break; |
break; |
606 |
case 0x020: /* ras32_high */ |
case 0x020: /* ras32_high */ |
607 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
608 |
*data = swap32(0x7F); |
*data = 0x7F; |
609 |
break; |
break; |
610 |
case 0x400: /* ras0_low */ |
case 0x400: /* ras0_low */ |
611 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
612 |
*data = swap32(0x00); |
*data = 0x00; |
613 |
break; |
break; |
614 |
case 0x404: /* ras0_high */ |
case 0x404: /* ras0_high */ |
615 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
616 |
*data = swap32(0xFF); |
*data = 0xFF; |
617 |
break; |
break; |
618 |
case 0x408: /* ras1_low */ |
case 0x408: /* ras1_low */ |
619 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
620 |
*data = swap32(0x7F); |
*data = 0x7F; |
621 |
break; |
break; |
622 |
case 0x40c: /* ras1_high */ |
case 0x40c: /* ras1_high */ |
623 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
624 |
*data = swap32(0x00); |
*data = 0x00; |
625 |
break; |
break; |
626 |
case 0x410: /* ras2_low */ |
case 0x410: /* ras2_low */ |
627 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
628 |
*data = swap32(0x00); |
*data = 0x00; |
629 |
break; |
break; |
630 |
case 0x414: /* ras2_high */ |
case 0x414: /* ras2_high */ |
631 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
632 |
*data = swap32(0xFF); |
*data = 0xFF; |
633 |
break; |
break; |
634 |
case 0x418: /* ras3_low */ |
case 0x418: /* ras3_low */ |
635 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
636 |
*data = swap32(0x7F); |
*data = 0x7F; |
637 |
break; |
break; |
638 |
case 0x41c: /* ras3_high */ |
case 0x41c: /* ras3_high */ |
639 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
640 |
*data = swap32(0x00); |
*data = 0x00; |
641 |
break; |
break; |
642 |
case 0xc08: /* pci0_cs10 */ |
case 0xc08: /* pci0_cs10 */ |
643 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
644 |
*data = swap32(0xFFF); |
*data = 0xFFF; |
645 |
break; |
break; |
646 |
case 0xc0c: /* pci0_cs32 */ |
case 0xc0c: /* pci0_cs32 */ |
647 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
648 |
*data = swap32(0xFFF); |
*data = 0xFFF; |
649 |
break; |
break; |
650 |
|
|
651 |
case 0xc00: /* pci_cmd */ |
case 0xc00: /* pci_cmd */ |
652 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
653 |
*data = swap32(0x00008001); |
*data = 0x00008001; |
654 |
break; |
break; |
655 |
|
|
656 |
/* ===== Interrupt Cause Register ===== */ |
/* ===== Interrupt Cause Register ===== */ |
657 |
case 0xc18: |
case 0xc18: |
658 |
if (op_type == MTS_READ) { |
if (op_type == MTS_READ) { |
659 |
*data = swap32(gt_data->int_cause_reg); |
*data = gt_data->int_cause_reg; |
660 |
} else { |
} else { |
661 |
gt_data->int_cause_reg &= swap32(*data); |
gt_data->int_cause_reg &= *data; |
662 |
gt_update_irq_status(gt_data); |
gt64k_update_irq_status(gt_data); |
663 |
} |
} |
664 |
break; |
break; |
665 |
|
|
666 |
/* ===== Interrupt Mask Register ===== */ |
/* ===== Interrupt Mask Register ===== */ |
667 |
case 0xc1c: |
case 0xc1c: |
668 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
669 |
*data = swap32(gt_data->int_mask_reg); |
*data = gt_data->int_mask_reg; |
670 |
else { |
else { |
671 |
gt_data->int_mask_reg = swap32(*data); |
gt_data->int_mask_reg = *data; |
672 |
gt_update_irq_status(gt_data); |
gt64k_update_irq_status(gt_data); |
673 |
} |
} |
674 |
break; |
break; |
675 |
|
|
676 |
/* ===== PCI Configuration ===== */ |
/* ===== PCI Configuration ===== */ |
677 |
case PCI_BUS_ADDR: /* pci configuration address (0xcf8) */ |
case PCI_BUS_ADDR: /* pci configuration address (0xcf8) */ |
678 |
pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,TRUE,data); |
pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,FALSE,data); |
679 |
break; |
break; |
680 |
|
|
681 |
case PCI_BUS_DATA: /* pci data address (0xcfc) */ |
case PCI_BUS_DATA: /* pci data address (0xcfc) */ |
682 |
pci_dev_data_handler(cpu,gt_data->bus[0],op_type,TRUE,data); |
pci_dev_data_handler(cpu,gt_data->bus[0],op_type,FALSE,data); |
683 |
break; |
break; |
684 |
|
|
685 |
#if DEBUG_UNKNOWN |
#if DEBUG_UNKNOWN |
694 |
#endif |
#endif |
695 |
} |
} |
696 |
|
|
697 |
|
done: |
698 |
|
if (op_type == MTS_READ) |
699 |
|
*data = swap32(*data); |
700 |
return NULL; |
return NULL; |
701 |
} |
} |
702 |
|
|
708 |
{ |
{ |
709 |
struct gt_data *gt_data = dev->priv_data; |
struct gt_data *gt_data = dev->priv_data; |
710 |
|
|
711 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) { |
712 |
*data = 0; |
*data = 0; |
713 |
|
} else { |
714 |
|
*data = swap32(*data); |
715 |
|
} |
716 |
|
|
717 |
if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0) |
if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0) |
718 |
return NULL; |
goto done; |
719 |
|
|
720 |
switch(offset) { |
switch(offset) { |
721 |
case 0x008: /* ras10_low */ |
case 0x008: /* ras10_low */ |
722 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
723 |
*data = swap32(0x000); |
*data = 0x000; |
724 |
break; |
break; |
725 |
case 0x010: /* ras10_high */ |
case 0x010: /* ras10_high */ |
726 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
727 |
*data = swap32(0x7F); |
*data = 0x7F; |
728 |
break; |
break; |
729 |
case 0x018: /* ras32_low */ |
case 0x018: /* ras32_low */ |
730 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
731 |
*data = swap32(0x100); |
*data = 0x100; |
732 |
break; |
break; |
733 |
case 0x020: /* ras32_high */ |
case 0x020: /* ras32_high */ |
734 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
735 |
*data = swap32(0x7F); |
*data = 0x7F; |
736 |
break; |
break; |
737 |
case 0x400: /* ras0_low */ |
case 0x400: /* ras0_low */ |
738 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
739 |
*data = swap32(0x00); |
*data = 0x00; |
740 |
break; |
break; |
741 |
case 0x404: /* ras0_high */ |
case 0x404: /* ras0_high */ |
742 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
743 |
*data = swap32(0xFF); |
*data = 0xFF; |
744 |
break; |
break; |
745 |
case 0x408: /* ras1_low */ |
case 0x408: /* ras1_low */ |
746 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
747 |
*data = swap32(0x7F); |
*data = 0x7F; |
748 |
break; |
break; |
749 |
case 0x40c: /* ras1_high */ |
case 0x40c: /* ras1_high */ |
750 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
751 |
*data = swap32(0x00); |
*data = 0x00; |
752 |
break; |
break; |
753 |
case 0x410: /* ras2_low */ |
case 0x410: /* ras2_low */ |
754 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
755 |
*data = swap32(0x00); |
*data = 0x00; |
756 |
break; |
break; |
757 |
case 0x414: /* ras2_high */ |
case 0x414: /* ras2_high */ |
758 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
759 |
*data = swap32(0xFF); |
*data = 0xFF; |
760 |
break; |
break; |
761 |
case 0x418: /* ras3_low */ |
case 0x418: /* ras3_low */ |
762 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
763 |
*data = swap32(0x7F); |
*data = 0x7F; |
764 |
break; |
break; |
765 |
case 0x41c: /* ras3_high */ |
case 0x41c: /* ras3_high */ |
766 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
767 |
*data = swap32(0x00); |
*data = 0x00; |
768 |
break; |
break; |
769 |
case 0xc08: /* pci0_cs10 */ |
case 0xc08: /* pci0_cs10 */ |
770 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
771 |
*data = swap32(0xFFF); |
*data = 0xFFF; |
772 |
break; |
break; |
773 |
case 0xc0c: /* pci0_cs32 */ |
case 0xc0c: /* pci0_cs32 */ |
774 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
775 |
*data = swap32(0xFFF); |
*data = 0xFFF; |
776 |
break; |
break; |
777 |
|
|
778 |
case 0xc00: /* pci_cmd */ |
case 0xc00: /* pci_cmd */ |
779 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
780 |
*data = swap32(0x00008001); |
*data = 0x00008001; |
781 |
break; |
break; |
782 |
|
|
783 |
/* ===== Interrupt Cause Register ===== */ |
/* ===== Interrupt Cause Register ===== */ |
784 |
case 0xc18: |
case 0xc18: |
785 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
786 |
*data = swap32(gt_data->int_cause_reg); |
*data = gt_data->int_cause_reg; |
787 |
else { |
else { |
788 |
gt_data->int_cause_reg &= swap32(*data); |
gt_data->int_cause_reg &= *data; |
789 |
gt_update_irq_status(gt_data); |
gt64k_update_irq_status(gt_data); |
790 |
} |
} |
791 |
break; |
break; |
792 |
|
|
793 |
/* ===== Interrupt Mask Register ===== */ |
/* ===== Interrupt Mask Register ===== */ |
794 |
case 0xc1c: |
case 0xc1c: |
795 |
if (op_type == MTS_READ) { |
if (op_type == MTS_READ) { |
796 |
*data = swap32(gt_data->int_mask_reg); |
*data = gt_data->int_mask_reg; |
797 |
} else { |
} else { |
798 |
gt_data->int_mask_reg = swap32(*data); |
gt_data->int_mask_reg = *data; |
799 |
gt_update_irq_status(gt_data); |
gt64k_update_irq_status(gt_data); |
800 |
} |
} |
801 |
break; |
break; |
802 |
|
|
803 |
/* ===== PCI Bus 1 ===== */ |
/* ===== PCI Bus 1 ===== */ |
804 |
case 0xcf0: |
case 0xcf0: |
805 |
pci_dev_addr_handler(cpu,gt_data->bus[1],op_type,TRUE,data); |
pci_dev_addr_handler(cpu,gt_data->bus[1],op_type,FALSE,data); |
806 |
break; |
break; |
807 |
|
|
808 |
case 0xcf4: |
case 0xcf4: |
809 |
pci_dev_data_handler(cpu,gt_data->bus[1],op_type,TRUE,data); |
pci_dev_data_handler(cpu,gt_data->bus[1],op_type,FALSE,data); |
810 |
break; |
break; |
811 |
|
|
812 |
/* ===== PCI Bus 0 ===== */ |
/* ===== PCI Bus 0 ===== */ |
813 |
case PCI_BUS_ADDR: /* pci configuration address (0xcf8) */ |
case PCI_BUS_ADDR: /* pci configuration address (0xcf8) */ |
814 |
pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,TRUE,data); |
pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,FALSE,data); |
815 |
break; |
break; |
816 |
|
|
817 |
case PCI_BUS_DATA: /* pci data address (0xcfc) */ |
case PCI_BUS_DATA: /* pci data address (0xcfc) */ |
818 |
pci_dev_data_handler(cpu,gt_data->bus[0],op_type,TRUE,data); |
pci_dev_data_handler(cpu,gt_data->bus[0],op_type,FALSE,data); |
819 |
break; |
break; |
820 |
|
|
821 |
#if DEBUG_UNKNOWN |
#if DEBUG_UNKNOWN |
830 |
#endif |
#endif |
831 |
} |
} |
832 |
|
|
833 |
|
done: |
834 |
|
if (op_type == MTS_READ) |
835 |
|
*data = swap32(*data); |
836 |
return NULL; |
return NULL; |
837 |
} |
} |
838 |
|
|
839 |
|
/* ======================================================================== */ |
840 |
|
/* GT96k Interrupts */ |
841 |
|
/* ======================================================================== */ |
842 |
|
static void gt96k_update_irq_status(struct gt_data *d) |
843 |
|
{ |
844 |
|
/* Interrupt0* active ? */ |
845 |
|
if ((d->int_cause_reg & d->int0_main_mask_reg) || |
846 |
|
(d->int_high_cause_reg & d->int0_high_mask_reg)) |
847 |
|
{ |
848 |
|
d->int_cause_reg |= 1 << 30; |
849 |
|
vm_set_irq(d->vm,d->int0_irq); |
850 |
|
} |
851 |
|
else |
852 |
|
{ |
853 |
|
d->int_cause_reg &= ~(1 << 30); |
854 |
|
vm_clear_irq(d->vm,d->int0_irq); |
855 |
|
} |
856 |
|
|
857 |
|
/* Interrupt1* active ? */ |
858 |
|
if ((d->int_cause_reg & d->int1_main_mask_reg) || |
859 |
|
(d->int_high_cause_reg & d->int1_high_mask_reg)) |
860 |
|
{ |
861 |
|
d->int_cause_reg |= 1 << 31; |
862 |
|
vm_set_irq(d->vm,d->int1_irq); |
863 |
|
} |
864 |
|
else |
865 |
|
{ |
866 |
|
d->int_cause_reg &= ~(1 << 31); |
867 |
|
vm_clear_irq(d->vm,d->int1_irq); |
868 |
|
} |
869 |
|
|
870 |
|
/* SerInt0* active ? */ |
871 |
|
if (d->ser_cause_reg & d->serint0_mask_reg) { |
872 |
|
vm_set_irq(d->vm,d->serint0_irq); |
873 |
|
} else { |
874 |
|
vm_clear_irq(d->vm,d->serint0_irq); |
875 |
|
} |
876 |
|
|
877 |
|
/* SerInt1* active ? */ |
878 |
|
if (d->ser_cause_reg & d->serint1_mask_reg) { |
879 |
|
vm_set_irq(d->vm,d->serint1_irq); |
880 |
|
} else { |
881 |
|
vm_clear_irq(d->vm,d->serint1_irq); |
882 |
|
} |
883 |
|
} |
884 |
|
|
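Reviewer note: gt96k_update_irq_status() above derives two summary bits from the same cause registers: bit 30 of the main cause register reflects the Int0* pin and bit 31 the Int1* pin, each behind its own main/high mask pair. A condensed restatement of that predicate (names illustrative):

    #include <stdint.h>

    /* True when the masked main or high cause would assert the given pin. */
    static int gt96k_pin_active(uint32_t main_cause, uint32_t high_cause,
                                uint32_t main_mask, uint32_t high_mask)
    {
       return (main_cause & main_mask) || (high_cause & high_mask);
    }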
885 |
|
/* ======================================================================== */ |
886 |
|
/* SDMA (Serial DMA) */ |
887 |
|
/* ======================================================================== */ |
888 |
|
|
889 |
|
/* Update SDMA interrupt status */ |
890 |
|
static void gt_sdma_update_int_status(struct gt_data *d) |
891 |
|
{ |
892 |
|
/* Update general SDMA status */ |
893 |
|
if (d->sdma_cause_reg & d->sdma_mask_reg) { |
894 |
|
d->ser_cause_reg |= GT_SCR_SDMA_SUM; |
895 |
|
d->int_high_cause_reg |= GT_IHCR_SDMA_SUM; |
896 |
|
} else { |
897 |
|
d->ser_cause_reg &= ~GT_SCR_SDMA_SUM; |
898 |
|
d->int_high_cause_reg &= ~GT_IHCR_SDMA_SUM; |
899 |
|
} |
900 |
|
|
901 |
|
gt96k_update_irq_status(d); |
902 |
|
} |
903 |
|
|
904 |
|
/* Update SDMA interrupt status for the specified channel */ |
905 |
|
static void gt_sdma_update_channel_int_status(struct gt_data *d,u_int chan_id) |
906 |
|
{ |
907 |
|
m_uint32_t ch_st; |
908 |
|
|
909 |
|
/* Get the status of the specified SDMA channel */ |
910 |
|
ch_st = d->sdma_cause_reg & (0x0000000F << (chan_id << 2)); |
911 |
|
|
912 |
|
if (ch_st) |
913 |
|
d->ser_cause_reg |= GT_SCR_SDMA0_SUM << (chan_id << 1); |
914 |
|
else |
915 |
|
d->ser_cause_reg &= ~(GT_SCR_SDMA0_SUM << (chan_id << 1)); |
916 |
|
|
917 |
|
gt_sdma_update_int_status(d); |
918 |
|
} |
919 |
|
|
920 |
|
/* Set SDMA cause register for a channel */ |
921 |
|
static inline void gt_sdma_set_cause(struct gt_data *d,u_int chan_id, |
922 |
|
u_int value) |
923 |
|
{ |
924 |
|
d->sdma_cause_reg |= value << (chan_id << 2); |
925 |
|
} |
926 |
|
|
927 |
|
/* Read a SDMA descriptor from memory */ |
928 |
|
static void gt_sdma_desc_read(struct gt_data *d,m_uint32_t addr, |
929 |
|
struct sdma_desc *desc) |
930 |
|
{ |
931 |
|
physmem_copy_from_vm(d->vm,desc,addr,sizeof(struct sdma_desc)); |
932 |
|
|
933 |
|
/* byte-swapping */ |
934 |
|
desc->buf_size = vmtoh32(desc->buf_size); |
935 |
|
desc->cmd_stat = vmtoh32(desc->cmd_stat); |
936 |
|
desc->next_ptr = vmtoh32(desc->next_ptr); |
937 |
|
desc->buf_ptr = vmtoh32(desc->buf_ptr); |
938 |
|
} |
939 |
|
|
940 |
|
/* Write a SDMA descriptor to memory */ |
941 |
|
static void gt_sdma_desc_write(struct gt_data *d,m_uint32_t addr, |
942 |
|
struct sdma_desc *desc) |
943 |
|
{ |
944 |
|
struct sdma_desc tmp; |
945 |
|
|
946 |
|
/* byte-swapping */ |
947 |
|
tmp.cmd_stat = vmtoh32(desc->cmd_stat); |
948 |
|
tmp.buf_size = vmtoh32(desc->buf_size); |
949 |
|
tmp.next_ptr = vmtoh32(desc->next_ptr); |
950 |
|
tmp.buf_ptr = vmtoh32(desc->buf_ptr); |
951 |
|
|
952 |
|
physmem_copy_to_vm(d->vm,&tmp,addr,sizeof(struct sdma_desc)); |
953 |
|
} |
954 |
|
|
955 |
|
/* Send contents of a SDMA buffer */ |
956 |
|
static void gt_sdma_send_buffer(struct gt_data *d,u_int chan_id, |
957 |
|
u_char *buffer,m_uint32_t len) |
958 |
|
{ |
959 |
|
struct mpsc_channel *channel; |
960 |
|
u_int mode; |
961 |
|
|
962 |
|
channel = &d->mpsc[chan_id]; |
963 |
|
mode = channel->mmcrl & GT_MMCRL_MODE_MASK; |
964 |
|
|
965 |
|
switch(mode) { |
966 |
|
case GT_MPSC_MODE_HDLC: |
967 |
|
if (channel->nio != NULL) |
968 |
|
netio_send(channel->nio,buffer,len); |
969 |
|
break; |
970 |
|
|
971 |
|
case GT_MPSC_MODE_UART: |
972 |
|
if (channel->vtty != NULL) |
973 |
|
vtty_put_buffer(channel->vtty,(char *)buffer,len); |
974 |
|
break; |
975 |
|
} |
976 |
|
} |
977 |
|
|
978 |
|
/* Start TX DMA process */ |
979 |
|
static int gt_sdma_tx_start(struct gt_data *d,struct sdma_channel *chan) |
980 |
|
{ |
981 |
|
u_char pkt[GT_MAX_PKT_SIZE],*pkt_ptr; |
982 |
|
struct sdma_desc txd0,ctxd,*ptxd; |
983 |
|
m_uint32_t tx_start,tx_current; |
984 |
|
m_uint32_t len,tot_len; |
985 |
|
int abort = FALSE; |
986 |
|
|
987 |
|
tx_start = tx_current = chan->sctdp; |
988 |
|
|
989 |
|
if (!tx_start) |
990 |
|
return(FALSE); |
991 |
|
|
992 |
|
ptxd = &txd0; |
993 |
|
gt_sdma_desc_read(d,tx_start,ptxd); |
994 |
|
|
995 |
|
/* If we don't own the first descriptor, we cannot transmit */ |
996 |
|
if (!(txd0.cmd_stat & GT_TXDESC_OWN)) |
997 |
|
return(FALSE); |
998 |
|
|
999 |
|
/* Empty packet for now */ |
1000 |
|
pkt_ptr = pkt; |
1001 |
|
tot_len = 0; |
1002 |
|
|
1003 |
|
for(;;) |
1004 |
|
{ |
1005 |
|
/* Copy packet data to the buffer */ |
1006 |
|
len = (ptxd->buf_size & GT_TXDESC_BC_MASK) >> GT_TXDESC_BC_SHIFT; |
1007 |
|
|
1008 |
|
physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->buf_ptr,len); |
1009 |
|
pkt_ptr += len; |
1010 |
|
tot_len += len; |
1011 |
|
|
1012 |
|
/* Clear the OWN bit if this is not the first descriptor */ |
1013 |
|
if (!(ptxd->cmd_stat & GT_TXDESC_F)) { |
1014 |
|
ptxd->cmd_stat &= ~GT_TXDESC_OWN; |
1015 |
|
physmem_copy_u32_to_vm(d->vm,tx_current,ptxd->cmd_stat); |
1016 |
|
} |
1017 |
|
|
1018 |
|
tx_current = ptxd->next_ptr; |
1019 |
|
|
1020 |
|
/* Last descriptor or no more desc available ? */ |
1021 |
|
if (ptxd->cmd_stat & GT_TXDESC_L) |
1022 |
|
break; |
1023 |
|
|
1024 |
|
if (!tx_current) { |
1025 |
|
abort = TRUE; |
1026 |
|
break; |
1027 |
|
} |
1028 |
|
|
1029 |
|
/* Fetch the next descriptor */ |
1030 |
|
gt_sdma_desc_read(d,tx_current,&ctxd); |
1031 |
|
ptxd = &ctxd; |
1032 |
|
} |
1033 |
|
|
1034 |
|
if ((tot_len != 0) && !abort) { |
1035 |
|
#if DEBUG_SDMA |
1036 |
|
GT_LOG(d,"SDMA%u: sending packet of %u bytes\n",tot_len); |
1037 |
|
mem_dump(log_file,pkt,tot_len); |
1038 |
|
#endif |
1039 |
|
/* send it on wire */ |
1040 |
|
gt_sdma_send_buffer(d,chan->id,pkt,tot_len); |
1041 |
|
|
1042 |
|
/* Signal that a TX buffer has been transmitted */ |
1043 |
|
gt_sdma_set_cause(d,chan->id,GT_SDMA_CAUSE_TXBUF0); |
1044 |
|
} |
1045 |
|
|
1046 |
|
/* Clear the OWN flag of the first descriptor */ |
1047 |
|
txd0.cmd_stat &= ~GT_TXDESC_OWN; |
1048 |
|
physmem_copy_u32_to_vm(d->vm,tx_start+4,txd0.cmd_stat); |
1049 |
|
|
1050 |
|
chan->sctdp = tx_current; |
1051 |
|
|
1052 |
|
if (abort || !tx_current) { |
1053 |
|
gt_sdma_set_cause(d,chan->id,GT_SDMA_CAUSE_TXEND0); |
1054 |
|
chan->sdcm &= ~GT_SDCMR_TXD; |
1055 |
|
} |
1056 |
|
|
1057 |
|
/* Update interrupt status */ |
1058 |
|
gt_sdma_update_channel_int_status(d,chan->id); |
1059 |
|
return(TRUE); |
1060 |
|
} |
1061 |
|
|
1062 |
|
/* Put a packet in the buffer of a descriptor */ |
1063 |
|
static void gt_sdma_rxdesc_put_pkt(struct gt_data *d,struct sdma_desc *rxd, |
1064 |
|
u_char **pkt,ssize_t *pkt_len) |
1065 |
|
{ |
1066 |
|
ssize_t len,cp_len; |
1067 |
|
|
1068 |
|
len = (rxd->buf_size & GT_RXDESC_BS_MASK) >> GT_RXDESC_BS_SHIFT; |
1069 |
|
|
1070 |
|
/* compute the data length to copy */ |
1071 |
|
cp_len = m_min(len,*pkt_len); |
1072 |
|
|
1073 |
|
/* copy packet data to the VM physical RAM */ |
1074 |
|
physmem_copy_to_vm(d->vm,*pkt,rxd->buf_ptr,cp_len); |
1075 |
|
|
1076 |
|
/* set the byte count in descriptor */ |
1077 |
|
rxd->buf_size |= cp_len; |
1078 |
|
|
1079 |
|
*pkt += cp_len; |
1080 |
|
*pkt_len -= cp_len; |
1081 |
|
} |
1082 |
|
|
1083 |
|
/* Put a packet into SDMA buffers */ |
1084 |
|
static int gt_sdma_handle_rxqueue(struct gt_data *d, |
1085 |
|
struct sdma_channel *channel, |
1086 |
|
u_char *pkt,ssize_t pkt_len) |
1087 |
|
{ |
1088 |
|
m_uint32_t rx_start,rx_current; |
1089 |
|
struct sdma_desc rxd0,rxdn,*rxdc; |
1090 |
|
ssize_t tot_len = pkt_len; |
1091 |
|
u_char *pkt_ptr = pkt; |
1092 |
|
int i; |
1093 |
|
|
1094 |
|
/* Truncate the packet if it is too big */ |
1095 |
|
pkt_len = m_min(pkt_len,GT_MAX_PKT_SIZE); |
1096 |
|
|
1097 |
|
/* Copy the first RX descriptor */ |
1098 |
|
if (!(rx_start = rx_current = channel->scrdp)) |
1099 |
|
goto dma_error; |
1100 |
|
|
1101 |
|
/* Load the first RX descriptor */ |
1102 |
|
gt_sdma_desc_read(d,rx_start,&rxd0); |
1103 |
|
|
1104 |
|
#if DEBUG_SDMA |
1105 |
|
GT_LOG(d,"SDMA channel %u: reading desc at 0x%8.8x " |
1106 |
|
"[buf_size=0x%8.8x,cmd_stat=0x%8.8x," |
1107 |
|
"next_ptr=0x%8.8x,buf_ptr=0x%8.8x]\n", |
1108 |
|
channel->id,rx_start,rxd0.buf_size,rxd0.cmd_stat, |
1109 |
|
rxd0.next_ptr,rxd0.buf_ptr); |
1110 |
|
#endif |
1111 |
|
|
1112 |
|
for(i=0,rxdc=&rxd0;tot_len>0;i++) |
1113 |
|
{ |
1114 |
|
/* We must own the descriptor */ |
1115 |
|
if (!(rxdc->cmd_stat & GT_RXDESC_OWN)) |
1116 |
|
goto dma_error; |
1117 |
|
|
1118 |
|
/* Put data into the descriptor buffer */ |
1119 |
|
gt_sdma_rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len); |
1120 |
|
|
1121 |
|
/* Clear the OWN bit */ |
1122 |
|
rxdc->cmd_stat &= ~GT_RXDESC_OWN; |
1123 |
|
|
1124 |
|
/* We have finished if the complete packet has been stored */ |
1125 |
|
if (tot_len == 0) { |
1126 |
|
rxdc->cmd_stat |= GT_RXDESC_L; |
1127 |
|
rxdc->buf_size += 4; /* Add 4 bytes for CRC */ |
1128 |
|
} |
1129 |
|
|
1130 |
|
/* Update the descriptor in host memory (but not the 1st) */ |
1131 |
|
if (i != 0) |
1132 |
|
gt_sdma_desc_write(d,rx_current,rxdc); |
1133 |
|
|
1134 |
|
/* Get address of the next descriptor */ |
1135 |
|
rx_current = rxdc->next_ptr; |
1136 |
|
|
1137 |
|
if (tot_len == 0) |
1138 |
|
break; |
1139 |
|
|
1140 |
|
if (!rx_current) |
1141 |
|
goto dma_error; |
1142 |
|
|
1143 |
|
/* Read the next descriptor from VM physical RAM */ |
1144 |
|
gt_sdma_desc_read(d,rx_current,&rxdn); |
1145 |
|
rxdc = &rxdn; |
1146 |
|
} |
1147 |
|
|
1148 |
|
/* Update the RX pointers */ |
1149 |
|
channel->scrdp = rx_current; |
1150 |
|
|
1151 |
|
/* Update the first RX descriptor */ |
1152 |
|
rxd0.cmd_stat |= GT_RXDESC_F; |
1153 |
|
gt_sdma_desc_write(d,rx_start,&rxd0); |
1154 |
|
|
1155 |
|
/* Indicate that we have a frame ready */ |
1156 |
|
gt_sdma_set_cause(d,channel->id,GT_SDMA_CAUSE_RXBUF0); |
1157 |
|
gt_sdma_update_channel_int_status(d,channel->id); |
1158 |
|
return(TRUE); |
1159 |
|
|
1160 |
|
dma_error: |
1161 |
|
gt_sdma_set_cause(d,channel->id,GT_SDMA_CAUSE_RXERR0); |
1162 |
|
gt_sdma_update_channel_int_status(d,channel->id); |
1163 |
|
return(FALSE); |
1164 |
|
} |
1165 |
|
|
1166 |
|
/* Handle RX packet for a SDMA channel */ |
1167 |
|
static int gt_sdma_handle_rx_pkt(netio_desc_t *nio, |
1168 |
|
u_char *pkt,ssize_t pkt_len, |
1169 |
|
struct gt_data *d,void *arg) |
1170 |
|
{ |
1171 |
|
struct sdma_channel *channel; |
1172 |
|
u_int chan_id = (int)arg; |
1173 |
|
u_int group_id; |
1174 |
|
|
1175 |
|
GT_LOCK(d); |
1176 |
|
|
1177 |
|
/* Find the SDMA group associated to the MPSC channel for receiving */ |
1178 |
|
group_id = (d->sgcr >> chan_id) & 0x01; |
1179 |
|
channel = &d->sdma[group_id][chan_id]; |
1180 |
|
|
1181 |
|
gt_sdma_handle_rxqueue(d,channel,pkt,pkt_len); |
1182 |
|
GT_UNLOCK(d); |
1183 |
|
return(TRUE); |
1184 |
|
} |
1185 |
|
|
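Reviewer note: the RX path above picks the serving SDMA group with (d->sgcr >> chan_id) & 0x01, i.e. one configuration bit per MPSC channel. A one-line restatement (interpretation of the SGCR bit layout is taken from this code, not from the datasheet):

    /* Group (0 or 1) that serves MPSC channel "chan_id" according to SGCR. */
    static unsigned sdma_group_for_channel(unsigned sgcr, unsigned chan_id)
    {
       return (sgcr >> chan_id) & 0x01;
    }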
1186 |
|
/* Handle a SDMA channel */ |
1187 |
|
static int gt_sdma_access(cpu_gen_t *cpu,struct vdevice *dev, |
1188 |
|
m_uint32_t offset,u_int op_size,u_int op_type, |
1189 |
|
m_uint64_t *data) |
1190 |
|
{ |
1191 |
|
struct gt_data *gt_data = dev->priv_data; |
1192 |
|
struct sdma_channel *channel; |
1193 |
|
u_int group,chan_id,reg; |
1194 |
|
|
1195 |
|
if ((offset & 0x000F00) != 0x000900) |
1196 |
|
return(FALSE); |
1197 |
|
|
1198 |
|
/* Decode group, channel and register */ |
1199 |
|
group = (offset >> 20) & 0x0F; |
1200 |
|
chan_id = (offset >> 16) & 0x0F; |
1201 |
|
reg = offset & 0xFFFF; |
1202 |
|
|
1203 |
|
if ((group >= GT_SDMA_GROUPS) || (chan_id >= GT_SDMA_CHANNELS)) { |
1204 |
|
cpu_log(cpu,"GT96100","invalid SDMA register 0x%8.8x\n",offset); |
1205 |
|
return(TRUE); |
1206 |
|
} |
1207 |
|
|
1208 |
|
channel = >_data->sdma[group][chan_id]; |
1209 |
|
|
1210 |
|
#if 0 |
1211 |
|
printf("SDMA: access to reg 0x%6.6x (group=%u, channel=%u)\n", |
1212 |
|
offset, group, chan_id); |
1213 |
|
#endif |
1214 |
|
|
1215 |
|
switch(reg) { |
1216 |
|
/* Configuration Register */ |
1217 |
|
case GT_SDMA_SDC: |
1218 |
|
break; |
1219 |
|
|
1220 |
|
/* Command Register */ |
1221 |
|
case GT_SDMA_SDCM: |
1222 |
|
if (op_type == MTS_WRITE) { |
1223 |
|
channel->sdcm = *data; |
1224 |
|
|
1225 |
|
if (channel->sdcm & GT_SDCMR_TXD) { |
1226 |
|
#if DEBUG_SDMA |
1227 |
|
cpu_log(cpu,"GT96100-SDMA","starting TX transfer (%u/%u)\n", |
1228 |
|
group,chan_id); |
1229 |
|
#endif |
1230 |
|
gt_sdma_tx_start(gt_data,channel); |
1231 |
|
} |
1232 |
|
} else { |
1233 |
|
*data = 0xFF; //0xFFFFFFFF; |
1234 |
|
} |
1235 |
|
break; |
1236 |
|
|
1237 |
|
/* Current RX descriptor */ |
1238 |
|
case GT_SDMA_SCRDP: |
1239 |
|
if (op_type == MTS_READ) |
1240 |
|
*data = channel->scrdp; |
1241 |
|
else |
1242 |
|
channel->scrdp = *data; |
1243 |
|
break; |
1244 |
|
|
1245 |
|
/* Current TX desc. pointer */ |
1246 |
|
case GT_SDMA_SCTDP: |
1247 |
|
if (op_type == MTS_READ) |
1248 |
|
*data = channel->sctdp; |
1249 |
|
else |
1250 |
|
channel->sctdp = *data; |
1251 |
|
break; |
1252 |
|
|
1253 |
|
/* First TX desc. pointer */ |
1254 |
|
case GT_SDMA_SFTDP: |
1255 |
|
if (op_type == MTS_READ) |
1256 |
|
*data = channel->sftdp; |
1257 |
|
else |
1258 |
|
channel->sftdp = *data; |
1259 |
|
break; |
1260 |
|
|
1261 |
|
default: |
1262 |
|
/* unknown/unmanaged register */ |
1263 |
|
return(FALSE); |
1264 |
|
} |
1265 |
|
|
1266 |
|
return(TRUE); |
1267 |
|
} |
1268 |
|
|
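Reviewer note: gt_sdma_access() accepts any offset whose bits 8-11 equal 0x9, then splits it into group (bits 20-23), channel (bits 16-19) and register (low 16 bits). A worked example under those assumptions, using the hypothetical offset 0x12c910:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
       uint32_t offset = 0x12c910;               /* illustrative value */
       unsigned group  = (offset >> 20) & 0x0F;  /* -> 1 */
       unsigned chan   = (offset >> 16) & 0x0F;  /* -> 2 */
       unsigned reg    = offset & 0xFFFF;        /* -> 0xc910 = GT_SDMA_SCTDP */
       printf("group=%u channel=%u reg=0x%04x\n", group, chan, reg);
       return 0;
    }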
1269 |
|
/* ======================================================================== */ |
1270 |
|
/* MPSC (MultiProtocol Serial Controller) */ |
1271 |
|
/* ======================================================================== */ |
1272 |
|
|
1273 |
|
/* Handle a MPSC channel */ |
1274 |
|
static int gt_mpsc_access(cpu_gen_t *cpu,struct vdevice *dev, |
1275 |
|
m_uint32_t offset,u_int op_size,u_int op_type, |
1276 |
|
m_uint64_t *data) |
1277 |
|
{ |
1278 |
|
struct gt_data *gt_data = dev->priv_data; |
1279 |
|
struct mpsc_channel *channel; |
1280 |
|
u_int chan_id,reg,reg2; |
1281 |
|
|
1282 |
|
if ((offset & 0x000F00) != 0x000A00) |
1283 |
|
return(FALSE); |
1284 |
|
|
1285 |
|
/* Decode channel ID and register */ |
1286 |
|
chan_id = offset >> 15; |
1287 |
|
reg = offset & 0xFFF; |
1288 |
|
|
1289 |
|
if (chan_id >= GT_MPSC_CHANNELS) |
1290 |
|
return(FALSE); |
1291 |
|
|
1292 |
|
channel = >_data->mpsc[chan_id]; |
1293 |
|
|
1294 |
|
switch(reg) { |
1295 |
|
/* Main Config Register Low */ |
1296 |
|
case GT_MPSC_MMCRL: |
1297 |
|
if (op_type == MTS_READ) { |
1298 |
|
*data = channel->mmcrl; |
1299 |
|
} else { |
1300 |
|
#if DEBUG_MPSC |
1301 |
|
GT_LOG(gt_data,"MPSC channel %u set in mode %llu\n", |
1302 |
|
chan_id,*data & 0x07); |
1303 |
|
#endif |
1304 |
|
channel->mmcrl = *data; |
1305 |
|
} |
1306 |
|
break; |
1307 |
|
|
1308 |
|
/* Main Config Register High */ |
1309 |
|
case GT_MPSC_MMCRH: |
1310 |
|
if (op_type == MTS_READ) |
1311 |
|
*data = channel->mmcrh; |
1312 |
|
else |
1313 |
|
channel->mmcrh = *data; |
1314 |
|
break; |
1315 |
|
|
1316 |
|
/* Protocol Config Register */ |
1317 |
|
case GT_MPSC_MPCR: |
1318 |
|
if (op_type == MTS_READ) |
1319 |
|
*data = channel->mpcr; |
1320 |
|
else |
1321 |
|
channel->mpcr = *data; |
1322 |
|
break; |
1323 |
|
|
1324 |
|
/* Channel registers */ |
1325 |
|
case GT_MPSC_CHR1: |
1326 |
|
case GT_MPSC_CHR2: |
1327 |
|
case GT_MPSC_CHR3: |
1328 |
|
case GT_MPSC_CHR4: |
1329 |
|
case GT_MPSC_CHR5: |
1330 |
|
case GT_MPSC_CHR6: |
1331 |
|
case GT_MPSC_CHR7: |
1332 |
|
case GT_MPSC_CHR8: |
1333 |
|
case GT_MPSC_CHR9: |
1334 |
|
//case GT_MPSC_CHR10: |
1335 |
|
reg2 = (reg - GT_MPSC_CHR1) >> 2; |
1336 |
|
if (op_type == MTS_READ) |
1337 |
|
*data = channel->chr[reg2]; |
1338 |
|
else |
1339 |
|
channel->chr[reg2] = *data; |
1340 |
|
break; |
1341 |
|
|
1342 |
|
case GT_MPSC_CHR10: |
1343 |
|
if (op_type == MTS_READ) |
1344 |
|
*data = channel->chr[9] | 0x20; |
1345 |
|
else |
1346 |
|
channel->chr[9] = *data; |
1347 |
|
break; |
1348 |
|
|
1349 |
|
default: |
1350 |
|
/* unknown/unmanaged register */ |
1351 |
|
return(FALSE); |
1352 |
|
} |
1353 |
|
|
1354 |
|
return(TRUE); |
1355 |
|
} |
1356 |
|
|
1357 |
|
/* Set NIO for a MPSC channel */ |
1358 |
|
int dev_gt96100_mpsc_set_nio(struct gt_data *d,u_int chan_id,netio_desc_t *nio) |
1359 |
|
{ |
1360 |
|
struct mpsc_channel *channel; |
1361 |
|
|
1362 |
|
if (chan_id >= GT_MPSC_CHANNELS) |
1363 |
|
return(-1); |
1364 |
|
|
1365 |
|
channel = &d->mpsc[chan_id]; |
1366 |
|
|
1367 |
|
if (channel->nio != NULL) |
1368 |
|
return(-1); |
1369 |
|
|
1370 |
|
channel->nio = nio; |
1371 |
|
netio_rxl_add(nio,(netio_rx_handler_t)gt_sdma_handle_rx_pkt, |
1372 |
|
d,(void *)chan_id); |
1373 |
|
return(0); |
1374 |
|
} |
1375 |
|
|
1376 |
|
/* Unset NIO for a MPSC channel */ |
1377 |
|
int dev_gt96100_mpsc_unset_nio(struct gt_data *d,u_int chan_id) |
1378 |
|
{ |
1379 |
|
struct mpsc_channel *channel; |
1380 |
|
|
1381 |
|
if (chan_id >= GT_MPSC_CHANNELS) |
1382 |
|
return(-1); |
1383 |
|
|
1384 |
|
channel = &d->mpsc[chan_id]; |
1385 |
|
|
1386 |
|
if (channel->nio != NULL) { |
1387 |
|
netio_rxl_remove(channel->nio); |
1388 |
|
channel->nio = NULL; |
1389 |
|
} |
1390 |
|
|
1391 |
|
return(0); |
1392 |
|
} |
1393 |
|
|
1394 |
|
/* Set a VTTY for a MPSC channel */ |
1395 |
|
int dev_gt96100_mpsc_set_vtty(struct gt_data *d,u_int chan_id,vtty_t *vtty) |
1396 |
|
{ |
1397 |
|
struct mpsc_channel *channel; |
1398 |
|
|
1399 |
|
if (chan_id >= GT_MPSC_CHANNELS) |
1400 |
|
return(-1); |
1401 |
|
|
1402 |
|
channel = &d->mpsc[chan_id]; |
1403 |
|
|
1404 |
|
if (channel->vtty != NULL) |
1405 |
|
return(-1); |
1406 |
|
|
1407 |
|
channel->vtty = vtty; |
1408 |
|
return(0); |
1409 |
|
} |
1410 |
|
|
1411 |
|
/* Unset a VTTY for a MPSC channel */ |
1412 |
|
int dev_gt96100_mpsc_unset_vtty(struct gt_data *d,u_int chan_id) |
1413 |
|
{ |
1414 |
|
struct mpsc_channel *channel; |
1415 |
|
|
1416 |
|
if (chan_id >= GT_MPSC_CHANNELS) |
1417 |
|
return(-1); |
1418 |
|
|
1419 |
|
channel = &d->mpsc[chan_id]; |
1420 |
|
|
1421 |
|
if (channel->vtty != NULL) { |
1422 |
|
channel->vtty = NULL; |
1423 |
|
} |
1424 |
|
|
1425 |
|
return(0); |
1426 |
|
} |
1427 |
|
|
1428 |
|
/* ======================================================================== */ |
1429 |
|
/* Ethernet */ |
1430 |
|
/* ======================================================================== */ |
1431 |
|
|
1432 |
/* Trigger/clear Ethernet interrupt if one or both ports have pending events */ |
/* Trigger/clear Ethernet interrupt if one or both ports have pending events */ |
1433 |
static void gt_eth_set_int_status(struct gt_data *d) |
static void gt_eth_set_int_status(struct gt_data *d) |
1434 |
{ |
{ |
1435 |
if ((d->eth_ports[0].icr & GT_ICR_INT_SUM) || |
/* Compute Ether0 summary */ |
1436 |
(d->eth_ports[1].icr & GT_ICR_INT_SUM)) |
if (d->eth_ports[0].icr & GT_ICR_INT_SUM) { |
1437 |
vm_set_irq(d->vm,d->eth_irq); |
d->ser_cause_reg |= GT_SCR_ETH0_SUM; |
1438 |
else |
d->int_high_cause_reg |= GT_IHCR_ETH0_SUM; |
1439 |
vm_clear_irq(d->vm,d->eth_irq); |
} else { |
1440 |
|
d->ser_cause_reg &= ~GT_SCR_ETH0_SUM; |
1441 |
|
d->int_high_cause_reg &= ~GT_IHCR_ETH0_SUM; |
1442 |
|
} |
1443 |
|
|
1444 |
|
/* Compute Ether1 summary */ |
1445 |
|
if (d->eth_ports[1].icr & GT_ICR_INT_SUM) { |
1446 |
|
d->ser_cause_reg |= GT_SCR_ETH1_SUM; |
1447 |
|
d->int_high_cause_reg |= GT_IHCR_ETH1_SUM; |
1448 |
|
} else { |
1449 |
|
d->ser_cause_reg &= ~GT_SCR_ETH1_SUM; |
1450 |
|
d->int_high_cause_reg &= ~GT_IHCR_ETH1_SUM; |
1451 |
|
} |
1452 |
|
|
1453 |
|
gt96k_update_irq_status(d); |
1454 |
} |
} |
1455 |
|
|
1456 |
/* Update the Ethernet port interrupt status */ |
/* Update the Ethernet port interrupt status */ |
1556 |
m_uint64_t *data) |
m_uint64_t *data) |
1557 |
{ |
{ |
1558 |
struct gt_data *d = dev->priv_data; |
struct gt_data *d = dev->priv_data; |
1559 |
struct eth_port *port; |
struct eth_port *port = NULL; |
|
u_int port_id = 0; |
|
1560 |
u_int queue; |
u_int queue; |
1561 |
|
|
1562 |
if ((offset < 0x80000) || (offset >= 0x90000)) |
if ((offset < 0x80000) || (offset >= 0x90000)) |
1563 |
return(FALSE); |
return(FALSE); |
1564 |
|
|
1565 |
if (op_type == MTS_WRITE) |
/* Determine the Ethernet port */ |
|
*data = swap32(*data); |
|
|
|
|
|
/* Detemine the Ethernet port */ |
|
1566 |
if ((offset >= 0x84800) && (offset < 0x88800)) |
if ((offset >= 0x84800) && (offset < 0x88800)) |
1567 |
port_id = 0; |
port = &d->eth_ports[0]; |
1568 |
|
else if ((offset >= 0x88800) && (offset < 0x8c800)) |
1569 |
if ((offset >= 0x88800) && (offset < 0x8c800)) |
port = &d->eth_ports[1]; |
|
port_id = 1; |
|
|
|
|
|
port = &d->eth_ports[port_id]; |
|
1570 |
|
|
1571 |
switch(offset) { |
switch(offset) { |
1572 |
/* SMI register */ |
/* SMI register */ |
1791 |
#endif |
#endif |
1792 |
} |
} |
1793 |
|
|
|
if (op_type == MTS_READ) |
|
|
*data = swap32(*data); |
|
|
|
|
1794 |
return(TRUE); |
return(TRUE); |
1795 |
} |
} |
1796 |
|
|
1801 |
u_int op_size,u_int op_type,m_uint64_t *data) |
u_int op_size,u_int op_type,m_uint64_t *data) |
1802 |
{ |
{ |
1803 |
struct gt_data *gt_data = dev->priv_data; |
struct gt_data *gt_data = dev->priv_data; |
1804 |
|
|
1805 |
if (op_type == MTS_READ) |
GT_LOCK(gt_data); |
1806 |
|
|
1807 |
|
if (op_type == MTS_READ) { |
1808 |
*data = 0; |
*data = 0; |
1809 |
|
} else { |
1810 |
|
if (op_size == 4) |
1811 |
|
*data = swap32(*data); |
1812 |
|
} |
1813 |
|
|
1814 |
|
#if 0 /* DEBUG */ |
1815 |
|
if (offset != 0x101a80) { |
1816 |
|
if (op_type == MTS_READ) { |
1817 |
|
cpu_log(cpu,"GT96100","READ OFFSET 0x%6.6x\n",offset); |
1818 |
|
} else { |
1819 |
|
cpu_log(cpu,"GT96100","WRITE OFFSET 0x%6.6x, DATA=0x%8.8llx\n", |
1820 |
|
offset,*data); |
1821 |
|
} |
1822 |
|
} |
1823 |
|
#endif |
1824 |
|
|
1825 |
|
/* DMA registers */ |
1826 |
if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0) |
if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0) |
1827 |
return NULL; |
goto done; |
1828 |
|
|
1829 |
|
/* Serial DMA channel registers */ |
1830 |
|
if (gt_sdma_access(cpu,dev,offset,op_size,op_type,data) != 0) |
1831 |
|
goto done; |
1832 |
|
|
1833 |
|
/* MPSC registers */ |
1834 |
|
if (gt_mpsc_access(cpu,dev,offset,op_size,op_type,data) != 0) |
1835 |
|
goto done; |
1836 |
|
|
1837 |
|
/* Ethernet registers */ |
1838 |
if (gt_eth_access(cpu,dev,offset,op_size,op_type,data) != 0) |
if (gt_eth_access(cpu,dev,offset,op_size,op_type,data) != 0) |
1839 |
return NULL; |
goto done; |
1840 |
|
|
1841 |
switch(offset) { |
switch(offset) { |
1842 |
/* Watchdog configuration register */ |
/* Watchdog configuration register */ |
1849 |
|
|
1850 |
case 0x008: /* ras10_low */ |
case 0x008: /* ras10_low */ |
1851 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
1852 |
*data = swap32(0x000); |
*data = 0x000; |
1853 |
break; |
break; |
1854 |
case 0x010: /* ras10_high */ |
case 0x010: /* ras10_high */ |
1855 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
1856 |
*data = swap32(0x7F); |
*data = 0x7F; |
1857 |
break; |
break; |
1858 |
case 0x018: /* ras32_low */ |
case 0x018: /* ras32_low */ |
1859 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
1860 |
*data = swap32(0x100); |
*data = 0x100; |
1861 |
break; |
break; |
1862 |
case 0x020: /* ras32_high */ |
case 0x020: /* ras32_high */ |
1863 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
1864 |
*data = swap32(0x7F); |
*data = 0x7F; |
1865 |
break; |
break; |
1866 |
case 0x400: /* ras0_low */ |
case 0x400: /* ras0_low */ |
1867 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
1868 |
*data = swap32(0x00); |
*data = 0x00; |
1869 |
break; |
break; |
1870 |
case 0x404: /* ras0_high */ |
case 0x404: /* ras0_high */ |
1871 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
1872 |
*data = swap32(0xFF); |
*data = 0xFF; |
1873 |
break; |
break; |
1874 |
case 0x408: /* ras1_low */ |
case 0x408: /* ras1_low */ |
1875 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
1876 |
*data = swap32(0x7F); |
*data = 0x7F; |
1877 |
break; |
break; |
1878 |
case 0x40c: /* ras1_high */ |
case 0x40c: /* ras1_high */ |
1879 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
1880 |
*data = swap32(0x00); |
*data = 0x00; |
1881 |
break; |
break; |
1882 |
case 0x410: /* ras2_low */ |
case 0x410: /* ras2_low */ |
1883 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
1884 |
*data = swap32(0x00); |
*data = 0x00; |
1885 |
break; |
break; |
1886 |
case 0x414: /* ras2_high */ |
case 0x414: /* ras2_high */ |
1887 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
1888 |
*data = swap32(0xFF); |
*data = 0xFF; |
1889 |
break; |
break; |
1890 |
case 0x418: /* ras3_low */ |
case 0x418: /* ras3_low */ |
1891 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
1892 |
*data = swap32(0x7F); |
*data = 0x7F; |
1893 |
break; |
break; |
1894 |
case 0x41c: /* ras3_high */ |
case 0x41c: /* ras3_high */ |
1895 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
1896 |
*data = swap32(0x00); |
*data = 0x00; |
1897 |
break; |
break; |
1898 |
case 0xc08: /* pci0_cs10 */ |
case 0xc08: /* pci0_cs10 */ |
1899 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
1900 |
*data = swap32(0xFFF); |
*data = 0xFFF; |
1901 |
break; |
break; |
1902 |
case 0xc0c: /* pci0_cs32 */ |
case 0xc0c: /* pci0_cs32 */ |
1903 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
1904 |
*data = swap32(0xFFF); |
*data = 0xFFF; |
1905 |
break; |
break; |
1906 |
|
|
1907 |
case 0xc00: /* pci_cmd */ |
case 0xc00: /* pci_cmd */ |
1908 |
if (op_type == MTS_READ) |
if (op_type == MTS_READ) |
1909 |
*data = swap32(0x00008001); |
*data = 0x00008001; |
1910 |
break; |
break; |
1911 |
|
|
1912 |
/* ===== Interrupt Main Cause Register ===== */ |
/* ===== Interrupt Main Cause Register ===== */ |
1913 |
case 0xc18: |
case 0xc18: |
1914 |
if (op_type == MTS_READ) { |
if (op_type == MTS_READ) { |
1915 |
*data = gt_data->int_cause_reg; |
*data = gt_data->int_cause_reg; |
|
|
|
|
/* TODO: signal Eth0/Eth1 */ |
|
|
//*data |= (1 << 30) | (1 << 31) | 1; |
|
|
|
|
|
*data = swap32(*data); |
|
1916 |
} else { |
} else { |
1917 |
gt_data->int_cause_reg &= swap32(*data); |
/* Don't touch bit 0, 30 and 31 which are read-only */ |
1918 |
gt_update_irq_status(gt_data); |
gt_data->int_cause_reg &= (*data | 0xC0000001); |
1919 |
|
gt96k_update_irq_status(gt_data); |
1920 |
} |
} |
1921 |
break; |
break; |
1922 |
|
|
1923 |
/* ===== Interrupt Mask Register ===== */ |
/* ===== Interrupt High Cause Register ===== */ |
1924 |
|
case 0xc98: |
1925 |
|
if (op_type == MTS_READ) |
1926 |
|
*data = gt_data->int_high_cause_reg; |
1927 |
|
break; |
1928 |
|
|
1929 |
|
/* ===== Interrupt0 Main Mask Register ===== */ |
1930 |
case 0xc1c: |
case 0xc1c: |
1931 |
if (op_type == MTS_READ) { |
if (op_type == MTS_READ) { |
1932 |
*data = swap32(gt_data->int_mask_reg); |
*data = gt_data->int0_main_mask_reg; |
1933 |
} else { |
} else { |
1934 |
gt_data->int_mask_reg = swap32(*data); |
gt_data->int0_main_mask_reg = *data; |
1935 |
gt_update_irq_status(gt_data); |
gt96k_update_irq_status(gt_data); |
1936 |
} |
} |
1937 |
break; |
break; |
1938 |
|
|
1939 |
/* ===== Interrupt High Cause Register ===== */ |
/* ===== Interrupt0 High Mask Register ===== */ |
1940 |
case 0xc98: |
case 0xc9c: |
1941 |
if (op_type == MTS_READ) { |
if (op_type == MTS_READ) { |
1942 |
*data = 0; |
*data = gt_data->int0_high_mask_reg; |
1943 |
|
} else { |
1944 |
/* interrupt on ethernet port 0 ? */ |
gt_data->int0_high_mask_reg = *data; |
1945 |
if (gt_data->eth_ports[0].icr & GT_ICR_INT_SUM) |
gt96k_update_irq_status(gt_data); |
1946 |
*data |= GT_IHCR_ETH0_SUM; |
} |
1947 |
|
break; |
1948 |
|
|
1949 |
/* interrupt on ethernet port 1 ? */ |
/* ===== Interrupt1 Main Mask Register ===== */ |
1950 |
if (gt_data->eth_ports[1].icr & GT_ICR_INT_SUM) |
case 0xc24: |
1951 |
*data |= GT_IHCR_ETH1_SUM; |
if (op_type == MTS_READ) { |
1952 |
|
*data = gt_data->int1_main_mask_reg; |
1953 |
|
} else { |
1954 |
|
gt_data->int1_main_mask_reg = *data; |
1955 |
|
gt96k_update_irq_status(gt_data); |
1956 |
|
} |
1957 |
|
break; |
1958 |
|
|
1959 |
*data = swap32(*data); |
/* ===== Interrupt1 High Mask Register ===== */ |
1960 |
|
case 0xca4: |
1961 |
|
if (op_type == MTS_READ) { |
1962 |
|
*data = gt_data->int1_high_mask_reg; |
1963 |
|
} else { |
1964 |
|
gt_data->int1_high_mask_reg = *data; |
1965 |
|
gt96k_update_irq_status(gt_data); |
1966 |
} |
} |
1967 |
break; |
break; |
1968 |
|
|
1969 |
/* Serial Cause Register */ |
/* ===== Serial Cause Register (read-only) ===== */ |
1970 |
case 0x103a00: |
case 0x103a00: |
1971 |
|
if (op_type == MTS_READ) |
1972 |
|
*data = gt_data->ser_cause_reg; |
1973 |
|
break; |
1974 |
|
|
1975 |
|
/* ===== SerInt0 Mask Register ===== */ |
1976 |
|
case 0x103a80: |
1977 |
if (op_type == MTS_READ) { |
if (op_type == MTS_READ) { |
1978 |
*data = 0; |
*data = gt_data->serint0_mask_reg; |
1979 |
|
} else { |
1980 |
|
gt_data->serint0_mask_reg = *data; |
1981 |
|
gt96k_update_irq_status(gt_data); |
1982 |
|
} |
1983 |
|
break; |
1984 |
|
|
1985 |
/* interrupt on ethernet port 0 ? */ |
/* ===== SerInt1 Mask Register ===== */ |
1986 |
if (gt_data->eth_ports[0].icr & GT_ICR_INT_SUM) |
case 0x103a88: |
1987 |
*data |= GT_SCR_ETH0_SUM; |
if (op_type == MTS_READ) { |
1988 |
|
*data = gt_data->serint1_mask_reg; |
1989 |
/* interrupt on ethernet port 1 ? */ |
} else { |
1990 |
if (gt_data->eth_ports[1].icr & GT_ICR_INT_SUM) |
gt_data->serint1_mask_reg = *data; |
1991 |
*data |= GT_SCR_ETH1_SUM; |
gt96k_update_irq_status(gt_data); |
1992 |
|
} |
1993 |
|
break; |
1994 |
|
|
1995 |
gt_update_irq_status(gt_data); |
/* ===== SDMA cause register ===== */ |
1996 |
*data = swap32(*data); |
case 0x103a10: |
1997 |
|
if (op_type == MTS_READ) { |
1998 |
|
*data = gt_data->sdma_cause_reg; |
1999 |
|
} else { |
2000 |
|
gt_data->sdma_cause_reg &= *data; |
2001 |
|
gt_sdma_update_int_status(gt_data); |
2002 |
|
} |
2003 |
|
break; |
2004 |
|
|
2005 |
|
case 0x103a13: |
2006 |
|
if (op_type == MTS_WRITE) { |
2007 |
|
//printf("Writing 0x103a13, *data = 0x%8.8llx, " |
2008 |
|
// "sdma_cause_reg=0x%8.8x\n", |
2009 |
|
// *data, gt_data->sdma_cause_reg); |
2010 |
|
|
2011 |
|
gt_data->sdma_cause_reg = 0; |
2012 |
|
gt_sdma_update_channel_int_status(gt_data,6); |
2013 |
|
gt_sdma_update_channel_int_status(gt_data,7); |
2014 |
|
} |
2015 |
|
break; |
2016 |
|
|
2017 |
|
/* ===== SDMA mask register ===== */ |
2018 |
|
case 0x103a90: |
2019 |
|
if (op_type == MTS_READ) { |
2020 |
|
*data = gt_data->sdma_mask_reg; |
2021 |
|
} else { |
2022 |
|
gt_data->sdma_mask_reg = *data; |
2023 |
|
gt_sdma_update_int_status(gt_data); |
2024 |
|
} |
2025 |
|
break; |
2026 |
|
|
2027 |
|
case 0x103a38: |
2028 |
|
case 0x103a3c: |
2029 |
|
case 0x100A48: |
2030 |
|
if (op_type == MTS_READ) { |
2031 |
|
//*data = 0xFFFFFFFF; |
2032 |
} |
} |
2033 |
break; |
break; |
2034 |
|
|
2035 |
|
/* CIU Arbiter Configuration Register */ |
2036 |
|
case 0x101ac0: |
2037 |
|
if (op_type == MTS_READ) |
2038 |
|
*data = 0x80000000; |
2039 |
|
break; |
2040 |
|
|
2041 |
|
/* SGCR - SDMA Global Configuration Register */ |
2042 |
|
case GT_REG_SGC: |
2043 |
|
if (op_type == MTS_READ) |
2044 |
|
*data = gt_data->sgcr; |
2045 |
|
else |
2046 |
|
gt_data->sgcr = *data; |
2047 |
|
break; |
2048 |
|
|
2049 |
/* ===== PCI Bus 1 ===== */ |
/* ===== PCI Bus 1 ===== */ |
2050 |
case 0xcf0: |
case 0xcf0: |
2051 |
pci_dev_addr_handler(cpu,gt_data->bus[1],op_type,TRUE,data); |
pci_dev_addr_handler(cpu,gt_data->bus[1],op_type,FALSE,data); |
2052 |
break; |
break; |
2053 |
|
|
2054 |
case 0xcf4: |
case 0xcf4: |
2055 |
pci_dev_data_handler(cpu,gt_data->bus[1],op_type,TRUE,data); |
pci_dev_data_handler(cpu,gt_data->bus[1],op_type,FALSE,data); |
2056 |
break; |
break; |
2057 |
|
|
2058 |
/* ===== PCI Bus 0 ===== */ |
/* ===== PCI Bus 0 ===== */ |
2059 |
case PCI_BUS_ADDR: /* pci configuration address (0xcf8) */ |
case PCI_BUS_ADDR: /* pci configuration address (0xcf8) */ |
2060 |
pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,TRUE,data); |
pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,FALSE,data); |
2061 |
break; |
break; |
2062 |
|
|
2063 |
case PCI_BUS_DATA: /* pci data address (0xcfc) */ |
case PCI_BUS_DATA: /* pci data address (0xcfc) */ |
2064 |
pci_dev_data_handler(cpu,gt_data->bus[0],op_type,TRUE,data); |
pci_dev_data_handler(cpu,gt_data->bus[0],op_type,FALSE,data); |
2065 |
break; |
break; |
2066 |
|
|
2067 |
#if DEBUG_UNKNOWN |
#if DEBUG_UNKNOWN |
2076 |
#endif |
#endif |
2077 |
} |
} |
2078 |
|
|
2079 |
|
done: |
2080 |
|
GT_UNLOCK(gt_data); |
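|
|
/* GT registers are little-endian: byte-swap data returned by 32-bit reads */ |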
2081 |
|
if ((op_type == MTS_READ) && (op_size == 4)) |
2082 |
|
*data = swap32(*data); |
2083 |
return NULL; |
return NULL; |
2084 |
} |
} |
2085 |
|
|
|
/* Read an Ethernet descriptor */ |
|
|
static void gt_eth_desc_read(struct gt_data *d,m_uint32_t addr, |
|
|
struct eth_desc *desc) |
|
|
{ |
|
|
physmem_copy_from_vm(d->vm,desc,addr,sizeof(struct eth_desc)); |
|
|
|
|
|
/* byte-swapping */ |
|
|
desc->cmd_stat = vmtoh32(desc->cmd_stat); |
|
|
desc->buf_size = vmtoh32(desc->buf_size); |
|
|
desc->next_ptr = vmtoh32(desc->next_ptr); |
|
|
desc->buf_ptr = vmtoh32(desc->buf_ptr); |
|
|
} |
|
|
|
|
|
/* Write an Ethernet descriptor */ |
|
|
static void gt_eth_desc_write(struct gt_data *d,m_uint32_t addr, |
|
|
struct eth_desc *desc) |
|
|
{ |
|
|
struct eth_desc tmp; |
|
|
|
|
|
/* byte-swapping */ |
|
|
tmp.cmd_stat = vmtoh32(desc->cmd_stat); |
|
|
tmp.buf_size = vmtoh32(desc->buf_size); |
|
|
tmp.next_ptr = vmtoh32(desc->next_ptr); |
|
|
tmp.buf_ptr = vmtoh32(desc->buf_ptr); |
|
|
|
|
|
physmem_copy_to_vm(d->vm,&tmp,addr,sizeof(struct eth_desc)); |
|
|
} |
|
|
|
|
2086 |
/* Handle a TX queue (single packet) */ |
/* Handle a TX queue (single packet) */ |
2087 |
static int gt_eth_handle_txqueue(struct gt_data *d,struct eth_port *port, |
static int gt_eth_handle_txqueue(struct gt_data *d,struct eth_port *port, |
2088 |
int queue) |
int queue) |
2089 |
{ |
{ |
2090 |
u_char pkt[GT_MAX_PKT_SIZE],*pkt_ptr; |
u_char pkt[GT_MAX_PKT_SIZE],*pkt_ptr; |
2091 |
struct eth_desc txd0,ctxd,*ptxd; |
struct sdma_desc txd0,ctxd,*ptxd; |
2092 |
m_uint32_t tx_start,tx_current; |
m_uint32_t tx_start,tx_current; |
2093 |
m_uint32_t len,tot_len; |
m_uint32_t len,tot_len; |
2094 |
int abort = FALSE; |
int abort = FALSE; |
2107 |
return(FALSE); |
return(FALSE); |
2108 |
|
|
2109 |
ptxd = &txd0; |
ptxd = &txd0; |
2110 |
gt_eth_desc_read(d,tx_start,ptxd); |
gt_sdma_desc_read(d,tx_start,ptxd); |
2111 |
|
|
2112 |
/* If we don't own the first descriptor, we cannot transmit */ |
/* If we don't own the first descriptor, we cannot transmit */ |
2113 |
if (!(txd0.cmd_stat & GT_TXDESC_OWN)) |
if (!(txd0.cmd_stat & GT_TXDESC_OWN)) |
2155 |
} |
} |
2156 |
|
|
2157 |
/* Fetch the next descriptor */ |
/* Fetch the next descriptor */ |
2158 |
gt_eth_desc_read(d,tx_current,&ctxd); |
gt_sdma_desc_read(d,tx_current,&ctxd); |
2159 |
ptxd = &ctxd; |
ptxd = &ctxd; |
2160 |
} |
} |
2161 |
|
|
2219 |
{ |
{ |
2220 |
int i; |
int i; |
2221 |
|
|
2222 |
|
GT_LOCK(d); |
2223 |
|
|
2224 |
for(i=0;i<GT_ETH_PORTS;i++) |
for(i=0;i<GT_ETH_PORTS;i++) |
2225 |
gt_eth_handle_port_txqueues(d,i); |
gt_eth_handle_port_txqueues(d,i); |
2226 |
|
|
2227 |
|
GT_UNLOCK(d); |
2228 |
return(TRUE); |
return(TRUE); |
2229 |
} |
} |
2230 |
|
|
2377 |
return(0); |
return(0); |
2378 |
} |
} |
2379 |
|
|
|
/* Put a packet in buffer of a descriptor */ |
|
|
static void gt_eth_rxdesc_put_pkt(struct gt_data *d,struct eth_desc *rxd, |
|
|
u_char **pkt,ssize_t *pkt_len) |
|
|
{ |
|
|
ssize_t len,cp_len; |
|
|
|
|
|
len = (rxd->buf_size & GT_RXDESC_BS_MASK) >> GT_RXDESC_BS_SHIFT; |
|
|
|
|
|
/* compute the data length to copy */ |
|
|
cp_len = m_min(len,*pkt_len); |
|
|
|
|
|
/* copy packet data to the VM physical RAM */ |
|
|
physmem_copy_to_vm(d->vm,*pkt,rxd->buf_ptr,cp_len); |
|
|
|
|
|
/* set the byte count in descriptor */ |
|
|
rxd->buf_size |= cp_len; |
|
|
|
|
|
*pkt += cp_len; |
|
|
*pkt_len -= cp_len; |
|
|
} |
|
|
|
|
2380 |
/* Put a packet in the specified RX queue */ |
/* Put a packet in the specified RX queue */ |
2381 |
static int gt_eth_handle_rxqueue(struct gt_data *d,u_int port_id,u_int queue, |
static int gt_eth_handle_rxqueue(struct gt_data *d,u_int port_id,u_int queue, |
2382 |
u_char *pkt,ssize_t pkt_len) |
u_char *pkt,ssize_t pkt_len) |
2383 |
{ |
{ |
2384 |
struct eth_port *port = &d->eth_ports[port_id]; |
struct eth_port *port = &d->eth_ports[port_id]; |
2385 |
m_uint32_t rx_start,rx_current; |
m_uint32_t rx_start,rx_current; |
2386 |
struct eth_desc rxd0,rxdn,*rxdc; |
struct sdma_desc rxd0,rxdn,*rxdc; |
2387 |
ssize_t tot_len = pkt_len; |
ssize_t tot_len = pkt_len; |
2388 |
u_char *pkt_ptr = pkt; |
u_char *pkt_ptr = pkt; |
2389 |
n_eth_dot1q_hdr_t *hdr; |
n_eth_dot1q_hdr_t *hdr; |
2411 |
return(FALSE); |
return(FALSE); |
2412 |
|
|
2413 |
/* Load the first RX descriptor */ |
/* Load the first RX descriptor */ |
2414 |
gt_eth_desc_read(d,rx_start,&rxd0); |
gt_sdma_desc_read(d,rx_start,&rxd0); |
2415 |
|
|
2416 |
#if DEBUG_ETH_RX |
#if DEBUG_ETH_RX |
2417 |
GT_LOG(d,"port %u/queue %u: reading desc at 0x%8.8x " |
GT_LOG(d,"port %u/queue %u: reading desc at 0x%8.8x " |
2428 |
goto dma_error; |
goto dma_error; |
2429 |
|
|
2430 |
/* Put data into the descriptor buffer */ |
/* Put data into the descriptor buffer */ |
2431 |
gt_eth_rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len); |
gt_sdma_rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len); |
2432 |
|
|
2433 |
/* Clear the OWN bit */ |
/* Clear the OWN bit */ |
2434 |
rxdc->cmd_stat &= ~GT_RXDESC_OWN; |
rxdc->cmd_stat &= ~GT_RXDESC_OWN; |
2441 |
|
|
2442 |
/* Update the descriptor in host memory (but not the 1st) */ |
/* Update the descriptor in VM physical RAM (but not the 1st) */ |
2443 |
if (i != 0) |
if (i != 0) |
2444 |
gt_eth_desc_write(d,rx_current,rxdc); |
gt_sdma_desc_write(d,rx_current,rxdc); |
2445 |
|
|
2446 |
/* Get address of the next descriptor */ |
/* Get address of the next descriptor */ |
2447 |
rx_current = rxdc->next_ptr; |
rx_current = rxdc->next_ptr; |
2453 |
goto dma_error; |
goto dma_error; |
2454 |
|
|
2455 |
/* Read the next descriptor from VM physical RAM */ |
/* Read the next descriptor from VM physical RAM */ |
2456 |
gt_eth_desc_read(d,rx_current,&rxdn); |
gt_sdma_desc_read(d,rx_current,&rxdn); |
2457 |
rxdc = &rxdn; |
rxdc = &rxdn; |
2458 |
} |
} |
2459 |
|
|
2472 |
if (ntohs(hdr->type) <= N_ETH_MTU) /* 802.3 frame */ |
if (ntohs(hdr->type) <= N_ETH_MTU) /* 802.3 frame */ |
2473 |
rxd0.cmd_stat |= GT_RXDESC_FT; |
rxd0.cmd_stat |= GT_RXDESC_FT; |
2474 |
|
|
2475 |
gt_eth_desc_write(d,rx_start,&rxd0); |
gt_sdma_desc_write(d,rx_start,&rxd0); |
2476 |
|
|
2477 |
/* Update MIB counters */ |
/* Update MIB counters */ |
2478 |
port->rx_bytes += pkt_len; |
port->rx_bytes += pkt_len; |
2499 |
|
|
2500 |
port = &d->eth_ports[port_id]; |
port = &d->eth_ports[port_id]; |
2501 |
|
|
2502 |
/* Check if RX DMA is active */ |
GT_LOCK(d); |
2503 |
if (!(port->sdcmr & GT_SDCMR_ERD)) |
|
2504 |
|
/* Check if RX DMA is active */ |
2505 |
|
if (!(port->sdcmr & GT_SDCMR_ERD)) { |
2506 |
|
GT_UNLOCK(d); |
2507 |
return(FALSE); |
return(FALSE); |
2508 |
|
} |
2509 |
|
|
2510 |
queue = 0; /* At this time, only put packet in queue 0 */ |
queue = 0; /* At this time, packets are only put in queue 0 */ |
2511 |
gt_eth_handle_rxqueue(d,port_id,queue,pkt,pkt_len); |
gt_eth_handle_rxqueue(d,port_id,queue,pkt,pkt_len); |
2512 |
|
GT_UNLOCK(d); |
2513 |
return(TRUE); |
return(TRUE); |
2514 |
} |
} |
2515 |
|
|
2543 |
} |
} |
2544 |
|
|
2545 |
memset(d,0,sizeof(*d)); |
memset(d,0,sizeof(*d)); |
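|
|
/* lock protecting the device state (see GT_LOCK/GT_UNLOCK) */ |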
2546 |
|
pthread_mutex_init(&d->lock,NULL); |
2547 |
d->vm = vm; |
d->vm = vm; |
2548 |
d->bus[0] = vm->pci_bus[0]; |
d->bus[0] = vm->pci_bus[0]; |
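|
|
/* per-chip IRQ status update callback */ |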
2549 |
|
d->gt_update_irq_status = gt64k_update_irq_status; |
2550 |
|
|
2551 |
vm_object_init(&d->vm_obj); |
vm_object_init(&d->vm_obj); |
2552 |
d->vm_obj.name = name; |
d->vm_obj.name = name; |
2606 |
} |
} |
2607 |
|
|
2608 |
memset(d,0,sizeof(*d)); |
memset(d,0,sizeof(*d)); |
2609 |
|
pthread_mutex_init(&d->lock,NULL); |
2610 |
d->vm = vm; |
d->vm = vm; |
2611 |
d->bus[0] = vm->pci_bus[0]; |
d->bus[0] = vm->pci_bus[0]; |
2612 |
d->bus[1] = vm->pci_bus[1]; |
d->bus[1] = vm->pci_bus[1]; |
2613 |
|
d->gt_update_irq_status = gt64k_update_irq_status; |
2614 |
|
|
2615 |
vm_object_init(&d->vm_obj); |
vm_object_init(&d->vm_obj); |
2616 |
d->vm_obj.name = name; |
d->vm_obj.name = name; |
2660 |
/* Create a new GT96100 controller */ |
/* Create a new GT96100 controller */ |
2661 |
int dev_gt96100_init(vm_instance_t *vm,char *name, |
int dev_gt96100_init(vm_instance_t *vm,char *name, |
2662 |
m_uint64_t paddr,m_uint32_t len, |
m_uint64_t paddr,m_uint32_t len, |
2663 |
u_int dma_irq,u_int eth_irq) |
u_int int0_irq,u_int int1_irq, |
2664 |
|
u_int serint0_irq,u_int serint1_irq) |
2665 |
{ |
{ |
2666 |
struct gt_data *d; |
struct gt_data *d; |
2667 |
|
u_int i; |
2668 |
|
|
2669 |
if (!(d = malloc(sizeof(*d)))) { |
if (!(d = malloc(sizeof(*d)))) { |
2670 |
fprintf(stderr,"gt96100: unable to create device data.\n"); |
fprintf(stderr,"gt96100: unable to create device data.\n"); |
2672 |
} |
} |
2673 |
|
|
2674 |
memset(d,0,sizeof(*d)); |
memset(d,0,sizeof(*d)); |
2675 |
|
pthread_mutex_init(&d->lock,NULL); |
2676 |
d->name = name; |
d->name = name; |
2677 |
d->vm = vm; |
d->vm = vm; |
2678 |
d->eth_irq = eth_irq; |
d->gt_update_irq_status = gt96k_update_irq_status; |
2679 |
|
|
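|
|
/* assign an ID to each SDMA channel of both groups */ |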
2680 |
|
for(i=0;i<GT_SDMA_CHANNELS;i++) { |
2681 |
|
d->sdma[0][i].id = i; |
2682 |
|
d->sdma[1][i].id = i; |
2683 |
|
} |
2684 |
|
|
2685 |
|
/* IRQ setup */ |
2686 |
|
d->int0_irq = int0_irq; |
2687 |
|
d->int1_irq = int1_irq; |
2688 |
|
d->serint0_irq = serint0_irq; |
2689 |
|
d->serint1_irq = serint1_irq; |
2690 |
|
|
2691 |
d->bus[0] = vm->pci_bus[0]; |
d->bus[0] = vm->pci_bus[0]; |
2692 |
d->bus[1] = vm->pci_bus[1]; |
d->bus[1] = vm->pci_bus[1]; |
2693 |
|
|
2707 |
if (!pci_dev_lookup(d->bus[0],0,0,0)) { |
if (!pci_dev_lookup(d->bus[0],0,0,0)) { |
2708 |
d->pci_dev = pci_dev_add(d->bus[0],name, |
d->pci_dev = pci_dev_add(d->bus[0],name, |
2709 |
PCI_VENDOR_GALILEO,PCI_PRODUCT_GALILEO_GT96100, |
PCI_VENDOR_GALILEO,PCI_PRODUCT_GALILEO_GT96100, |
2710 |
0,0,dma_irq,d,NULL,pci_gt96100_read,NULL); |
0,0,-1,d,NULL,pci_gt96100_read,NULL); |
2711 |
if (!d->pci_dev) { |
if (!d->pci_dev) { |
2712 |
fprintf(stderr,"gt96100: unable to create PCI device.\n"); |
fprintf(stderr,"gt96100: unable to create PCI device.\n"); |
2713 |
return(-1); |
return(-1); |
2723 |
return(0); |
return(0); |
2724 |
} |
} |
2725 |
|
|
2726 |
/* Bind a NIO to GT96100 device */ |
/* Bind a NIO to a GT96100 Ethernet port */ |
2727 |
int dev_gt96100_set_nio(struct gt_data *d,u_int port_id,netio_desc_t *nio) |
int dev_gt96100_eth_set_nio(struct gt_data *d,u_int port_id,netio_desc_t *nio) |
2728 |
{ |
{ |
2729 |
struct eth_port *port; |
struct eth_port *port; |
2730 |
|
|
2731 |
if (port_id >= GT_ETH_PORTS) |
if (!d || (port_id >= GT_ETH_PORTS)) |
2732 |
return(-1); |
return(-1); |
2733 |
|
|
2734 |
port = &d->eth_ports[port_id]; |
port = &d->eth_ports[port_id]; |
2744 |
} |
} |
2745 |
|
|
2746 |
/* Unbind a NIO from a GT96100 device */ |
/* Unbind a NIO from a GT96100 Ethernet port */ |
2747 |
int dev_gt96100_unset_nio(struct gt_data *d,u_int port_id) |
int dev_gt96100_eth_unset_nio(struct gt_data *d,u_int port_id) |
2748 |
{ |
{ |
2749 |
struct eth_port *port; |
struct eth_port *port; |
2750 |
|
|
2751 |
if (port_id >= GT_ETH_PORTS) |
if (!d || (port_id >= GT_ETH_PORTS)) |
2752 |
return(-1); |
return(-1); |
2753 |
|
|
2754 |
port = &d->eth_ports[port_id]; |
port = &d->eth_ports[port_id]; |
2760 |
|
|
2761 |
return(0); |
return(0); |
2762 |
} |
} |
2763 |
|
|
2764 |
|
/* Show Ethernet port debugging information */ |
2765 |
|
static void dev_gt96100_show_eth_info(struct gt_data *d,u_int port_id) |
2766 |
|
{ |
2767 |
|
struct eth_port *port; |
2768 |
|
|
2769 |
|
port = &d->eth_ports[port_id]; |
2770 |
|
|
2771 |
|
printf("GT96100 Ethernet port %u:\n",port_id); |
2772 |
|
printf(" PCR = 0x%8.8x\n",port->pcr); |
2773 |
|
printf(" PCXR = 0x%8.8x\n",port->pcxr); |
2774 |
|
printf(" PCMR = 0x%8.8x\n",port->pcmr); |
2775 |
|
printf(" PSR = 0x%8.8x\n",port->psr); |
2776 |
|
printf(" ICR = 0x%8.8x\n",port->icr); |
2777 |
|
printf(" IMR = 0x%8.8x\n",port->imr); |
2778 |
|
|
2779 |
|
printf("\n"); |
2780 |
|
} |
2781 |
|
|
2782 |
|
/* Show debugging information */ |
2783 |
|
int dev_gt96100_show_info(struct gt_data *d) |
2784 |
|
{ |
2785 |
|
GT_LOCK(d); |
2786 |
|
dev_gt96100_show_eth_info(d,0); |
2787 |
|
dev_gt96100_show_eth_info(d,1); |
2788 |
|
GT_UNLOCK(d); |
2789 |
|
return(0); |
2790 |
|
} |