NIC register:
IntrStatus = 0x3E, /* Interrupt status */
int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
|- dev->irq = pdev->irq;
int cp_open(struct net_device *dev)
|- request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
#define cpr16(reg) readw(cp->regs + (reg))
cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
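For reference, cp_norx_intr_mask is just the normal interrupt mask with the RX sources stripped out; the full mask (from the same enum in 8139cp.c, restored once polling finishes) also carries the RX bits:

cp_intr_mask = PciErr | LinkChg |
               RxOK | RxErr | RxEmpty | RxFIFOOvr |
               TxOK | TxErr | TxEmpty,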
static irqreturn_t cp_interrupt(int irq, void *dev_instance){
//...
status = cpr16(IntrStatus);
//...
if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
if (napi_schedule_prep(&cp->napi)) {
// mask the RX-related bits in IntrMask: we enter NAPI mode
cpw16_f(IntrMask, cp_norx_intr_mask);
__napi_schedule(&cp->napi);
|-____napi_schedule(&__get_cpu_var(softnet_data), n);
|- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
//...
}
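Because the IRQ was requested with IRQF_SHARED, the handler must recognize interrupts that are not ours. Right after that first read of IntrStatus, 8139cp.c bails out (the exact guard varies slightly across versions):

if (!status || (status == 0xFFFF))
        return IRQ_NONE; /* not our interrupt, or the device is gone */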
static inline int napi_schedule_prep(struct napi_struct *n)
|-test_and_set_bit(NAPI_STATE_SCHED, &n->state);
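Spelled out (as the inline looks in kernels of this era), the schedule is only granted when NAPI is not being disabled and the SCHED bit was previously clear, which is what guarantees a single poller per napi_struct:

static inline int napi_schedule_prep(struct napi_struct *n)
{
        return !napi_disable_pending(n) &&
                !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}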
The total work done per NET_RX_SOFTIRQ run is capped by a sysctl:
$ cat /proc/sys/net/core/netdev_budget
300
static void net_rx_action(struct softirq_action *h){
struct softnet_data *sd = &__get_cpu_var(softnet_data);
while (!list_empty(&sd->poll_list)) {
//...
if (test_bit(NAPI_STATE_SCHED, &n->state)) {
work = n->poll(n, weight);
trace_napi_poll(n);
}
WARN_ON_ONCE(work > weight);
budget -= work; // every poll call eats into the shared budget
//..
}
}
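The elided part is where the budget actually bites: roughly (per net_rx_action of this vintage; the exact form varies), once the budget is spent or about 2 jiffies have passed, the softirq is re-raised instead of looping forever:

unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
//...
if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit))) {
        sd->time_squeeze++; /* squeezed out before finishing */
        __raise_softirq_irqoff(NET_RX_SOFTIRQ);
        break;
}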
Recall, for comparison, where the legacy (netif_rx backlog) path bottoms out:
process_backlog()
|-struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
|-__netif_receive_skb
// OK, forget that!
-----------------------------------------------------------------------------------------------------
Now, back to the 8139 driver:
// move received packets out of the DMA ring into cp->rx_skb
static int cp_rx_poll(struct napi_struct *napi, int budget){
//...
while (1) {
const unsigned buflen = cp->rx_buf_sz;
skb = cp->rx_skb[rx_tail];
len = (status & 0x1fff) - 4; // minus the 4-byte CRC
// rx_buf_sz is set via cp_change_mtu() -> cp_set_rxbufsize()
new_skb = netdev_alloc_skb_ip_align(dev, buflen);
|-__netdev_alloc_skb+GFP_ATOMIC
|-skb_reserve+NET_IP_ALIGN
/* unmap the previous DMA address: the last transfer is done */
dma_unmap_single(&cp->pdev->dev, mapping, buflen, PCI_DMA_FROMDEVICE);
skb_put(skb, len);
/* map new_skb->data as the next transfer's mapping and keep the handle */
mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,PCI_DMA_FROMDEVICE);
/* record it at the tail of the rx_skb array */
cp->rx_skb[rx_tail] = new_skb;
cp_rx_skb(cp, skb, desc);
|-__napi_gro_receive: GRO starts reassembling the segmented packets
|-dev_gro_receive
|-ptype->gro_receive(&napi->gro_list, skb); first inet_gro_receive, then (for TCP) the registered tcp4_gro_receive; the merged skbs are kept on the skb_shared_info frags
cp->rx_ring[rx_tail].opts2 = 0;
cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
//some statistics
if (rx < budget) { /* done! (completion path shown below) */ }
}
//...
}
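And the "done" branch is where the driver leaves NAPI mode: complete the poll, then unmask the RX interrupt sources again. Roughly as in cp_rx_poll of this era:

if (rx < budget) {
        unsigned long flags;

        spin_lock_irqsave(&cp->lock, flags);
        /* re-enable ALL interrupt sources, RX included */
        cpw16_f(IntrMask, cp_intr_mask);
        __napi_complete(napi);
        spin_unlock_irqrestore(&cp->lock, flags);
}
return rx;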
The streaming DMA mapping routines can be called from interrupt context. They are for buffers that the CPU and the device do not need to access in parallel.
Think of "streaming" as "asynchronous" or "outside the coherency domain".
The ring descriptors, by contrast, need consistent (coherent) DMA.
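A minimal side-by-side sketch of the two styles (a generic struct device *dev and a made-up RING_BYTES constant, not driver code):

/* streaming: map an existing buffer around one transfer */
dma_addr_t h = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, h))
        goto err;
/* ... the device DMAs into buf ... */
dma_unmap_single(dev, h, len, DMA_FROM_DEVICE);

/* consistent: memory both CPU and device may touch at any time */
dma_addr_t ring_dma;
void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
/* ... descriptors live here for the device's whole lifetime ... */
dma_free_coherent(dev, RING_BYTES, ring, ring_dma);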
static int cp_alloc_rings (struct cp_private *cp)
{
        void *mem;

        mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
                                 &cp->ring_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

        /* remember: when writing descriptors in this mem, wmb() before
           handing them over if you don't want strange things to happen! */
        cp->rx_ring = mem;
        cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE]; /* 64 */

        return cp_init_rings(cp);
}
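Why the wmb() warning: a descriptor must be fully written before DescOwn hands it to the NIC, because coherent memory gives no write ordering by itself. The TX path of 8139cp.c follows this pattern; schematically:

txd->opts2 = opts2;
txd->addr = cpu_to_le64(mapping);
wmb();                  /* descriptor body becomes visible first... */
txd->opts1 = cpu_to_le32(opts1 | DescOwn);
wmb();                  /* ...then ownership passes to the NIC */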
This is where the 8139C+ transmit/receive mode comes in.
The RTL8139C+ provides 64 contiguous in-memory descriptors each for transmit and receive. There are three kinds of descriptor rings:
1. high-priority transmit ring
2. normal-priority transmit ring
3. normal receive ring
Each ring descriptor consists of 4 consecutive double words, and the ring's start address must be 256-bit aligned. This is the block we saw pre-allocated with consistent DMA above.
Here is how the source defines a descriptor. Simple? NO.
struct cp_desc {
        __le32 opts1;
        __le32 opts2;
        __le64 addr;
};
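Part of the "NO": opts1 is not just a length field; it multiplexes ownership, ring topology and fragmentation flags. From the descriptor-bit enum in 8139cp.c:

DescOwn   = (1 << 31), /* descriptor is owned by the NIC */
RingEnd   = (1 << 30), /* end of the descriptor ring */
FirstFrag = (1 << 29), /* first segment of a packet */
LastFrag  = (1 << 28), /* final segment of a packet */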
In fact it goes like this. Allocate the ring array:
int cp_alloc_rings(struct cp_private *cp)
|- dma_alloc_coherent + GFP_KERNEL
   |- __get_free_pages

-----------------------------------------------------------
EEPROM:
static int cp_get_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data)
addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
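That ternary is an address-width probe: word 0 of the serial EEPROM holds the ID 0x8129, so if a read using 8 address bits returns it, the part is the larger 93C56/66 type; otherwise it is a 93C46 with 6 address bits (my reading of the driver). With addr_len known, dumping the EEPROM is just a loop over read_eeprom(), which bit-bangs the 93Cxx protocol through the Cfg9346 register, one 16-bit word per call. A hedged sketch:

u16 word;
int i;

for (i = 0; i < 64; i++) {
        word = read_eeprom(cp->regs, i, addr_len);
        /* ... copy the wanted bytes into the ethtool buffer ... */
}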