xhci_event_ring_init initialization & USB flash drive data transfer

static int xhci_event_ring_init(struct xhci_hcd *xhci, gfp_t flags)
{
        int ret = 0;

        /* primary + secondary */
        xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                "// Allocating primary event ring");


        // xhci->run_regs = hcd->regs +
        //        (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);

        /* Set ir_set to interrupt register set 0 */
        xhci->ir_set = &xhci->run_regs->ir_set[0];
        // get the ir_set register (interrupter register set 0)
        ret = xhci_event_ring_setup(xhci, &xhci->event_ring, xhci->ir_set,
                &xhci->erst, 0, flags);
        if (ret) {
                xhci_err(xhci, "failed to setup primary event ring\n");
                goto fail;
        }

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                "// Allocating sec event ring related pointers");

        xhci->sec_ir_set = kcalloc(xhci->max_interrupters,
                                sizeof(*xhci->sec_ir_set), flags);
        if (!xhci->sec_ir_set) {
                ret = -ENOMEM;
                goto fail;
        }

        xhci->sec_event_ring = kcalloc(xhci->max_interrupters,
                                sizeof(*xhci->sec_event_ring), flags);
        if (!xhci->sec_event_ring) {
                ret = -ENOMEM;
                goto fail;
        }

        xhci->sec_erst = kcalloc(xhci->max_interrupters,
                                sizeof(*xhci->sec_erst), flags);
        if (!xhci->sec_erst)
                ret = -ENOMEM;
fail:
        return ret;
}

struct xhci_ring {
        struct xhci_segment     *first_seg;
        struct xhci_segment     *last_seg;
        union  xhci_trb         *enqueue;
        struct xhci_segment     *enq_seg;
        union  xhci_trb         *dequeue;
        struct xhci_segment     *deq_seg;
        struct list_head        td_list;
        /*
         * Write the cycle state into the TRB cycle field to give ownership of
         * the TRB to the host controller (if we are the producer), or to check
         * if we own the TRB (if we are the consumer).  See section 4.9.1.
         */
        u32                     cycle_state;
        unsigned int            err_count;
        unsigned int            stream_id;
        unsigned int            num_segs;
        unsigned int            num_trbs_free;
        unsigned int            num_trbs_free_temp;
        unsigned int            bounce_buf_len;
        enum xhci_ring_type     type;
        bool                    last_td_was_short;
        struct radix_tree_root  *trb_address_map;
};
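
struct xhci_ring only keeps per-ring bookkeeping; first_seg/last_seg/enq_seg/deq_seg point at segment descriptors. For reference, a segment looks roughly like this in recent mainline xhci.h (exact fields vary by kernel version):

struct xhci_segment {
        union xhci_trb          *trbs;          /* TRBS_PER_SEGMENT TRBs, one dma_pool block */
        /* private to HCD */
        struct xhci_segment     *next;          /* next segment in the ring */
        dma_addr_t              dma;            /* DMA address of the trbs array */
        /* Max packet sized bounce buffer for TD fragment alignment */
        dma_addr_t              bounce_dma;
        void                    *bounce_buf;
        unsigned int            bounce_offs;
        unsigned int            bounce_len;
};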

xhci_event_ring_setup

static int xhci_event_ring_setup(struct xhci_hcd *xhci, struct xhci_ring **er,
        struct xhci_intr_reg __iomem *ir_set, struct xhci_erst *erst,
        unsigned int intr_num, gfp_t flags)
{
        dma_addr_t deq;
        u64 val_64;
        unsigned int val;
        int ret;

        *er = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, 0, flags);
        if (!*er)
                return -ENOMEM;

        ret = xhci_alloc_erst(xhci, *er, erst, flags);
        if (ret)
                return ret;

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                "intr# %d: num segs = %i, virt addr = %pK, dma addr = 0x%llx",
                        intr_num,
                        erst->num_entries,
                        erst->entries,
                        (unsigned long long)erst->erst_dma_addr);

        /* set ERST count with the number of entries in the segment table */
        val = readl_relaxed(&ir_set->erst_size);
        val &= ERST_SIZE_MASK;
        val |= ERST_NUM_SEGS;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                "Write ERST size = %i to ir_set %d (some bits preserved)", val,
                intr_num);
        writel_relaxed(val, &ir_set->erst_size);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "intr# %d: Set ERST entries to point to event ring.",
                        intr_num);
        /* set the segment table base address */
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "Set ERST base address for ir_set %d = 0x%llx",
                        intr_num,
                        (unsigned long long)erst->erst_dma_addr);
        val_64 = xhci_read_64(xhci, &ir_set->erst_base);
        val_64 &= ERST_PTR_MASK;
        val_64 |= (erst->erst_dma_addr & (u64) ~ERST_PTR_MASK);
        xhci_write_64(xhci, val_64, &ir_set->erst_base);
        
        //(*er)->dequeue =  ring->first_seg->trbs
        /* Set the event ring dequeue address */
        deq = xhci_trb_virt_to_dma((*er)->deq_seg, (*er)->dequeue);
        if (deq == 0 && !in_interrupt())
                xhci_warn(xhci,
                "intr# %d:WARN something wrong with SW event ring deq ptr.\n",
                intr_num);
        /* Update HC event ring dequeue pointer */
        val_64 = xhci_read_64(xhci, &ir_set->erst_dequeue);
        val_64 &= ERST_PTR_MASK;
        /* Don't clear the EHB bit (which is RW1C) because
         * there might be more events to service.
         */
        val_64 &= ~ERST_EHB;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                "intr# %d:Write event ring dequeue pointer, preserving EHB bit",
                intr_num);
                // write the event ring dequeue DMA address into ir_set->erst_dequeue
        xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | val_64,
                        &ir_set->erst_dequeue);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "Wrote ERST address to ir_set %d.", intr_num);

        return 0;
}
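
The erst_size/erst_base/erst_dequeue registers programmed above belong to the per-interrupter register set (xHCI spec section 5.5.2). Its layout in xhci.h is roughly:

struct xhci_intr_reg {
        __le32  irq_pending;    /* IMAN: interrupt pending / interrupt enable */
        __le32  irq_control;    /* IMOD: interrupt moderation interval/counter */
        __le32  erst_size;      /* number of ERST entries */
        __le32  rsvd;
        __le64  erst_base;      /* ERST base address (ERSTBA) */
        __le64  erst_dequeue;   /* event ring dequeue pointer, DESI and EHB bits */
};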

xhci_ring_alloc

   *er = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, 0, flags);

/*
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
                unsigned int num_segs, unsigned int cycle_state,
                enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
        struct xhci_ring        *ring;
        int ret;
        struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

        ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev));
        if (!ring)
                return NULL;

        ring->num_segs = num_segs;
        ring->bounce_buf_len = max_packet;
        INIT_LIST_HEAD(&ring->td_list);
        ring->type = type;
        if (num_segs == 0)
                return ring;
        // allocate ring->first_seg ... ring->last_seg and their TRB arrays from the dma_pool
        ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
                        &ring->last_seg, num_segs, cycle_state, type,
                        max_packet, flags);
        if (ret)
                goto fail;

        /* Only event ring does not use link TRB */
        if (type != TYPE_EVENT) {
                /* See section 4.9.2.1 and 6.4.4.1 */
                ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
                        cpu_to_le32(LINK_TOGGLE);
        }
        xhci_initialize_ring_info(ring, cycle_state);
        trace_xhci_ring_alloc(ring);
        return ring;

fail:
        kfree(ring);
        return NULL;
}

xhci_alloc_segments_for_ring

/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
                struct xhci_segment **first, struct xhci_segment **last,
                unsigned int num_segs, unsigned int cycle_state,
                enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
        struct xhci_segment *prev;

        // allocate memory for the first xhci_segment
        prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
        if (!prev)
                return -ENOMEM;
        num_segs--;

        *first = prev;
        while (num_segs > 0) {
                struct xhci_segment     *next;

                next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
                if (!next) {
                        prev = *first;
                        while (prev) {
                                next = prev->next;
                                xhci_segment_free(xhci, prev);
                                prev = next;
                        }
                        return -ENOMEM;
                }
                xhci_link_segments(xhci, prev, next, type);

                prev = next;
                num_segs--;
        }
        xhci_link_segments(xhci, prev, *first, type);
        *last = prev;

        return 0;
}

xhci_segment_alloc

 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
                        TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
                                               unsigned int cycle_state,
                                               unsigned int max_packet,
                                               gfp_t flags)
{
        struct xhci_segment *seg;
        dma_addr_t      dma;
        int             i;
        struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

        seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
        if (!seg)
                return NULL;

        seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
        if (!seg->trbs) {
                kfree(seg);
                return NULL;
        }

        if (max_packet) {
                seg->bounce_buf = kzalloc_node(max_packet, flags,
                                        dev_to_node(dev));
                if (!seg->bounce_buf) {
                        dma_pool_free(xhci->segment_pool, seg->trbs, dma);
                        kfree(seg);
                        return NULL;
                }
        }
        /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
        if (cycle_state == 0) {
                for (i = 0; i < TRBS_PER_SEGMENT; i++)
                        seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
        }
        seg->dma = dma;
        seg->next = NULL;

        return seg;
}

xhci_link_segments

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
                struct xhci_segment *next, enum xhci_ring_type type)
{
        u32 val;

        if (!prev || !next)
                return;
        prev->next = next;
        if (type != TYPE_EVENT) {
                prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
                        cpu_to_le64(next->dma);

                /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
                val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
                val &= ~TRB_TYPE_BITMASK;
                val |= TRB_TYPE(TRB_LINK);
                /* Always set the chain bit with 0.95 hardware */
                /* Set chain bit for isoc rings on AMD 0.96 host */
                if (xhci_link_trb_quirk(xhci) ||
                                (type == TYPE_ISOC &&
                                 (xhci->quirks & XHCI_AMD_0x96_HOST)))
                        val |= TRB_CHAIN;
                prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
        }
}

xhci_alloc_erst

int xhci_alloc_erst(struct xhci_hcd *xhci,
                    struct xhci_ring *evt_ring,
                    struct xhci_erst *erst,
                    gfp_t flags)
{
        size_t size;
        unsigned int val;
        struct xhci_segment *seg;
        struct xhci_erst_entry *entry;

        size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
        // allocate DMA-coherent memory for the ERST entries
        erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
                                           size, &erst->erst_dma_addr, flags);
        if (!erst->entries)
                return -ENOMEM;

        erst->num_entries = evt_ring->num_segs;

        seg = evt_ring->first_seg;
        for (val = 0; val < evt_ring->num_segs; val++) {
                entry = &erst->entries[val];
                entry->seg_addr = cpu_to_le64(seg->dma);
                entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
                entry->rsvd = 0;
                seg = seg->next;
        }

        return 0;
}
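
Each ERST entry simply records the DMA address and size (in TRBs) of one event ring segment the controller may write events into. Roughly, from xhci.h:

struct xhci_erst_entry {
        /* 64-bit event ring segment address */
        __le64  seg_addr;
        __le32  seg_size;       /* number of TRBs in this segment */
        __le32  rsvd;           /* must be zero */
};

struct xhci_erst {
        struct xhci_erst_entry  *entries;       /* the table filled by xhci_alloc_erst() */
        unsigned int            num_entries;
        dma_addr_t              erst_dma_addr;  /* DMA address of entries, programmed into ERSTBA */
        unsigned int            erst_size;      /* number of entries the ERST can contain */
};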

xhci_initialize_ring_info

static void xhci_initialize_ring_info(struct xhci_ring *ring,
                                        unsigned int cycle_state)
{
        /* The ring is empty, so the enqueue pointer == dequeue pointer */
        ring->enqueue = ring->first_seg->trbs;
        ring->enq_seg = ring->first_seg;
        ring->dequeue = ring->enqueue;
        ring->deq_seg = ring->first_seg;
        /* The ring is initialized to 0. The producer must write 1 to the cycle
         * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
         * compare CCS to the cycle bit to check ownership, so CCS = 1.
         *
         * New rings are initialized with cycle state equal to 1; if we are
         * handling ring expansion, set the cycle state equal to the old ring.
         */
        ring->cycle_state = cycle_state;

        /*
         * Each segment has a link TRB, and leave an extra TRB for SW
         * accounting purpose
         */
        ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}

xhci_trb_virt_to_dma

deq = xhci_trb_virt_to_dma((*er)->deq_seg, (*er)->dequeue);
/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,      /* = (*er)->deq_seg, i.e. ring->first_seg */
                union xhci_trb *trb)                            /* = (*er)->dequeue, i.e. ring->first_seg->trbs */
{
        unsigned long segment_offset;

        if (!seg || !trb || trb < seg->trbs)
                return 0;
        /* offset in TRBs */
        // offset of this TRB within the segment's trbs[] array
        segment_offset = trb - seg->trbs;
        if (segment_offset >= TRBS_PER_SEGMENT)
                return 0;
        return seg->dma + (segment_offset * sizeof(*trb));
}
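
As a quick sanity check of the arithmetic (a union xhci_trb is 16 bytes, four 32-bit fields): if seg->dma is 0x1000 and trb points at seg->trbs[5], then segment_offset is 5 and the function returns 0x1000 + 5 * 16 = 0x1050.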

Setting the complete callback

static inline void usb_fill_bulk_urb(struct urb *urb,
                                     struct usb_device *dev,
                                     unsigned int pipe,
                                     void *transfer_buffer,
                                     int buffer_length,
                                     usb_complete_t complete_fn,
                                     void *context)
{
        urb->dev = dev;
        urb->pipe = pipe;
        urb->transfer_buffer = transfer_buffer;
        urb->transfer_buffer_length = buffer_length;
        urb->complete = complete_fn;
        urb->context = context;
}
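
As a usage illustration (not taken from the UAS code): a driver allocates an URB, fills it with this helper and submits it; the completion callback then runs in interrupt context when the HCD gives the URB back. my_driver_complete, my_driver_send and the endpoint number 0x02 are made-up names for this sketch, and buf must be a kmalloc'ed (DMA-able) buffer:

static void my_driver_complete(struct urb *urb)
{
        /* urb->status and urb->actual_length are valid here */
        if (urb->status)
                dev_err(&urb->dev->dev, "bulk urb failed: %d\n", urb->status);
}

static int my_driver_send(struct usb_device *udev, void *buf, int len)
{
        struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
        int ret;

        if (!urb)
                return -ENOMEM;

        usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 0x02 /* example ep */),
                          buf, len, my_driver_complete, NULL);
        ret = usb_submit_urb(urb, GFP_KERNEL);
        usb_free_urb(urb);      /* drop our reference; the HCD holds its own until giveback */
        return ret;
}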

uas_alloc_data_urb

static struct urb *uas_alloc_data_urb(struct uas_dev_info *devinfo, gfp_t gfp,
                                      struct scsi_cmnd *cmnd,
                                      enum dma_data_direction dir)
{
        struct usb_device *udev = devinfo->udev;
        struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
        struct urb *urb = usb_alloc_urb(0, gfp);
        struct scsi_data_buffer *sdb = &cmnd->sdb;
        unsigned int pipe = (dir == DMA_FROM_DEVICE)
                ? devinfo->data_in_pipe : devinfo->data_out_pipe;

        if (!urb)
                goto out;
        usb_fill_bulk_urb(urb, udev, pipe, NULL, sdb->length,
                          uas_data_cmplt, cmnd);
        if (devinfo->use_streams)
                urb->stream_id = cmdinfo->uas_tag;
        urb->num_sgs = udev->bus->sg_tablesize ? sdb->table.nents : 0;
        urb->sg = sdb->table.sgl;
 out:
        return urb;
}

The HCD hands the received data back to the storage driver

static void uas_data_cmplt(struct urb *urb)
{
        struct scsi_cmnd *cmnd = urb->context;
        struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
        struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
        struct scsi_data_buffer *sdb = &cmnd->sdb;
        unsigned long flags;
        int status = urb->status;

        spin_lock_irqsave(&devinfo->lock, flags);

        if (cmdinfo->data_in_urb == urb) {
                cmdinfo->state &= ~DATA_IN_URB_INFLIGHT;
                cmdinfo->data_in_urb = NULL;
        } else if (cmdinfo->data_out_urb == urb) {
                cmdinfo->state &= ~DATA_OUT_URB_INFLIGHT;
                cmdinfo->data_out_urb = NULL;
        }

        if (devinfo->resetting)
                goto out;

        /* Data urbs should not complete before the cmd urb is submitted */
        if (cmdinfo->state & SUBMIT_CMD_URB) {
                uas_log_cmd_state(cmnd, "unexpected data cmplt", 0);
                goto out;
        }

        if (status) {
                if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN)
                        uas_log_cmd_state(cmnd, "data cmplt err", status);
                /* error: no data transferred */
                scsi_set_resid(cmnd, sdb->length);
        } else {
                scsi_set_resid(cmnd, sdb->length - urb->actual_length);
        }
        uas_try_complete(cmnd, __func__);
out:
        usb_free_urb(urb);
        spin_unlock_irqrestore(&devinfo->lock, flags);
}

static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller)
{
        struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
        struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;

        lockdep_assert_held(&devinfo->lock);
        if (cmdinfo->state & (COMMAND_INFLIGHT |
                              DATA_IN_URB_INFLIGHT |
                              DATA_OUT_URB_INFLIGHT |
                              COMMAND_ABORTED))
                return -EBUSY;
        devinfo->cmnd[cmdinfo->uas_tag - 1] = NULL;
        uas_free_unsubmitted_urbs(cmnd);
        cmnd->scsi_done(cmnd);//scsi_mq_done
        return 0;
}

== usb_submit_urb(hub->urb, GFP_NOIO);                // submit hub->urb
        == usb_hcd_submit_urb()
                == rh_urb_enqueue(hcd, urb);                //root hub
                        == rh_call_control()                        //control urb
                                == usb_hcd_link_urb_to_ep(hcd, urb);
                                == hcd->driver->hub_control()        // non-generic request
                                        == xhci_hub_control()
                                                == xhci_get_port_status()
                                == usb_hcd_unlink_urb_from_ep(hcd, urb);
                                == usb_hcd_giveback_urb(hcd, urb, status);                // give the urb back to the device, i.e. the root hub (rhdev)
                        == rh_queue_status(hcd, urb);                // interrupt urb that polls the hub status in a loop
                                == usb_hcd_link_urb_to_ep(hcd, urb);
                                == mod_timer(&hcd->rh_timer, jiffies);
                                        == timer_setup(&hcd->rh_timer, rh_timer_func, 0);
                                        == rh_timer_func()
                                                == usb_hcd_poll_rh_status(_hcd);
                                                        == hcd->driver->hub_status_data(hcd, buffer);
                                                        == xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
                                                        == usb_hcd_unlink_urb_from_ep(hcd, urb);        // if there is a status change, give the urb back to the device (rhdev)
                                                        == usb_hcd_giveback_urb(hcd, urb, 0);

The device driver submits the URB to the HCD (USB flash drive data transfer)

SUBMIT_CMD_URB :
== uas_submit_urbs(struct scsi_cmnd *cmnd, struct uas_dev_info *devinfo)
	== usb_submit_urb(cmdinfo->cmd_urb, GFP_ATOMIC);
		== usb_hcd_submit_urb(struct urb *urb, gfp_t mem_flags)
			hcd->driver->urb_enqueue(hcd, urb, mem_flags);
			== xhci_urb_enqueue();				trace_xhci_urb_enqueue(urb);
				== xhci_queue_bulk_tx();		// bulk transfer
					== prepare_transfer();
						== usb_hcd_link_urb_to_ep()		//add an URB to its endpoint queue
							== list_add_tail(&urb->urb_list, &urb->ep->urb_list);

 usb_submit_urb

uas_queuecommand_lck
uas_submit_urbs
usb_submit_urb

int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
        int                             xfertype, max;
        struct usb_device               *dev;
        struct usb_host_endpoint        *ep;
        int                             is_out;
        unsigned int                    allowed;

        if (!urb || !urb->complete)
                return -EINVAL;
        if (urb->hcpriv) {
                WARN_ONCE(1, "URB %pK submitted while active\n", urb);
                return -EBUSY;
        }

        dev = urb->dev;
        if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
                return -ENODEV;

        /* For now, get the endpoint from the pipe.  Eventually drivers
         * will be required to set urb->ep directly and we will eliminate
         * urb->pipe.
         */
        ep = usb_pipe_endpoint(dev, urb->pipe);	// look up the endpoint from the pipe
        if (!ep)
                return -ENOENT;

        urb->ep = ep;   // set the endpoint
        urb->status = -EINPROGRESS;
        urb->actual_length = 0;

        /* Lots of sanity checks, so HCDs can rely on clean data
         * and don't need to duplicate tests
         */
        xfertype = usb_endpoint_type(&ep->desc);
        if (xfertype == USB_ENDPOINT_XFER_CONTROL) {    // control transfer
                struct usb_ctrlrequest *setup =
                                (struct usb_ctrlrequest *) urb->setup_packet;   // the setup packet

                if (!setup)
                        return -ENOEXEC;
                is_out = !(setup->bRequestType & USB_DIR_IN) ||
                                !setup->wLength;
        } else {
                is_out = usb_endpoint_dir_out(&ep->desc);
        }

        /* Clear the internal flags and cache the direction for later use */
        urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
                        URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
                        URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
                        URB_DMA_SG_COMBINED);
        urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);    // IN or OUT endpoint

        if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
                        dev->state < USB_STATE_CONFIGURED)
                return -ENODEV;

        max = usb_endpoint_maxp(&ep->desc);
        if (max <= 0) {
                dev_dbg(&dev->dev,
                        "bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
                        usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
                        __func__, max);
                return -EMSGSIZE;
        }

        /* periodic transfers limit size per frame/uframe,
         * but drivers only control those sizes for ISO.
         * while we're checking, initialize return status.
         */
        if (xfertype == USB_ENDPOINT_XFER_ISOC) {       // isochronous transfer
                int     n, len;

                /* SuperSpeed isoc endpoints have up to 16 bursts of up to
                 * 3 packets each
                 */
                if (dev->speed >= USB_SPEED_SUPER) {
                        int     burst = 1 + ep->ss_ep_comp.bMaxBurst;
                        int     mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
                        max *= burst;
                        max *= mult;
                }

                if (dev->speed == USB_SPEED_SUPER_PLUS &&
                    USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes)) {
                        struct usb_ssp_isoc_ep_comp_descriptor *isoc_ep_comp;

                        isoc_ep_comp = &ep->ssp_isoc_ep_comp;
                        max = le32_to_cpu(isoc_ep_comp->dwBytesPerInterval);
                }

                /* "high bandwidth" mode, 1-3 packets/uframe? */
                if (dev->speed == USB_SPEED_HIGH)
                        max *= usb_endpoint_maxp_mult(&ep->desc);

                if (urb->number_of_packets <= 0)
                        return -EINVAL;
                for (n = 0; n < urb->number_of_packets; n++) {
                        len = urb->iso_frame_desc[n].length;
                        if (len < 0 || len > max)
                                return -EMSGSIZE;
                        urb->iso_frame_desc[n].status = -EXDEV;
                        urb->iso_frame_desc[n].actual_length = 0;
                }
        } else if (urb->num_sgs && !urb->dev->bus->no_sg_constraint &&
                        dev->speed != USB_SPEED_WIRELESS) {
                struct scatterlist *sg;
                int i;

                for_each_sg(urb->sg, sg, urb->num_sgs - 1, i)
                        if (sg->length % max)
                                return -EINVAL;
        }

        /* the I/O buffer must be mapped/unmapped, except when length=0 */
        if (urb->transfer_buffer_length > INT_MAX)
                return -EMSGSIZE;

        /*
         * stuff that drivers shouldn't do, but which shouldn't
         * cause problems in HCDs if they get it wrong.
         */

        /* Check that the pipe's type matches the endpoint's type */
        if (usb_urb_ep_type_check(urb))
                dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
                        usb_pipetype(urb->pipe), pipetypes[xfertype]);

        /* Check against a simple/standard policy */
        allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
                        URB_FREE_BUFFER);
        switch (xfertype) {     // transfer type
        case USB_ENDPOINT_XFER_BULK:
        case USB_ENDPOINT_XFER_INT:
                if (is_out)
                        allowed |= URB_ZERO_PACKET;
                /* FALLTHROUGH */
        default:                        /* all non-iso endpoints */
                if (!is_out)
                        allowed |= URB_SHORT_NOT_OK;
                break;
        case USB_ENDPOINT_XFER_ISOC:
                allowed |= URB_ISO_ASAP;
                break;
        }
        allowed &= urb->transfer_flags;

        /* warn if submitter gave bogus flags */
        if (allowed != urb->transfer_flags)
                dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n",
                        urb->transfer_flags, allowed);

        /*
         * Force periodic transfer intervals to be legal values that are
         * a power of two (so HCDs don't need to).
         *
         * FIXME want bus->{intr,iso}_sched_horizon values here.  Each HC
         * supports different values... this uses EHCI/UHCI defaults (and
         * EHCI can use smaller non-default values).
         */
        switch (xfertype) {
        case USB_ENDPOINT_XFER_ISOC:    // isochronous transfer
        case USB_ENDPOINT_XFER_INT:     // interrupt transfer
                /* too small? */
                switch (dev->speed) {
                case USB_SPEED_WIRELESS:
                        if ((urb->interval < 6)
                                && (xfertype == USB_ENDPOINT_XFER_INT))
                                return -EINVAL;
                        /* fall through */
                default:
                        if (urb->interval <= 0)
                                return -EINVAL;
                        break;
                }
                /* too big? */
                switch (dev->speed) {
                case USB_SPEED_SUPER_PLUS:      // SuperSpeedPlus device
                case USB_SPEED_SUPER:   /* units are 125us */
                        /* Handle up to 2^(16-1) microframes */
                        if (urb->interval > (1 << 15))
                                return -EINVAL;
                        max = 1 << 15;
                        break;
                case USB_SPEED_WIRELESS:        // wireless USB
                        if (urb->interval > 16)
                                return -EINVAL;
                        break;
                case USB_SPEED_HIGH:    /* units are microframes */     // high-speed device
                        /* NOTE usb handles 2^15 */
                        if (urb->interval > (1024 * 8))
                                urb->interval = 1024 * 8;
                        max = 1024 * 8;
                        break;
                case USB_SPEED_FULL:    /* units are frames/msec */     // full-speed device
                case USB_SPEED_LOW:     // low-speed device
                        if (xfertype == USB_ENDPOINT_XFER_INT) {
                                if (urb->interval > 255)
                                        return -EINVAL;
                                /* NOTE ohci only handles up to 32 */
                                max = 128;
                        } else {
                                if (urb->interval > 1024)
                                        urb->interval = 1024;
                                /* NOTE usb and ohci handle up to 2^15 */
                                max = 1024;
                        }
                        break;
                default:
                        return -EINVAL;
                }
                if (dev->speed != USB_SPEED_WIRELESS) {
                        /* Round down to a power of 2, no more than max */
                        urb->interval = min(max, 1 << ilog2(urb->interval));
                }
        }

        return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);

See also: "usb host 驱动之 urb" (优快云博客).

usb_hcd_submit_urb

/*-------------------------------------------------------------------------*/

/* may be called in any context with a valid urb->dev usecount
 * caller surrenders "ownership" of urb
 * expects usb_submit_urb() to have sanity checked and conditioned all
 * inputs in the urb
 */
int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
{
        int                     status;
        struct usb_hcd          *hcd = bus_to_hcd(urb->dev->bus);

        /* increment urb's reference count as part of giving it to the HCD
         * (which will control it).  HCD guarantees that it either returns
         * an error or calls giveback(), but not both.
         */
        usb_get_urb(urb);
        atomic_inc(&urb->use_count);
        atomic_inc(&urb->dev->urbnum);
        usbmon_urb_submit(&hcd->self, urb);

        /* NOTE requirements on root-hub callers (usbfs and the hub
         * driver, for now):  URBs' urb->transfer_buffer must be
         * valid and usb_buffer_{sync,unmap}() not be needed, since
         * they could clobber root hub response data.  Also, control
         * URBs must be submitted in process context with interrupts
         * enabled.
         */

        if (is_root_hub(urb->dev)) {
                status = rh_urb_enqueue(hcd, urb);
        } else {
                status = map_urb_for_dma(hcd, urb, mem_flags);
                if (likely(status == 0)) {
                        // call the host controller driver's urb_enqueue method
                        status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
                        if (unlikely(status))
                                unmap_urb_for_dma(hcd, urb);
                }
        }

        if (unlikely(status)) {
                usbmon_urb_submit_error(&hcd->self, urb, status);
                urb->hcpriv = NULL;
                INIT_LIST_HEAD(&urb->urb_list);
                atomic_dec(&urb->use_count);
                /*
                 * Order the write of urb->use_count above before the read
                 * of urb->reject below.  Pairs with the memory barriers in
                 * usb_kill_urb() and usb_poison_urb().
                 */
                smp_mb__after_atomic();

                atomic_dec(&urb->dev->urbnum);
                if (atomic_read(&urb->reject))
                        wake_up(&usb_kill_urb_queue);
                usb_put_urb(urb);
        }
        return status;
}

xhci_urb_enqueue: the host controller driver's urb_enqueue hook

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        unsigned long flags;
        int ret = 0;
        unsigned int slot_id, ep_index;
        unsigned int *ep_state;
        struct urb_priv *urb_priv;
        int num_tds;

        if (!urb)
                return -EINVAL;
        ret = xhci_check_args(hcd, urb->dev, urb->ep,
                                        true, true, __func__);
        if (ret <= 0)
                return ret ? ret : -EINVAL;

        slot_id = urb->dev->slot_id;
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
        ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;

        if (!HCD_HW_ACCESSIBLE(hcd)) {
                if (!in_interrupt())
                        xhci_dbg(xhci, "urb submitted during PCI suspend\n");
                return -ESHUTDOWN;
        }
        if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
                xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
                return -ENODEV;
        }

        if (usb_endpoint_xfer_isoc(&urb->ep->desc))
                num_tds = urb->number_of_packets;
        else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
            urb->transfer_buffer_length > 0 &&
            urb->transfer_flags & URB_ZERO_PACKET &&
            !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
                num_tds = 2;
        else
                num_tds = 1;

        urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
        if (!urb_priv)
                return -ENOMEM;

        urb_priv->num_tds = num_tds;
        urb_priv->num_tds_done = 0;
        urb->hcpriv = urb_priv;

        trace_xhci_urb_enqueue(urb);

        if (usb_endpoint_xfer_control(&urb->ep->desc)) {
                /* Check to see if the max packet size for the default control
                 * endpoint changed during FS device enumeration
                 */
                if (urb->dev->speed == USB_SPEED_FULL) {
                        ret = xhci_check_maxpacket(xhci, slot_id,
                                        ep_index, urb, mem_flags);
                        if (ret < 0) {
                                xhci_urb_free_priv(urb_priv);
                                urb->hcpriv = NULL;
                                return ret;
                        }
                }
        }

        spin_lock_irqsave(&xhci->lock, flags);

        if (xhci->xhc_state & XHCI_STATE_DYING) {
                xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
                         urb->ep->desc.bEndpointAddress, urb);
                ret = -ESHUTDOWN;
                goto free_priv;
        }
        if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
                xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
                          *ep_state);
                ret = -EINVAL;
                goto free_priv;
        }
        if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
                xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
                ret = -EINVAL;
                goto free_priv;
        }

        switch (usb_endpoint_type(&urb->ep->desc)) {

        case USB_ENDPOINT_XFER_CONTROL:
                ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
                                         slot_id, ep_index);
                break;
        case USB_ENDPOINT_XFER_BULK:
                ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
                                         slot_id, ep_index);
                break;
        case USB_ENDPOINT_XFER_INT:
                ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
                break;
        case USB_ENDPOINT_XFER_ISOC:
                ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
        }

        if (ret) {
free_priv:
                xhci_urb_free_priv(urb_priv);
                urb->hcpriv = NULL;
        }
        spin_unlock_irqrestore(&xhci->lock, flags);
        return ret;
}

xhci_queue_bulk_tx performs the bulk data transfer


/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                struct urb *urb, int slot_id, unsigned int ep_index)
{
        struct xhci_ring *ring;
        struct urb_priv *urb_priv;
        struct xhci_td *td;
        struct xhci_generic_trb *start_trb;
        struct scatterlist *sg = NULL;
        bool more_trbs_coming = true;
        bool need_zero_pkt = false;
        bool first_trb = true;
        unsigned int num_trbs;
        unsigned int start_cycle, num_sgs = 0;
        unsigned int enqd_len, block_len, trb_buff_len, full_len;
        int sent_len, ret;
        u32 field, length_field, remainder;
        u64 addr, send_addr;

        ring = xhci_urb_to_transfer_ring(xhci, urb);
        if (!ring)
                return -EINVAL;

        full_len = urb->transfer_buffer_length;
        /* If we have scatter/gather list, we use it. */
        if (urb->num_sgs) {
                num_sgs = urb->num_mapped_sgs;
                sg = urb->sg;
                addr = (u64) sg_dma_address(sg);
                block_len = sg_dma_len(sg);
                num_trbs = count_sg_trbs_needed(urb);
        } else {
                num_trbs = count_trbs_needed(urb);
                addr = (u64) urb->transfer_dma;
                block_len = full_len;
        }
        ret = prepare_transfer(xhci, xhci->devs[slot_id],
                        ep_index, urb->stream_id,
                        num_trbs, urb, 0, mem_flags);
        if (unlikely(ret < 0))
                return ret;

        urb_priv = urb->hcpriv;

        /* Deal with URB_ZERO_PACKET - need one more td/trb */
        if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1)
                need_zero_pkt = true;

        td = &urb_priv->td[0];

        /*
         * Don't give the first TRB to the hardware (by toggling the cycle bit)
         * until we've finished creating all the other TRBs.  The ring's cycle
         * state may change as we enqueue the other TRBs, so save it too.
         */
        start_trb = &ring->enqueue->generic;
        start_cycle = ring->cycle_state;
        send_addr = addr;

        /* Queue the TRBs, even if they are zero-length */
        for (enqd_len = 0; first_trb || enqd_len < full_len;
                        enqd_len += trb_buff_len) {
                field = TRB_TYPE(TRB_NORMAL);

                /* TRB buffer should not cross 64KB boundaries */
                trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
                trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);

                if (enqd_len + trb_buff_len > full_len)
                        trb_buff_len = full_len - enqd_len;

                /* Don't change the cycle bit of the first TRB until later */
                if (first_trb) {
                        first_trb = false;
                        if (start_cycle == 0)
                                field |= TRB_CYCLE;
                } else
                        field |= ring->cycle_state;

                /* Chain all the TRBs together; clear the chain bit in the last
                 * TRB to indicate it's the last TRB in the chain.
                 */
                if (enqd_len + trb_buff_len < full_len) {
                        field |= TRB_CHAIN;
                        if (trb_is_link(ring->enqueue + 1)) {
                                if (xhci_align_td(xhci, urb, enqd_len,
                                                  &trb_buff_len,
                                                  ring->enq_seg)) {
                                        send_addr = ring->enq_seg->bounce_dma;
                                        /* assuming TD won't span 2 segs */
                                        td->bounce_seg = ring->enq_seg;
                                }
                        }
                }
                if (enqd_len + trb_buff_len >= full_len) {
                        field &= ~TRB_CHAIN;
                        field |= TRB_IOC;
                        more_trbs_coming = false;
                        td->last_trb = ring->enqueue;

                        if (xhci_urb_suitable_for_idt(urb)) {
                                memcpy(&send_addr, urb->transfer_buffer,
                                       trb_buff_len);
                                le64_to_cpus(&send_addr);
                                field |= TRB_IDT;
                        }
                }

                /* Only set interrupt on short packet for IN endpoints */
                if (usb_urb_dir_in(urb))
                        field |= TRB_ISP;

                /* Set the TRB length, TD size, and interrupter fields. */
                remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
                                              full_len, urb, more_trbs_coming);

                length_field = TRB_LEN(trb_buff_len) |
                        TRB_TD_SIZE(remainder) |
                        TRB_INTR_TARGET(0);

                queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
                                lower_32_bits(send_addr),
                                upper_32_bits(send_addr),
                                length_field,
                                field);

                addr += trb_buff_len;
                sent_len = trb_buff_len;

                while (sg && sent_len >= block_len) {
                        /* New sg entry */
                        --num_sgs;
                        sent_len -= block_len;
                        sg = sg_next(sg);
                        if (num_sgs != 0 && sg) {
                                block_len = sg_dma_len(sg);
                                addr = (u64) sg_dma_address(sg);
                                addr += sent_len;
                        }
                }
                block_len -= sent_len;
                send_addr = addr;
        }

        if (need_zero_pkt) {
                ret = prepare_transfer(xhci, xhci->devs[slot_id],
                                       ep_index, urb->stream_id,
                                       1, urb, 1, mem_flags);
                urb_priv->td[1].last_trb = ring->enqueue;
                field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
                queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
        }

        check_trb_math(urb, enqd_len);
        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
                        start_cycle, start_trb);
        return 0;
}
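
TRB_BUFF_LEN_UP_TO_BOUNDARY() used in the loop above enforces the rule that a single Normal TRB buffer must not cross a 64KB boundary. It is defined in xhci.h roughly as:

#define TRB_MAX_BUFF_SHIFT              16
#define TRB_MAX_BUFF_SIZE               (1 << TRB_MAX_BUFF_SHIFT)       /* 64KB */
/* how many bytes are left before addr hits the next 64KB boundary */
#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr)       (TRB_MAX_BUFF_SIZE - \
                                        ((addr) & (TRB_MAX_BUFF_SIZE - 1)))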

prepare_transfer

static int prepare_transfer(struct xhci_hcd *xhci,
                struct xhci_virt_device *xdev,
                unsigned int ep_index,
                unsigned int stream_id,
                unsigned int num_trbs,
                struct urb *urb,
                unsigned int td_index,
                gfp_t mem_flags)
{
        int ret;
        struct urb_priv *urb_priv;
        struct xhci_td  *td;
        struct xhci_ring *ep_ring;
        struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

        ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
        if (!ep_ring) {
                xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
                                stream_id);
                return -EINVAL;
        }

        ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
                           num_trbs, mem_flags);
        if (ret)
                return ret;

        urb_priv = urb->hcpriv;
        td = &urb_priv->td[td_index];

        INIT_LIST_HEAD(&td->td_list);
        INIT_LIST_HEAD(&td->cancelled_td_list);

        if (td_index == 0) {
                ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
                if (unlikely(ret))
                        return ret;
        }

        td->urb = urb;
        /* Add this TD to the tail of the endpoint ring's TD list */
        list_add_tail(&td->td_list, &ep_ring->td_list);
        td->start_seg = ep_ring->enq_seg;
        td->first_trb = ep_ring->enqueue;

        return 0;
}

 usb_hcd_link_urb_to_ep

int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb)
{
        int             rc = 0;

        spin_lock(&hcd_urb_list_lock);

        /* Check that the URB isn't being killed */
        if (unlikely(atomic_read(&urb->reject))) {
                rc = -EPERM;
                goto done;
        }

        if (unlikely(!urb->ep->enabled)) {
                rc = -ENOENT;
                goto done;
        }

        if (unlikely(!urb->dev->can_submit)) {
                rc = -EHOSTUNREACH;
                goto done;
        }

        /*
         * Check the host controller's state and add the URB to the
         * endpoint's queue.
         */
        if (HCD_RH_RUNNING(hcd)) {
                urb->unlinked = 0;
                //add the data urb to urb->ep->urb_list
                list_add_tail(&urb->urb_list, &urb->ep->urb_list);
        } else {
                rc = -ESHUTDOWN;
                goto done;
        }
 done:
        spin_unlock(&hcd_urb_list_lock);
        return rc;
}
EXPORT_SYMBOL_GPL(usb_hcd_link_urb_to_ep);

queue_trb

/****           Endpoint Ring Operations        ****/

/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @more_trbs_coming:   Will you enqueue more TRBs before calling
 *                      prepare_transfer()?
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
                bool more_trbs_coming,
                u32 field1, u32 field2, u32 field3, u32 field4)
{
        struct xhci_generic_trb *trb;

        trb = &ring->enqueue->generic;
        trb->field[0] = cpu_to_le32(field1);
        trb->field[1] = cpu_to_le32(field2);
        trb->field[2] = cpu_to_le32(field3);
        /* make sure TRB is fully written before giving it to the controller */
        wmb();
        trb->field[3] = cpu_to_le32(field4);

        trace_xhci_queue_trb(ring, trb);

        inc_enq(xhci, ring, more_trbs_coming);
}
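
queue_trb() relies on inc_enq() (not listed here) to advance ring->enqueue. A much simplified sketch of the idea, ignoring the chain-bit quirks and num_trbs_free accounting of the real driver:

/* Simplified illustration only -- see inc_enq() in xhci-ring.c for the real logic */
static void inc_enq_sketch(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        ring->enqueue++;

        /*
         * When the enqueue pointer lands on the link TRB at the end of a
         * segment, hand that link TRB to the controller by flipping its
         * cycle bit, follow it to the next segment, and toggle the producer
         * cycle state if the link TRB carries the Toggle Cycle flag (i.e.
         * we wrapped past the last segment of the ring).
         */
        while (trb_is_link(ring->enqueue)) {
                wmb();
                ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
                if (link_trb_toggles_cycle(ring->enqueue))
                        ring->cycle_state ^= 1;
                ring->enq_seg = ring->enq_seg->next;
                ring->enqueue = ring->enq_seg->trbs;
        }
}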

giveback_first_trb

static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
                unsigned int ep_index, unsigned int stream_id, int start_cycle,
                struct xhci_generic_trb *start_trb)
{
        /*
         * Pass all the TRBs to the hardware at once and make sure this write
         * isn't reordered.
         */
        wmb();
        if (start_cycle)
                start_trb->field[3] |= cpu_to_le32(start_cycle);
        else
                start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}

xhci_ring_ep_doorbell: ring the endpoint doorbell register

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
                unsigned int slot_id,
                unsigned int ep_index,
                unsigned int stream_id)
{
        __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
        unsigned int ep_state = ep->ep_state;

        /* Don't ring the doorbell for this endpoint if there are pending
         * cancellations because we don't want to interrupt processing.
         * We don't want to restart any stream rings if there's a set dequeue
         * pointer command pending because the device can choose to start any
         * stream once the endpoint is on the HW schedule.
         */
        if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
            (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
                return;

        trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));

        writel(DB_VALUE(ep_index, stream_id), db_addr);
        /* The CPU has better things to do at this point than wait for a
         * write-posting flush.  It'll get there soon enough.
         */
}
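
DB_VALUE() packs the endpoint and stream ID into the doorbell register value. Roughly, from xhci.h:

/* Endpoint Target (DCI = ep_index + 1) in bits 7:0, stream ID in bits 31:16 */
#define DB_VALUE(ep, stream)    ((((ep) + 1) & 0xff) | ((stream) << 16))
#define DB_VALUE_HOST           0x00000000      /* ring the command ring doorbell (slot 0) */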

 

After the USB flash drive has completed the data transfer

The HCD takes the completion interrupt and gives the URB back to the device driver

data urb:

inc_deq														trace_xhci_inc_deq(ring);
== xhci_irq(struct usb_hcd *hcd)
	== xhci_handle_event(struct xhci_hcd *xhci)				trace_xhci_handle_event(xhci->event_ring, &event->generic);
		== handle_tx_event()								trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb)
			== process_bulk_intr_td()		//Process bulk and interrupt tds, update urb status and actual_length.
				== finish_td()
					== xhci_td_cleanup()
						== xhci_giveback_urb_in_irq()		trace_xhci_urb_giveback(urb);
							== usb_hcd_unlink_urb_from_ep(hcd, urb);
							== usb_hcd_giveback_urb(hcd, urb, status);
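
The chain above consumes events from the event ring that xhci_event_ring_init set up earlier. A much simplified sketch of the consumer side (the real logic is spread across xhci_irq(), xhci_handle_event() and xhci_update_erst_dequeue() in xhci-ring.c):

/* Simplified illustration of event ring consumption, not the actual driver code */
static void xhci_consume_events_sketch(struct xhci_hcd *xhci)
{
        struct xhci_ring *ring = xhci->event_ring;
        union xhci_trb *event = ring->dequeue;
        u64 deq;

        /* an event TRB belongs to software while its cycle bit matches ours */
        while ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) ==
               ring->cycle_state) {
                /*
                 * Dispatch on TRB_FIELD_TO_TYPE(): transfer event, command
                 * completion, port status change, ...  Then advance the
                 * dequeue pointer (inc_deq), which toggles cycle_state when
                 * the ring wraps back to the first segment.
                 */
                inc_deq(xhci, ring);
                event = ring->dequeue;
        }

        /* tell the controller how far we got; writing 1 to EHB (RW1C) clears it */
        deq = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
        xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | ERST_EHB,
                      &xhci->ir_set->erst_dequeue);
}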

 usb_hcd_unlink_urb_from_ep

/**
 * usb_hcd_unlink_urb_from_ep - remove an URB from its endpoint queue
 * @hcd: host controller to which @urb was submitted
 * @urb: URB being unlinked
 *
 * Host controller drivers should call this routine before calling
 * usb_hcd_giveback_urb().  The HCD's private spinlock must be held and
 * interrupts must be disabled.  The actions carried out here are required
 * for URB completion.
 */
void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb)
{
        /* clear all state linking urb to this dev (and hcd) */
        spin_lock(&hcd_urb_list_lock);
        list_del_init(&urb->urb_list);
        spin_unlock(&hcd_urb_list_lock);
}
EXPORT_SYMBOL_GPL(usb_hcd_unlink_urb_from_ep);

/*
 * This is the common part of the URB message submission code
 *
 * All URBs from the usb-storage driver involved in handling a queued scsi
 * command _must_ pass through this function (or something like it) for the
 * abort mechanisms to work properly.
 */
static int usb_stor_msg_common(struct us_data *us, int timeout)
{
        struct completion urb_done;
        long timeleft;
        int status;

        /* don't submit URBs during abort processing */
        if (test_bit(US_FLIDX_ABORTING, &us->dflags))
                return -EIO;

        /* set up data structures for the wakeup system */
        init_completion(&urb_done);

        /* fill the common fields in the URB */
        us->current_urb->context = &urb_done;
        us->current_urb->transfer_flags = 0;

        /*
         * we assume that if transfer_buffer isn't us->iobuf then it
         * hasn't been mapped for DMA.  Yes, this is clunky, but it's
         * easier than always having the caller tell us whether the
         * transfer buffer has already been mapped.
         */
        if (us->current_urb->transfer_buffer == us->iobuf)
                us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
        us->current_urb->transfer_dma = us->iobuf_dma;

        /* submit the URB */
        status = usb_submit_urb(us->current_urb, GFP_NOIO);
        if (status) {
                /* something went wrong */
                return status;
        }

        /*
         * since the URB has been submitted successfully, it's now okay
         * to cancel it
         */
        set_bit(US_FLIDX_URB_ACTIVE, &us->dflags);

        /* did an abort occur during the submission? */
        if (test_bit(US_FLIDX_ABORTING, &us->dflags)) {

                /* cancel the URB, if it hasn't been cancelled already */
                if (test_and_clear_bit(US_FLIDX_URB_ACTIVE, &us->dflags)) {
                        usb_stor_dbg(us, "-- cancelling URB\n");
                        usb_unlink_urb(us->current_urb);
                }
        }

        /* wait for the completion of the URB */
        // wait for usb_stor_blocking_completion to run and wake up this thread
        timeleft = wait_for_completion_interruptible_timeout(
                        &urb_done, timeout ? : MAX_SCHEDULE_TIMEOUT);

        clear_bit(US_FLIDX_URB_ACTIVE, &us->dflags);

        if (timeleft <= 0) {
                usb_stor_dbg(us, "%s -- cancelling URB\n",
                             timeleft == 0 ? "Timeout" : "Signal");
                usb_kill_urb(us->current_urb);
        }

        /* return the URB status */
        return us->current_urb->status;
}
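
The completion callback waited for above is trivial; in drivers/usb/storage/transport.c it is essentially:

/*
 * This is the completion handler which will wake us up when an URB
 * completes.
 */
static void usb_stor_blocking_completion(struct urb *urb)
{
        struct completion *urb_done_ptr = urb->context;

        complete(urb_done_ptr);
}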

usb_stor_bulk_transfer_buf

/*
 * Transfer one buffer via bulk pipe, without timeouts, but allowing early
 * termination.  Return codes are USB_STOR_XFER_xxx.  If the bulk pipe
 * stalls during the transfer, the halt is automatically cleared.
 */
int usb_stor_bulk_transfer_buf(struct us_data *us, unsigned int pipe,
        void *buf, unsigned int length, unsigned int *act_len)
{
        int result;

        usb_stor_dbg(us, "xfer %u bytes\n", length);

        /* fill and submit the URB */
        usb_fill_bulk_urb(us->current_urb, us->pusb_dev, pipe, buf, length,
                      usb_stor_blocking_completion, NULL);
        result = usb_stor_msg_common(us, 0);

        /* store the actual length of the data transferred */
        if (act_len)
                *act_len = us->current_urb->actual_length;
        return interpret_urb_result(us, pipe, length, result,
                        us->current_urb->actual_length);
}
EXPORT_SYMBOL_GPL(usb_stor_bulk_transfer_buf);
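
usb_stor_Bulk_transport

usb_stor_Bulk_transport below drives the Bulk-Only Transport protocol: command stage (CBW on the bulk-out pipe), optional data stage, then status stage (CSW on the bulk-in pipe). For reference, the wrappers it fills and parses are defined roughly as follows in include/linux/usb/storage.h:

/* command block wrapper */
struct bulk_cb_wrap {
        __le32  Signature;              /* contains 'USBC' */
        __u32   Tag;                    /* unique per command id */
        __le32  DataTransferLength;     /* size of the data stage */
        __u8    Flags;                  /* US_BULK_FLAG_IN for device-to-host data */
        __u8    Lun;                    /* LUN (normally 0) */
        __u8    Length;                 /* length of the CDB */
        __u8    CDB[16];                /* max command */
};

/* command status wrapper */
struct bulk_cs_wrap {
        __le32  Signature;      /* contains 'USBS' */
        __u32   Tag;            /* same as the original command's Tag */
        __le32  Residue;        /* amount of data not transferred */
        __u8    Status;         /* US_BULK_STAT_OK / _FAIL / _PHASE */
};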


int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
{
        struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
        struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
        unsigned int transfer_length = scsi_bufflen(srb);
        unsigned int residue;
        int result;
        int fake_sense = 0;
        unsigned int cswlen;
        unsigned int cbwlen = US_BULK_CB_WRAP_LEN;

        /* Take care of BULK32 devices; set extra byte to 0 */
        if (unlikely(us->fflags & US_FL_BULK32)) {
                cbwlen = 32;
                us->iobuf[31] = 0;
        }

        /* set up the command wrapper */
        bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
        bcb->DataTransferLength = cpu_to_le32(transfer_length);
        bcb->Flags = srb->sc_data_direction == DMA_FROM_DEVICE ?
                US_BULK_FLAG_IN : 0;
        bcb->Tag = ++us->tag;
        bcb->Lun = srb->device->lun;
        if (us->fflags & US_FL_SCM_MULT_TARG)
                bcb->Lun |= srb->device->id << 4;
        bcb->Length = srb->cmd_len;

        /* copy the command payload */
        memset(bcb->CDB, 0, sizeof(bcb->CDB));
        memcpy(bcb->CDB, srb->cmnd, bcb->Length);

        /* send it to out endpoint */
        usb_stor_dbg(us, "Bulk Command S 0x%x T 0x%x L %d F %d Trg %d LUN %d CL %d\n",
                     le32_to_cpu(bcb->Signature), bcb->Tag,
                     le32_to_cpu(bcb->DataTransferLength), bcb->Flags,
                     (bcb->Lun >> 4), (bcb->Lun & 0x0F),
                     bcb->Length);
        // do the USB bulk-out transfer of the CBW
        result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
                                bcb, cbwlen, NULL);
        usb_stor_dbg(us, "Bulk command transfer result=%d\n", result);
        if (result != USB_STOR_XFER_GOOD)
                return USB_STOR_TRANSPORT_ERROR;

        /* DATA STAGE */
        /* send/receive data payload, if there is any */

        /*
         * Some USB-IDE converter chips need a 100us delay between the
         * command phase and the data phase.  Some devices need a little
         * more than that, probably because of clock rate inaccuracies.
         */
        if (unlikely(us->fflags & US_FL_GO_SLOW))
                usleep_range(125, 150);

        if (transfer_length) {
                //pick the bulk pipe based on the DMA direction
                unsigned int pipe = srb->sc_data_direction == DMA_FROM_DEVICE ?
                                us->recv_bulk_pipe : us->send_bulk_pipe;
                //do the actual data transfer over the scatter-gather list
                result = usb_stor_bulk_srb(us, pipe, srb);
                usb_stor_dbg(us, "Bulk data transfer result 0x%x\n", result);
                if (result == USB_STOR_XFER_ERROR)
                        return USB_STOR_TRANSPORT_ERROR;

                /*
                 * If the device tried to send back more data than the
                 * amount requested, the spec requires us to transfer
                 * the CSW anyway.  Since there's no point retrying the
                 * command, we'll return fake sense data indicating
                 * Illegal Request, Invalid Field in CDB.
                 */
                if (result == USB_STOR_XFER_LONG)
                        fake_sense = 1;

                /*
                 * Sometimes a device will mistakenly skip the data phase
                 * and go directly to the status phase without sending a
                 * zero-length packet.  If we get a 13-byte response here,
                 * check whether it really is a CSW.
                 */
                if (result == USB_STOR_XFER_SHORT &&
                                srb->sc_data_direction == DMA_FROM_DEVICE &&
                                transfer_length - scsi_get_resid(srb) ==
                                        US_BULK_CS_WRAP_LEN) {
                        struct scatterlist *sg = NULL;
                        unsigned int offset = 0;

                        if (usb_stor_access_xfer_buf((unsigned char *) bcs,
                                        US_BULK_CS_WRAP_LEN, srb, &sg,
                                        &offset, FROM_XFER_BUF) ==
                                                US_BULK_CS_WRAP_LEN &&
                                        bcs->Signature ==
                                                cpu_to_le32(US_BULK_CS_SIGN)) {
                                usb_stor_dbg(us, "Device skipped data phase\n");
                                scsi_set_resid(srb, transfer_length);
                                goto skipped_data_phase;
                        }
                }
        }

        /*
         * See flow chart on pg 15 of the Bulk Only Transport spec for
         * an explanation of how this code works.
         */

        /* get CSW for device status */
        usb_stor_dbg(us, "Attempting to get CSW...\n");
        result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
                                bcs, US_BULK_CS_WRAP_LEN, &cswlen);

        /*
         * Some broken devices add unnecessary zero-length packets to the
         * end of their data transfers.  Such packets show up as 0-length
         * CSWs.  If we encounter such a thing, try to read the CSW again.
         */
        if (result == USB_STOR_XFER_SHORT && cswlen == 0) {
                usb_stor_dbg(us, "Received 0-length CSW; retrying...\n");
                result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
                                bcs, US_BULK_CS_WRAP_LEN, &cswlen);
        }

        /* did the attempt to read the CSW fail? */
        if (result == USB_STOR_XFER_STALLED) {

                /* get the status again */
                usb_stor_dbg(us, "Attempting to get CSW (2nd try)...\n");
                result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
                                bcs, US_BULK_CS_WRAP_LEN, NULL);
        }

        /* if we still have a failure at this point, we're in trouble */
        usb_stor_dbg(us, "Bulk status result = %d\n", result);
        if (result != USB_STOR_XFER_GOOD)
                return USB_STOR_TRANSPORT_ERROR;

 skipped_data_phase:
        /* check bulk status */
        residue = le32_to_cpu(bcs->Residue);
        usb_stor_dbg(us, "Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n",
                     le32_to_cpu(bcs->Signature), bcs->Tag,
                     residue, bcs->Status);
        if (!(bcs->Tag == us->tag || (us->fflags & US_FL_BULK_IGNORE_TAG)) ||
                bcs->Status > US_BULK_STAT_PHASE) {
                usb_stor_dbg(us, "Bulk logical error\n");
                return USB_STOR_TRANSPORT_ERROR;
        }

        /*
         * Some broken devices report odd signatures, so we do not check them
         * for validity against the spec. We store the first one we see,
         * and check subsequent transfers for validity against this signature.
         */
        if (!us->bcs_signature) {
                us->bcs_signature = bcs->Signature;
                if (us->bcs_signature != cpu_to_le32(US_BULK_CS_SIGN))
                        usb_stor_dbg(us, "Learnt BCS signature 0x%08X\n",
                                     le32_to_cpu(us->bcs_signature));
        } else if (bcs->Signature != us->bcs_signature) {
                usb_stor_dbg(us, "Signature mismatch: got %08X, expecting %08X\n",
                             le32_to_cpu(bcs->Signature),
                             le32_to_cpu(us->bcs_signature));
                return USB_STOR_TRANSPORT_ERROR;
        }

        /*
         * try to compute the actual residue, based on how much data
         * was really transferred and what the device tells us
         */
        if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) {

                /*
                 * Heuristically detect devices that generate bogus residues
                 * by seeing what happens with INQUIRY and READ CAPACITY
                 * commands.
                 */
                if (bcs->Status == US_BULK_STAT_OK &&
                                scsi_get_resid(srb) == 0 &&
                                        ((srb->cmnd[0] == INQUIRY &&
                                                transfer_length == 36) ||
                                        (srb->cmnd[0] == READ_CAPACITY &&
                                                transfer_length == 8))) {
                        us->fflags |= US_FL_IGNORE_RESIDUE;

                } else {
                        residue = min(residue, transfer_length);
                        scsi_set_resid(srb, max(scsi_get_resid(srb),
                                                               (int) residue));
                }
        }

        /* based on the status code, we report good or bad */
        switch (bcs->Status) {
                case US_BULK_STAT_OK:
                        /* device babbled -- return fake sense data */
                        if (fake_sense) {
                                memcpy(srb->sense_buffer,
                                       usb_stor_sense_invalidCDB,
                                       sizeof(usb_stor_sense_invalidCDB));
                                return USB_STOR_TRANSPORT_NO_SENSE;
                        }

                        /* command good -- note that data could be short */
                        return USB_STOR_TRANSPORT_GOOD;

                case US_BULK_STAT_FAIL:
                        /* command failed */
                        return USB_STOR_TRANSPORT_FAILED;

                case US_BULK_STAT_PHASE:
                        /*
                         * phase error -- note that a transport reset will be
                         * invoked by the invoke_transport() function
                         */
                        return USB_STOR_TRANSPORT_ERROR;
        }

        /* we should never get here, but if we do, we're in trouble */
        return USB_STOR_TRANSPORT_ERROR;
}
EXPORT_SYMBOL_GPL(usb_stor_Bulk_transport);
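
The wrappers that usb_stor_Bulk_transport() fills in and parses are the standard Bulk-Only Transport CBW/CSW layouts. For reference, they are declared in include/linux/usb/storage.h roughly as follows (the field comments here are my own annotations, not the header's):

/* Command Block Wrapper (CBW), sent on the bulk-out pipe, 31 bytes on the wire */
struct bulk_cb_wrap {
        __le32  Signature;              /* 'USBC' (US_BULK_CB_SIGN) */
        __u32   Tag;                    /* echoed back in the CSW */
        __le32  DataTransferLength;     /* expected length of the data stage */
        __u8    Flags;                  /* bit 7 set = device-to-host (US_BULK_FLAG_IN) */
        __u8    Lun;                    /* target LUN */
        __u8    Length;                 /* number of valid bytes in CDB[] */
        __u8    CDB[16];                /* the SCSI command itself */
};

/* Command Status Wrapper (CSW), read back on the bulk-in pipe, 13 bytes */
struct bulk_cs_wrap {
        __le32  Signature;              /* 'USBS' (US_BULK_CS_SIGN) */
        __u32   Tag;                    /* must match the CBW's Tag */
        __le32  Residue;                /* bytes the device did not transfer */
        __u8    Status;                 /* US_BULK_STAT_OK / _FAIL / _PHASE */
};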


usb_stor_bulk_srb: performs the actual data transfer over the scatter-gather list

/*
 * Commonly used helper. Transfers a complete command via
 * usb_stor_bulk_transfer_sglist() below and sets the SCSI residual.
 */
int usb_stor_bulk_srb(struct us_data* us, unsigned int pipe,
                      struct scsi_cmnd* srb)
{
        unsigned int partial;
        // hand the srb's scatter-gather list to the bulk sg transfer helper
        int result = usb_stor_bulk_transfer_sglist(us, pipe, scsi_sglist(srb),
                                      scsi_sg_count(srb), scsi_bufflen(srb),
                                      &partial);

        scsi_set_resid(srb, scsi_bufflen(srb) - partial);
        return result;
}
EXPORT_SYMBOL_GPL(usb_stor_bulk_srb);
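
The only bookkeeping usb_stor_bulk_srb() adds on top of the transfer itself is the SCSI residual: whatever part of scsi_bufflen() was not actually moved is reported back through scsi_set_resid(). A throwaway sketch of the arithmetic (the numbers are made up, purely for illustration):

/* hypothetical values, only to show how the residual is derived */
unsigned int bufflen = 4096;    /* scsi_bufflen(srb): bytes requested        */
unsigned int partial = 3584;    /* bytes the sg transfer actually moved      */
/* scsi_set_resid(srb, bufflen - partial) records the 512 bytes still owed   */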

/*
 * Transfer a scatter-gather list via bulk transfer
 *
 * This function does basically the same thing as usb_stor_bulk_transfer_buf()
 * above, but it uses the usbcore scatter-gather library.
 */
static int usb_stor_bulk_transfer_sglist(struct us_data *us, unsigned int pipe,
                struct scatterlist *sg, int num_sg, unsigned int length,
                unsigned int *act_len)
{
        int result;

        /* don't submit s-g requests during abort processing */
        if (test_bit(US_FLIDX_ABORTING, &us->dflags))
                return USB_STOR_XFER_ERROR;

        /* initialize the scatter-gather request block */
        usb_stor_dbg(us, "xfer %u bytes, %d entries\n", length, num_sg);
        // initialize the scatter-gather request (one URB per sg entry)
        result = usb_sg_init(&us->current_sg, us->pusb_dev, pipe, 0,
                        sg, num_sg, length, GFP_NOIO);
        if (result) {
                usb_stor_dbg(us, "usb_sg_init returned %d\n", result);
                return USB_STOR_XFER_ERROR;
        }

        /*
         * since the block has been initialized successfully, it's now
         * okay to cancel it
         */
        set_bit(US_FLIDX_SG_ACTIVE, &us->dflags);

        /* did an abort occur during the submission? */
        if (test_bit(US_FLIDX_ABORTING, &us->dflags)) {

                /* cancel the request, if it hasn't been cancelled already */
                if (test_and_clear_bit(US_FLIDX_SG_ACTIVE, &us->dflags)) {
                        usb_stor_dbg(us, "-- cancelling sg request\n");
                        usb_sg_cancel(&us->current_sg);
                }
        }

        /* wait for the completion of the transfer */
        usb_sg_wait(&us->current_sg);
        clear_bit(US_FLIDX_SG_ACTIVE, &us->dflags);

        result = us->current_sg.status;
        if (act_len)
                *act_len = us->current_sg.bytes;
        return interpret_urb_result(us, pipe, length, result,
                        us->current_sg.bytes);
}
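
The three calls used above (usb_sg_init() to build one URB per sg entry, usb_sg_wait() to block until they finish, then reading status/bytes) are the generic usbcore scatter-gather helpers, not something specific to usb-storage. Below is a minimal sketch of a caller, assuming a device udev with a bulk-in endpoint at address 1 and an already-built sg table; the function name and endpoint are illustrative, not taken from the driver:

#include <linux/usb.h>
#include <linux/scatterlist.h>

/* illustrative helper: read 'len' bytes into 'sg' (nents entries) */
static int example_bulk_sg_read(struct usb_device *udev,
                                struct scatterlist *sg, int nents, size_t len)
{
        struct usb_sg_request io;
        unsigned int pipe = usb_rcvbulkpipe(udev, 1);   /* assumed ep address */
        int ret;

        /* period = 0 for bulk; GFP_NOIO since this may sit on the I/O path */
        ret = usb_sg_init(&io, udev, pipe, 0, sg, nents, len, GFP_NOIO);
        if (ret)
                return ret;

        usb_sg_wait(&io);       /* blocks until every queued URB has completed */

        pr_debug("sg read: status=%d, bytes=%zu\n", io.status, io.bytes);
        return io.status;       /* 0 on success, else a negative errno */
}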

void usb_sg_wait(struct usb_sg_request *io)
{
        int i;
        int entries = io->entries;

        /* queue the urbs.  */
        spin_lock_irq(&io->lock);
        i = 0;
        while (i < entries && !io->status) {
                int retval;

                io->urbs[i]->dev = io->dev;
                spin_unlock_irq(&io->lock);
                // submit the actual I/O request (URB) to the host controller
                retval = usb_submit_urb(io->urbs[i], GFP_NOIO);

                switch (retval) {
                        /* maybe retrying will recover */
                case -ENXIO:    /* hc didn't queue this one */
                case -EAGAIN:
                case -ENOMEM:
                        retval = 0;
                        yield();
                        break;

                        /* no error? continue immediately.
                         *
                         * NOTE: to work better with UHCI (4K I/O buffer may
                         * need 3K of TDs) it may be good to limit how many
                         * URBs are queued at once; N milliseconds?
                         */
                case 0:
                        ++i;
                        cpu_relax();
                        break;

                        /* fail any uncompleted urbs */
                default:
                        io->urbs[i]->status = retval;
                        dev_dbg(&io->dev->dev, "%s, submit --> %d\n",
                                __func__, retval);
                        usb_sg_cancel(io);
                }
                spin_lock_irq(&io->lock);
                if (retval && (io->status == 0 || io->status == -ECONNRESET))
                        io->status = retval;
        }
        io->count -= entries - i;
        /* once every URB is accounted for, wake the waiter on io->complete */
        if (io->count == 0)
                complete(&io->complete);
        spin_unlock_irq(&io->lock);

        /* OK, yes, this could be packaged as non-blocking.
         * So could the submit loop above ... but it's easier to
         * solve neither problem than to solve both!
         */
        wait_for_completion(&io->complete);

        sg_clean(io);
}
EXPORT_SYMBOL_GPL(usb_sg_wait);
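
usb_sg_wait() only touches a handful of fields of the request it was handed. For orientation, struct usb_sg_request (declared in include/linux/usb.h) looks roughly like this; everything below the status/bytes pair is private to usbcore, and the comments are mine:

struct usb_sg_request {
        int                     status;         /* 0 on success, else the first error seen */
        size_t                  bytes;          /* total bytes actually transferred */

        /* the remaining members are private to usbcore */
        spinlock_t              lock;           /* protects status/count updates */
        struct usb_device       *dev;
        int                     pipe;
        int                     entries;        /* number of URBs built by usb_sg_init() */
        struct urb              **urbs;
        int                     count;          /* URBs still outstanding */
        struct completion       complete;       /* signalled when count reaches 0 */
};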
