-rw-r--r--  drivers/usb/host/xhci-dbg.c   81
-rw-r--r--  drivers/usb/host/xhci-hcd.c    5
-rw-r--r--  drivers/usb/host/xhci-mem.c  305
-rw-r--r--  drivers/usb/host/xhci.h      296
4 files changed, 686 insertions(+), 1 deletion(-)
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index a7798b46049..5724683cef1 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -56,6 +56,8 @@ void xhci_dbg_regs(struct xhci_hcd *xhci)
temp = xhci_readl(xhci, &xhci->cap_regs->db_off);
xhci_dbg(xhci, "// @%x = 0x%x DBOFF\n",
(unsigned int) &xhci->cap_regs->db_off, temp);
+ xhci_dbg(xhci, "// Doorbell array at 0x%x:\n",
+ (unsigned int) xhci->dba);
}
void xhci_print_cap_regs(struct xhci_hcd *xhci)
@@ -227,3 +229,82 @@ void xhci_print_registers(struct xhci_hcd *xhci)
xhci_print_cap_regs(xhci);
xhci_print_op_regs(xhci);
}
+
+
+/**
+ * Debug a segment within an xHCI ring.
+ *
+ * Prints out all TRBs in the segment, even those after the Link TRB.
+ * Every segment is expected to end in a Link TRB; a segment without one
+ * is a bug.
+ *
+ * XXX: Should we print out TRBs that the HC owns?  As long as we don't
+ * write, that should be fine...  We shouldn't expect the memory pointed
+ * to by a TRB to be valid at all.  Do we care about TRBs the HC owns?
+ * Probably, for HC debugging.
+ */
+void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
+{
+ int i;
+ u32 addr = (u32) seg->dma;
+ union xhci_trb *trb = seg->trbs;
+
+ for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
+ trb = &seg->trbs[i];
+ xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
+ (unsigned int) trb->link.segment_ptr[0],
+ (unsigned int) trb->link.segment_ptr[1],
+ (unsigned int) trb->link.intr_target,
+ (unsigned int) trb->link.control);
+ addr += sizeof(*trb);
+ }
+}
+
+/**
+ * Debugging for an xHCI ring, which is a queue broken into multiple segments.
+ *
+ * Print out each segment in the ring.  TODO: check that the DMA address in
+ * each Link TRB actually matches the stored DMA address of the next segment,
+ * that the link toggle bit is only set at the end of the ring, and that the
+ * dequeue and enqueue pointers point into this ring (not some other ring).
+ */
+void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ /* FIXME: Throw an error if any segment doesn't have a Link TRB */
+ struct xhci_segment *seg;
+ struct xhci_segment *first_seg = ring->first_seg;
+ xhci_debug_segment(xhci, first_seg);
+
+ for (seg = first_seg->next; seg != first_seg; seg = seg->next)
+ xhci_debug_segment(xhci, seg);
+}
+
+void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
+{
+ u32 addr = (u32) erst->erst_dma_addr;
+ int i;
+ struct xhci_erst_entry *entry;
+
+ for (i = 0; i < erst->num_entries; ++i) {
+ entry = &erst->entries[i];
+ xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
+ (unsigned int) addr,
+ (unsigned int) entry->seg_addr[0],
+ (unsigned int) entry->seg_addr[1],
+ (unsigned int) entry->seg_size,
+ (unsigned int) entry->rsvd);
+ addr += sizeof(*entry);
+ }
+}
+
+void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
+{
+ u32 val;
+
+ val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
+ xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val);
+ val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]);
+ xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val);
+}
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 64fcc22e9d5..011f4781066 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -266,6 +266,11 @@ int xhci_run(struct usb_hcd *hcd)
&xhci->ir_set->irq_pending);
xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_dbg(xhci, "Command ring memory map follows:\n");
+ xhci_debug_ring(xhci, xhci->cmd_ring);
+ xhci_dbg(xhci, "ERST memory map follows:\n");
+ xhci_dbg_erst(xhci, &xhci->erst);
+
temp = xhci_readl(xhci, &xhci->op_regs->command);
temp |= (CMD_RUN);
xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 0e383f9c380..7cf15ca854b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -21,18 +21,215 @@
*/
#include <linux/usb.h>
+#include <linux/pci.h>
#include "xhci.h"
+/*
+ * Allocates a generic ring segment from the ring pool, sets the dma address,
+ * initializes the segment to zero, and sets the private next pointer to NULL.
+ *
+ * Section 4.11.1.1:
+ * "All components of all Command and Transfer TRBs shall be initialized to '0'"
+ */
+static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
+{
+ struct xhci_segment *seg;
+ dma_addr_t dma;
+
+ seg = kzalloc(sizeof(*seg), flags);
+ if (!seg)
+ return NULL;
+ xhci_dbg(xhci, "Allocating priv segment structure at 0x%x\n",
+ (unsigned int) seg);
+
+ seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
+ if (!seg->trbs) {
+ kfree(seg);
+ return NULL;
+ }
+ xhci_dbg(xhci, "// Allocating segment at 0x%x (virtual) 0x%x (DMA)\n",
+ (unsigned int) seg->trbs, (u32) dma);
+
+ memset(seg->trbs, 0, SEGMENT_SIZE);
+ seg->dma = dma;
+ seg->next = NULL;
+
+ return seg;
+}
+
+static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
+{
+ if (!seg)
+ return;
+ if (seg->trbs) {
+ xhci_dbg(xhci, "Freeing DMA segment at 0x%x"
+ " (virtual) 0x%x (DMA)\n",
+ (unsigned int) seg->trbs, (u32) seg->dma);
+ dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
+ seg->trbs = NULL;
+ }
+ xhci_dbg(xhci, "Freeing priv segment structure at 0x%x\n",
+ (unsigned int) seg);
+ kfree(seg);
+}
+
+/*
+ * Make the prev segment point to the next segment.
+ *
+ * Change the last TRB in the prev segment to be a Link TRB which points to the
+ * DMA address of the next segment. The caller needs to set any Link TRB
+ * related flags, such as End TRB, Toggle Cycle, and no snoop.
+ */
+static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
+ struct xhci_segment *next, bool link_trbs)
+{
+ u32 val;
+
+ if (!prev || !next)
+ return;
+ prev->next = next;
+ if (link_trbs) {
+ prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;
+
+ /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
+ val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
+ val &= ~TRB_TYPE_BITMASK;
+ val |= TRB_TYPE(TRB_LINK);
+ prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
+ }
+ xhci_dbg(xhci, "Linking segment 0x%x to segment 0x%x (DMA)\n",
+ prev->dma, next->dma);
+}
+
+/* XXX: Do we need the hcd structure in all these functions? */
+static void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ struct xhci_segment *seg;
+ struct xhci_segment *first_seg;
+
+ if (!ring || !ring->first_seg)
+ return;
+ first_seg = ring->first_seg;
+ seg = first_seg->next;
+ xhci_dbg(xhci, "Freeing ring at 0x%x\n", (unsigned int) ring);
+ while (seg != first_seg) {
+ struct xhci_segment *next = seg->next;
+ xhci_segment_free(xhci, seg);
+ seg = next;
+ }
+ xhci_segment_free(xhci, first_seg);
+ ring->first_seg = NULL;
+ kfree(ring);
+}
+
+/**
+ * Create a new ring with zero or more segments.
+ *
+ * Link each segment together into a ring.
+ * Set the end flag and the cycle toggle bit on the last segment.
+ * See section 4.9.1 and figures 15 and 16.
+ */
+static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+ unsigned int num_segs, bool link_trbs, gfp_t flags)
+{
+ struct xhci_ring *ring;
+ struct xhci_segment *prev;
+
+ ring = kzalloc(sizeof(*ring), flags);
+ if (!ring)
+ return NULL;
+ xhci_dbg(xhci, "Allocating ring at 0x%x\n", (unsigned int) ring);
+
+ if (num_segs == 0)
+ return ring;
+
+ ring->first_seg = xhci_segment_alloc(xhci, flags);
+ if (!ring->first_seg)
+ goto fail;
+ num_segs--;
+
+ prev = ring->first_seg;
+ while (num_segs > 0) {
+ struct xhci_segment *next;
+
+ next = xhci_segment_alloc(xhci, flags);
+ if (!next)
+ goto fail;
+ xhci_link_segments(xhci, prev, next, link_trbs);
+
+ prev = next;
+ num_segs--;
+ }
+ xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
+
+ if (link_trbs) {
+ /* See section 4.9.2.1 and 6.4.4.1 */
+ prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
+ xhci_dbg(xhci, "Wrote link toggle flag to"
+ " segment 0x%x (virtual), 0x%x (DMA)\n",
+ (unsigned int) prev, (u32) prev->dma);
+ }
+ /* The ring is empty, so the enqueue pointer == dequeue pointer */
+ ring->enqueue = ring->first_seg->trbs;
+ ring->dequeue = ring->enqueue;
+ /* The ring is initialized to 0.  The producer must write 1 to the
+ * cycle bit to hand over ownership of the TRB, so PCS = 1.  The
+ * consumer must compare CCS to the cycle bit to check ownership,
+ * so CCS = 1.
+ */
+ ring->cycle_state = 1;
+
+ return ring;
+
+fail:
+ xhci_ring_free(xhci, ring);
+ return NULL;
+}
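+
+/*
+ * Editorial sketch, illustration only (not part of this patch): how a
+ * producer would hand a TRB on one of these rings to the HC.  All other
+ * fields are written first; the cycle bit is flipped last, after a write
+ * barrier, so the HC never sees a half-written TRB.  Link TRB handling
+ * and the cycle_state toggle at the ring wrap are deliberately omitted.
+ */
+static void xhci_sketch_queue_trb(struct xhci_ring *ring, u32 control)
+{
+ union xhci_trb *trb = ring->enqueue;
+
+ trb->link.control = control & ~TRB_CYCLE;
+ wmb();
+ trb->link.control |= ring->cycle_state & TRB_CYCLE;
+ ring->enqueue++;
+}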
+
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ int size;
+
+ /* XXX: Free all the segments in the various rings */
+
+ /* Free the Event Ring Segment Table and the actual Event Ring */
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
+ size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
+ if (xhci->erst.entries)
+ pci_free_consistent(pdev, size,
+ xhci->erst.entries, xhci->erst.erst_dma_addr);
+ xhci->erst.entries = NULL;
+ xhci_dbg(xhci, "Freed ERST\n");
+ if (xhci->event_ring)
+ xhci_ring_free(xhci, xhci->event_ring);
+ xhci->event_ring = NULL;
+ xhci_dbg(xhci, "Freed event ring\n");
+
+ xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
+ xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
+ if (xhci->cmd_ring)
+ xhci_ring_free(xhci, xhci->cmd_ring);
+ xhci->cmd_ring = NULL;
+ xhci_dbg(xhci, "Freed command ring\n");
+ if (xhci->segment_pool)
+ dma_pool_destroy(xhci->segment_pool);
+ xhci->segment_pool = NULL;
+ xhci_dbg(xhci, "Freed segment pool\n");
xhci->page_size = 0;
xhci->page_shift = 0;
}
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
+ dma_addr_t dma;
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
unsigned int val, val2;
+ struct xhci_segment *seg;
u32 page_size;
int i;
@@ -65,7 +262,113 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
(unsigned int) val);
xhci_writel(xhci, val, &xhci->op_regs->config_reg);
- xhci->ir_set = &xhci->run_regs->ir_set[0];
+ /*
+ * Initialize the ring segment pool.  The ring must be a contiguous
+ * structure comprised of TRBs.  TRBs need only be 16-byte aligned,
+ * but command ring segments must be 64-byte aligned, so pick the
+ * stricter alignment requirement.
+ */
+ xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+ SEGMENT_SIZE, 64, xhci->page_size);
+ if (!xhci->segment_pool)
+ goto fail;
+
+ /* Set up the command ring to have one segment for now. */
+ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
+ if (!xhci->cmd_ring)
+ goto fail;
+ xhci_dbg(xhci, "Allocated command ring at 0x%x\n", (unsigned int) xhci->cmd_ring);
+ xhci_dbg(xhci, "First segment DMA is 0x%x\n", (unsigned int) xhci->cmd_ring->first_seg->dma);
+
+ /* Set the address in the Command Ring Control register */
+ val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
+ val = (val & ~CMD_RING_ADDR_MASK) |
+ (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
+ xhci->cmd_ring->cycle_state;
+ xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
+ xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
+ xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
+ xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
+ xhci_dbg_cmd_ptrs(xhci);
+
+ val = xhci_readl(xhci, &xhci->cap_regs->db_off);
+ val &= DBOFF_MASK;
+ xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
+ " from cap regs base addr\n", val);
+ xhci->dba = (void *) xhci->cap_regs + val;
+ xhci_dbg_regs(xhci);
+ xhci_print_run_regs(xhci);
+ /* Set ir_set to interrupt register set 0 */
+ xhci->ir_set = (void *) xhci->run_regs->ir_set;
+
+ /*
+ * Event ring setup: Allocate a normal ring, but also setup
+ * the event ring segment table (ERST). Section 4.9.3.
+ */
+ xhci_dbg(xhci, "// Allocating event ring\n");
+ xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
+ if (!xhci->event_ring)
+ goto fail;
+
+ xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
+ sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
+ if (!xhci->erst.entries)
+ goto fail;
+ xhci_dbg(xhci, "// Allocated event ring segment table at 0x%x\n", dma);
+
+ memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
+ xhci->erst.num_entries = ERST_NUM_SEGS;
+ xhci->erst.erst_dma_addr = dma;
+ xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = 0x%x, dma addr = 0x%x\n",
+ xhci->erst.num_entries,
+ (unsigned int) xhci->erst.entries,
+ xhci->erst.erst_dma_addr);
+
+ /* set ring base address and size for each segment table entry */
+ for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
+ struct xhci_erst_entry *entry = &xhci->erst.entries[val];
+ entry->seg_addr[1] = 0;
+ entry->seg_addr[0] = seg->dma;
+ entry->seg_size = TRBS_PER_SEGMENT;
+ entry->rsvd = 0;
+ seg = seg->next;
+ }
+
+ /* set ERST count with the number of entries in the segment table */
+ val = xhci_readl(xhci, &xhci->ir_set->erst_size);
+ val &= ERST_SIZE_MASK;
+ val |= ERST_NUM_SEGS;
+ xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
+ val);
+ xhci_writel(xhci, val, &xhci->ir_set->erst_size);
+
+ xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
+ /* set the segment table base address */
+ xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%x\n",
+ xhci->erst.erst_dma_addr);
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+ val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
+ val &= ERST_PTR_MASK;
+ val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
+ xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
+
+ /* Set the event ring dequeue address */
+ xhci_dbg(xhci, "// Set ERST dequeue address for ir_set 0 = 0x%x%x\n",
+ xhci->erst.entries[0].seg_addr[1], xhci->erst.entries[0].seg_addr[0]);
+ val = xhci_readl(xhci, &xhci->run_regs->ir_set[0].erst_dequeue[0]);
+ val &= ERST_PTR_MASK;
+ val |= (xhci->erst.entries[0].seg_addr[0] & ~ERST_PTR_MASK);
+ xhci_writel(xhci, val, &xhci->run_regs->ir_set[0].erst_dequeue[0]);
+ xhci_writel(xhci, xhci->erst.entries[0].seg_addr[1],
+ &xhci->run_regs->ir_set[0].erst_dequeue[1]);
+ xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
+ xhci_print_ir_set(xhci, xhci->ir_set, 0);
+
+ /*
+ * XXX: Might need to set the Interrupter Moderation Register to
+ * something other than the default (~1ms minimum between interrupts).
+ * See section 5.5.1.2.
+ */
return 0;
fail:
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 59fae2e5ea5..ed331310f1a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -241,6 +241,18 @@ struct xhci_op_regs {
*/
#define DEV_NOTE_FWAKE ENABLE_DEV_NOTE(1)
+/* CRCR - Command Ring Control Register - cmd_ring bitmasks */
+/* bit 0 is the command ring cycle state */
+/* stop ring operation after completion of the currently executing command */
+#define CMD_RING_PAUSE (1 << 1)
+/* stop ring immediately - abort the currently executing command */
+#define CMD_RING_ABORT (1 << 2)
+/* true: command ring is running */
+#define CMD_RING_RUNNING (1 << 3)
+/* bits 4:5 reserved and should be preserved */
+/* Command Ring pointer - bit mask for the lower 32 bits. */
+#define CMD_RING_ADDR_MASK (0xffffffc0)
+
/* CONFIG - Configure Register - config_reg bitmasks */
/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
#define MAX_DEVS(p) ((p) & 0xff)
@@ -391,6 +403,7 @@ struct intr_reg {
* a work queue (or delayed service routine)?
*/
#define ERST_EHB (1 << 3)
+#define ERST_PTR_MASK (0xf)
/**
* struct xhci_run_regs
@@ -407,6 +420,275 @@ struct xhci_run_regs {
struct intr_reg ir_set[128];
} __attribute__ ((packed));
+/**
+ * struct doorbell_array
+ *
+ * Section 5.6
+ */
+struct xhci_doorbell_array {
+ u32 doorbell[256];
+} __attribute__ ((packed));
+
+#define DB_TARGET_MASK 0xFFFFFF00
+#define DB_STREAM_ID_MASK 0x0000FFFF
+#define DB_TARGET_HOST 0x0
+#define DB_STREAM_ID_HOST 0x0
+#define DB_MASK (0xff << 8)
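+
+/*
+ * Editorial sketch, illustration only (not part of this patch): ringing
+ * doorbell 0 tells the xHC to start processing the command ring.  Both
+ * the target and the stream ID are 0 for the host controller itself
+ * (section 5.6).  struct xhci_hcd and xhci_writel() appear later in this
+ * header; the ordering here is for exposition only.
+ */
+static inline void xhci_sketch_ring_cmd_db(struct xhci_hcd *xhci)
+{
+ xhci_writel(xhci, DB_TARGET_HOST | DB_STREAM_ID_HOST,
+ &xhci->dba->doorbell[0]);
+}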
+
+
+struct xhci_transfer_event {
+ /* 64-bit buffer address, or immediate data */
+ u32 buffer[2];
+ u32 transfer_len;
+ /* This field is interpreted differently based on the type of TRB */
+ u32 flags;
+} __attribute__ ((packed));
+
+/* Completion Code - only applicable for some types of TRBs */
+#define COMP_CODE_MASK (0xff << 24)
+#define GET_COMP_CODE(p) (((p) & COMP_CODE_MASK) >> 24)
+#define COMP_SUCCESS 1
+/* Data Buffer Error */
+#define COMP_DB_ERR 2
+/* Babble Detected Error */
+#define COMP_BABBLE 3
+/* USB Transaction Error */
+#define COMP_TX_ERR 4
+/* TRB Error - some TRB field is invalid */
+#define COMP_TRB_ERR 5
+/* Stall Error - USB device is stalled */
+#define COMP_STALL 6
+/* Resource Error - HC doesn't have memory for that device configuration */
+#define COMP_ENOMEM 7
+/* Bandwidth Error - not enough room in schedule for this dev config */
+#define COMP_BW_ERR 8
+/* No Slots Available Error - HC ran out of device slots */
+#define COMP_ENOSLOTS 9
+/* Invalid Stream Type Error */
+#define COMP_STREAM_ERR 10
+/* Slot Not Enabled Error - doorbell rung for disabled device slot */
+#define COMP_EBADSLT 11
+/* Endpoint Not Enabled Error */
+#define COMP_EBADEP 12
+/* Short Packet */
+#define COMP_SHORT_TX 13
+/* Ring Underrun - doorbell rung for an empty isoc OUT ep ring */
+#define COMP_UNDERRUN 14
+/* Ring Overrun - isoc IN ep ring is empty when ep is scheduled to RX */
+#define COMP_OVERRUN 15
+/* Virtual Function Event Ring Full Error */
+#define COMP_VF_FULL 16
+/* Parameter Error - Context parameter is invalid */
+#define COMP_EINVAL 17
+/* Bandwidth Overrun Error - isoc ep exceeded its allocated bandwidth */
+#define COMP_BW_OVER 18
+/* Context State Error - illegal context state transition requested */
+#define COMP_CTX_STATE 19
+/* No Ping Response Error - HC didn't get PING_RESPONSE in time to TX */
+#define COMP_PING_ERR 20
+/* Event Ring is full */
+#define COMP_ER_FULL 21
+/* Missed Service Error - HC couldn't service an isoc ep within interval */
+#define COMP_MISSED_INT 23
+/* Successfully stopped command ring */
+#define COMP_CMD_STOP 24
+/* Successfully aborted current command and stopped command ring */
+#define COMP_CMD_ABORT 25
+/* Stopped - transfer was terminated by a stop endpoint command */
+#define COMP_STOP 26
+/* Same as COMP_STOP, but the transferred length in the event is invalid */
+#define COMP_STOP_INVAL 27
+/* Control Abort Error - Debug Capability - control pipe aborted */
+#define COMP_DBG_ABORT 28
+/* Completion codes 29 and 30 reserved */
+/* Isoc Buffer Overrun - an isoc IN ep sent more data than could fit in TD */
+#define COMP_BUFF_OVER 31
+/* Event Lost Error - xHC has an "internal event overrun condition" */
+#define COMP_ISSUES 32
+/* Undefined Error - reported when other error codes don't apply */
+#define COMP_UNKNOWN 33
+/* Invalid Stream ID Error */
+#define COMP_STRID_ERR 34
+/* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
+/* FIXME - check for this */
+#define COMP_2ND_BW_ERR 35
+/* Split Transaction Error */
+#define COMP_SPLIT_ERR 36
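+
+/*
+ * Editorial sketch, illustration only (not part of this patch): an event
+ * handler would pull the completion code out of the flags dword of a
+ * struct xhci_transfer_event, e.g.:
+ *
+ * if (GET_COMP_CODE(event->flags) != COMP_SUCCESS)
+ * xhci_dbg(xhci, "Transfer failed, completion code %u\n",
+ * GET_COMP_CODE(event->flags));
+ */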
+
+struct xhci_link_trb {
+ /* 64-bit segment pointer*/
+ u32 segment_ptr[2];
+ u32 intr_target;
+ u32 control;
+} __attribute__ ((packed));
+
+/* control bitfields */
+#define LINK_TOGGLE (0x1<<1)
+
+
+union xhci_trb {
+ struct xhci_link_trb link;
+ struct xhci_transfer_event trans_event;
+};
+
+/* Normal TRB fields */
+/* transfer_len bitmasks - bits 0:16 */
+#define TRB_LEN(p) ((p) & 0x1ffff)
+/* TD size - data remaining in the TD (including this TRB), in 1KB blocks:
+ * the byte count is shifted right by 10 and packed into bits 17:21. */
+/* Interrupter Target - which MSI-X vector to target the completion event at */
+#define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22)
+#define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff)
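+
+/*
+ * Editorial sketch, illustration only (not part of this patch): the macros
+ * above combine into the transfer_len dword of a Normal TRB.  The values
+ * are invented: a 1024-byte buffer, 2048 bytes left in the TD, completion
+ * event aimed at interrupter 0:
+ *
+ * u32 len = TRB_LEN(1024) | TD_REMAINDER(2048) | TRB_INTR_TARGET(0);
+ * unsigned int vec = GET_INTR_TARGET(len);  (yields 0 again)
+ */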
+
+/* Cycle bit - indicates TRB ownership by HC or HCD */
+#define TRB_CYCLE (1<<0)
+/*
+ * Force next event data TRB to be evaluated before task switch.
+ * Used to pass OS data back after a TD completes.
+ */
+#define TRB_ENT (1<<1)
+/* Interrupt on short packet */
+#define TRB_ISP (1<<2)
+/* Set PCIe no snoop attribute */
+#define TRB_NO_SNOOP (1<<3)
+/* Chain multiple TRBs into a TD */
+#define TRB_CHAIN (1<<4)
+/* Interrupt on completion */
+#define TRB_IOC (1<<5)
+/* The buffer pointer contains immediate data */
+#define TRB_IDT (1<<6)
+
+
+/* Control transfer TRB specific fields */
+#define TRB_DIR_IN (1<<16)
+
+/* TRB bit mask */
+#define TRB_TYPE_BITMASK (0xfc00)
+#define TRB_TYPE(p) ((p) << 10)
+/* TRB type IDs */
+/* bulk, interrupt, isoc scatter/gather, and control data stage */
+#define TRB_NORMAL 1
+/* setup stage for control transfers */
+#define TRB_SETUP 2
+/* data stage for control transfers */
+#define TRB_DATA 3
+/* status stage for control transfers */
+#define TRB_STATUS 4
+/* isoc transfers */
+#define TRB_ISOC 5
+/* TRB for linking ring segments */
+#define TRB_LINK 6
+#define TRB_EVENT_DATA 7
+/* Transfer Ring No-op (not for the command ring) */
+#define TRB_TR_NOOP 8
+/* Command TRBs */
+/* Enable Slot Command */
+#define TRB_ENABLE_SLOT 9
+/* Disable Slot Command */
+#define TRB_DISABLE_SLOT 10
+/* Address Device Command */
+#define TRB_ADDR_DEV 11
+/* Configure Endpoint Command */
+#define TRB_CONFIG_EP 12
+/* Evaluate Context Command */
+#define TRB_EVAL_CONTEXT 13
+/* Reset Transfer Ring Command */
+#define TRB_RESET_RING 14
+/* Stop Transfer Ring Command */
+#define TRB_STOP_RING 15
+/* Set Transfer Ring Dequeue Pointer Command */
+#define TRB_SET_DEQ 16
+/* Reset Device Command */
+#define TRB_RESET_DEV 17
+/* Force Event Command (opt) */
+#define TRB_FORCE_EVENT 18
+/* Negotiate Bandwidth Command (opt) */
+#define TRB_NEG_BANDWIDTH 19
+/* Set Latency Tolerance Value Command (opt) */
+#define TRB_SET_LT 20
+/* Get port bandwidth Command */
+#define TRB_GET_BW 21
+/* Force Header Command - generate a transaction or link management packet */
+#define TRB_FORCE_HEADER 22
+/* No-op Command - not for transfer rings */
+#define TRB_CMD_NOOP 23
+/* TRB IDs 24-31 reserved */
+/* Event TRBS */
+/* Transfer Event */
+#define TRB_TRANSFER 32
+/* Command Completion Event */
+#define TRB_COMPLETION 33
+/* Port Status Change Event */
+#define TRB_PORT_STATUS 34
+/* Bandwidth Request Event (opt) */
+#define TRB_BANDWIDTH_EVENT 35
+/* Doorbell Event (opt) */
+#define TRB_DOORBELL 36
+/* Host Controller Event */
+#define TRB_HC_EVENT 37
+/* Device Notification Event - device sent function wake notification */
+#define TRB_DEV_NOTE 38
+/* MFINDEX Wrap Event - microframe counter wrapped */
+#define TRB_MFINDEX_WRAP 39
+/* TRB IDs 40-47 reserved, 48-63 is vendor-defined */
+
+/*
+ * TRBS_PER_SEGMENT must be a multiple of 4,
+ * since the command ring is 64-byte aligned.
+ * It must also be greater than 16.
+ */
+#define TRBS_PER_SEGMENT 64
+#define SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
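+
+/*
+ * Editorial sketch, illustration only (not part of this patch): the two
+ * constraints above could be enforced at build time, e.g. from
+ * xhci_mem_init():
+ *
+ * BUILD_BUG_ON(TRBS_PER_SEGMENT % 4 != 0);
+ * BUILD_BUG_ON(TRBS_PER_SEGMENT <= 16);
+ */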
+
+struct xhci_segment {
+ union xhci_trb *trbs;
+ /* private to HCD */
+ struct xhci_segment *next;
+ dma_addr_t dma;
+} __attribute__ ((packed));
+
+struct xhci_ring {
+ struct xhci_segment *first_seg;
+ union xhci_trb *enqueue;
+ union xhci_trb *dequeue;
+ /*
+ * Write the cycle state into the TRB cycle field to give ownership of
+ * the TRB to the host controller (if we are the producer), or to check
+ * if we own the TRB (if we are the consumer). See section 4.9.1.
+ */
+ u32 cycle_state;
+};
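+
+/*
+ * Editorial sketch, illustration only (not part of this patch): the
+ * consumer-side ownership test that the cycle_state comment above
+ * describes.  A TRB belongs to the consumer only while its cycle bit
+ * matches the ring's consumer cycle state.
+ */
+static inline int xhci_sketch_trb_is_ours(struct xhci_ring *ring,
+ union xhci_trb *trb)
+{
+ return (trb->link.control & TRB_CYCLE) ==
+ (ring->cycle_state & TRB_CYCLE);
+}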
+
+struct xhci_erst_entry {
+ /* 64-bit event ring segment address */
+ u32 seg_addr[2];
+ u32 seg_size;
+ /* Set to zero */
+ u32 rsvd;
+} __attribute__ ((packed));
+
+struct xhci_erst {
+ struct xhci_erst_entry *entries;
+ unsigned int num_entries;
+ /* xhci->event_ring keeps track of segment dma addresses */
+ dma_addr_t erst_dma_addr;
+ /* Num entries the ERST can contain */
+ unsigned int erst_size;
+};
+
+/*
+ * Each segment table entry is 4*32 bits long.  1K seems like an OK size:
+ * (1K bytes * 8 bits/byte) / (4*32 bits) = 64 segment table entries,
+ * meaning the table can describe up to 64 ring segments.
+ */
+/* Initial number of segments allocated for the event ring */
+#define ERST_NUM_SEGS 1
+/* Maximum number of entries a 1K segment table can hold */
+#define ERST_SIZE 64
+/* Initial number of event ring segment table entries to allocate */
+#define ERST_ENTRIES 1
+/* XXX: Make these module parameters */
+
/* There is one xhci_hcd structure per controller */
struct xhci_hcd {
@@ -414,6 +696,7 @@ struct xhci_hcd {
struct xhci_cap_regs __iomem *cap_regs;
struct xhci_op_regs __iomem *op_regs;
struct xhci_run_regs __iomem *run_regs;
+ struct xhci_doorbell_array __iomem *dba;
/* Our HCD's current interrupter register set */
struct intr_reg __iomem *ir_set;
@@ -441,6 +724,14 @@ struct xhci_hcd {
/* only one MSI vector for now, but might need more later */
int msix_count;
struct msix_entry *msix_entries;
+ /* data structures */
+ struct xhci_ring *cmd_ring;
+ struct xhci_ring *event_ring;
+ struct xhci_erst erst;
+
+ /* DMA pools */
+ struct dma_pool *device_pool;
+ struct dma_pool *segment_pool;
};
/* convert between an HCD pointer and the corresponding xhci_hcd */
@@ -488,6 +779,11 @@ static inline void xhci_writel(const struct xhci_hcd *xhci,
/* xHCI debugging */
void xhci_print_ir_set(struct xhci_hcd *xhci, struct intr_reg *ir_set, int set_num);
void xhci_print_registers(struct xhci_hcd *xhci);
+void xhci_dbg_regs(struct xhci_hcd *xhci);
+void xhci_print_run_regs(struct xhci_hcd *xhci);
+void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
+void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
/* xHCI memory management */
void xhci_mem_cleanup(struct xhci_hcd *xhci);