Diffstat (limited to 'src/gallium/winsys/dri/intel/intel_batchbuffer.c')

-rw-r--r--  src/gallium/winsys/dri/intel/intel_batchbuffer.c | 488
1 file changed, 294 insertions(+), 194 deletions(-)
diff --git a/src/gallium/winsys/dri/intel/intel_batchbuffer.c b/src/gallium/winsys/dri/intel/intel_batchbuffer.c
index 5830b88b37..05223bf700 100644
--- a/src/gallium/winsys/dri/intel/intel_batchbuffer.c
+++ b/src/gallium/winsys/dri/intel/intel_batchbuffer.c
@@ -25,108 +25,95 @@
  *
  **************************************************************************/
 
-#include <errno.h>
-#include <stdio.h>
 #include "intel_batchbuffer.h"
-#include "intel_context.h"
-#include "intel_screen.h"
-#include "intel_reg.h"
-#include "drm.h"
-
-/* Relocations in kernel space:
- *  - pass dma buffer seperately
- *  - memory manager knows how to patch
- *  - pass list of dependent buffers
- *  - pass relocation list
- *
- * Either:
- *  - get back an offset for buffer to fire
- *  - memory manager knows how to fire buffer
- *
- * Really want the buffer to be AGP and pinned.
- *
- */
-
-/* Cliprect fence: The highest fence protecting a dma buffer
- * containing explicit cliprect information.  Like the old drawable
- * lock but irq-driven.  X server must wait for this fence to expire
- * before changing cliprects [and then doing sw rendering?].  For
- * other dma buffers, the scheduler will grab current cliprect info
- * and mix into buffer.  X server must hold the lock while changing
- * cliprects???  Make per-drawable.  Need cliprects in shared memory
- * -- beats storing them with every cmd buffer in the queue.
- *
- * ==> X server must wait for this fence to expire before touching the
- * framebuffer with new cliprects.
- *
- * ==> Cliprect-dependent buffers associated with a
- * cliprect-timestamp.  All of the buffers associated with a timestamp
- * must go to hardware before any buffer with a newer timestamp.
- *
- * ==> Dma should be queued per-drawable for correct X/GL
- * synchronization.  Or can fences be used for this?
- *
- * Applies to: Blit operations, metaops, X server operations -- X
- * server automatically waits on its own dma to complete before
- * modifying cliprects ???
- */
+#include "intel_ioctl.h"
+#include <errno.h>
 
+#if 0
 static void
-intel_dump_batchbuffer(uint offset, uint * ptr, uint count)
+intel_dump_batchbuffer(GLuint offset, GLuint * ptr, GLuint count)
 {
    int i;
-   printf("\n\n\nSTART BATCH (%d dwords):\n", count / 4);
-   for (i = 0; i < count / 4; i += 1)
-      printf("\t0x%08x\n", ptr[i]);
-   printf("END BATCH\n\n\n");
+   fprintf(stderr, "\n\n\nSTART BATCH (%d dwords):\n", count / 4);
+   for (i = 0; i < count / 4; i += 4)
+      fprintf(stderr, "0x%x:\t0x%08x 0x%08x 0x%08x 0x%08x\n",
+              offset + i * 4, ptr[i], ptr[i + 1], ptr[i + 2], ptr[i + 3]);
+   fprintf(stderr, "END BATCH\n\n\n");
+}
+#endif
+
+static void
+intel_realloc_relocs(struct intel_batchbuffer *batch, int num_relocs)
+{
+   unsigned long size = num_relocs * I915_RELOC0_STRIDE + I915_RELOC_HEADER;
+
+   size *= sizeof(uint32_t);
+   batch->reloc = realloc(batch->reloc, size);
+   batch->reloc_size = num_relocs;
 }
 
 
 void
 intel_batchbuffer_reset(struct intel_batchbuffer *batch)
 {
-   int i;
-
-   if (batch->map) {
-      driBOUnmap(batch->buffer);
-      batch->map = NULL;
-   }
-
    /*
    * Get a new, free batchbuffer.
    */
-   batch->size = BATCH_SZ;
-   driBOData(batch->buffer, batch->size, NULL, 0);
+   drmBO *bo;
+   struct drm_bo_info_req *req;
+
+   driBOUnrefUserList(batch->list);
+   driBOResetList(batch->list);
 
-   driBOResetList(&batch->list);
+   batch->size = 4096; // ZZZ JB batch->intel->intelScreen->maxBatchSize;
+   driBOData(batch->buffer, batch->size, NULL, NULL, 0);
 
    /*
-    * Unreference buffers previously on the relocation list.
+    * Add the batchbuffer to the validate list.
    */
-   for (i = 0; i < batch->nr_relocs; i++) {
-      struct buffer_reloc *r = &batch->reloc[i];
-      driBOUnReference(r->buf);
-   }
-   batch->list_count = 0;
-   batch->nr_relocs = 0;
-   batch->flags = 0;
+
+   driBOAddListItem(batch->list, batch->buffer,
+                    DRM_BO_FLAG_EXE | DRM_BO_FLAG_MEM_TT,
+                    DRM_BO_FLAG_EXE | DRM_BO_MASK_MEM,
+                    &batch->dest_location, &batch->node);
+
+   req = &batch->node->bo_arg.d.req.bo_req;
 
    /*
-    * We don't refcount the batchbuffer itself since we can't destroy it
-    * while it's on the list.
+    * Set up information needed for us to make relocations
+    * relative to the underlying drm buffer objects.
    */
-   driBOAddListItem(&batch->list, batch->buffer,
-                    DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE,
-                    DRM_BO_MASK_MEM | DRM_BO_FLAG_EXE);
+   driReadLockKernelBO();
+   bo = driBOKernel(batch->buffer);
+   req->presumed_offset = (uint64_t) bo->offset;
+   req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
+   batch->drmBOVirtual = (uint8_t *) bo->virtual;
+   driReadUnlockKernelBO();
 
+   /*
+    * Adjust the relocation buffer size.
+    */
+
+   if (batch->reloc_size > INTEL_MAX_RELOCS ||
+       batch->reloc == NULL)
+      intel_realloc_relocs(batch, INTEL_DEFAULT_RELOCS);
+
+   assert(batch->reloc != NULL);
+   batch->reloc[0] = 0; /* No relocs yet. */
+   batch->reloc[1] = 1; /* Reloc type 1 */
+   batch->reloc[2] = 0; /* Only a single relocation list. */
+   batch->reloc[3] = 0; /* Only a single relocation list. */
 
    batch->map = driBOMap(batch->buffer, DRM_BO_FLAG_WRITE, 0);
+   batch->poolOffset = driBOPoolOffset(batch->buffer);
    batch->ptr = batch->map;
+   batch->dirty_state = ~0;
+   batch->nr_relocs = 0;
+   batch->flags = 0;
+   batch->id = 0;//batch->intel->intelScreen->batch_id++;
 }
 
-
 /*======================================================================
  * Public functions
  */
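The reloc stream that intel_realloc_relocs() sizes and intel_batchbuffer_reset() initializes above is a flat uint32_t array: a fixed header followed by fixed-stride entries. The struct view below is an illustration only, inferred from the reloc[0..3] indexing in this hunk and in intel_offset_relocation() further down; it assumes I915_RELOC_HEADER and I915_RELOC0_STRIDE are both four dwords, and the struct names are hypothetical, not part of the driver.

#include <stdint.h>

/* Illustration only: layout implied by this file's reloc[] indexing,
 * assuming I915_RELOC_HEADER == 4 and I915_RELOC0_STRIDE == 4. */
struct i915_reloc_header {        /* batch->reloc[0..3] */
   uint32_t num_relocs;           /* patched in at execbuf time */
   uint32_t reloc_type;           /* always 1 in this code */
   uint32_t reserved[2];          /* single relocation list */
};

struct i915_reloc_entry {         /* one per relocation */
   uint32_t batch_offset;         /* byte offset in the batch BO to patch */
   uint32_t delta;                /* pre_add: offset within the target BO */
   uint32_t target_index;         /* target's slot in the validate list */
   uint32_t dest_location;        /* where the batch itself was validated */
};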
@@ -141,121 +128,253 @@ intel_batchbuffer_alloc(struct intel_context *intel)
                  &batch->buffer, 4096,
                  DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE, 0);
 
    batch->last_fence = NULL;
-   driBOCreateList(20, &batch->list);
+   batch->list = driBOCreateList(20);
+   batch->reloc = NULL;
    intel_batchbuffer_reset(batch);
 
    return batch;
 }
 
-
 void
 intel_batchbuffer_free(struct intel_batchbuffer *batch)
 {
    if (batch->last_fence) {
       driFenceFinish(batch->last_fence,
-                     DRM_FENCE_TYPE_EXE | DRM_I915_FENCE_TYPE_RW,
-                     GL_FALSE);
-      driFenceUnReference(batch->last_fence);
-      batch->last_fence = NULL;
+                     DRM_FENCE_TYPE_EXE, GL_FALSE);
+      driFenceUnReference(&batch->last_fence);
    }
    if (batch->map) {
       driBOUnmap(batch->buffer);
       batch->map = NULL;
    }
    driBOUnReference(batch->buffer);
+   driBOFreeList(batch->list);
+   if (batch->reloc)
+      free(batch->reloc);
    batch->buffer = NULL;
    free(batch);
 }
 
+void
+intel_offset_relocation(struct intel_batchbuffer *batch,
+                        unsigned pre_add,
+                        struct _DriBufferObject *driBO,
+                        uint64_t val_flags,
+                        uint64_t val_mask)
+{
+   int itemLoc;
+   struct _drmBONode *node;
+   uint32_t *reloc;
+   struct drm_bo_info_req *req;
+
+   driBOAddListItem(batch->list, driBO, val_flags, val_mask,
+                    &itemLoc, &node);
+   req = &node->bo_arg.d.req.bo_req;
+
+   if (!(req->hint & DRM_BO_HINT_PRESUMED_OFFSET)) {
+
+      /*
+       * Stop other threads from tampering with the underlying
+       * drmBO while we're reading its offset.
+       */
+
+      driReadLockKernelBO();
+      req->presumed_offset = (uint64_t) driBOKernel(driBO)->offset;
+      driReadUnlockKernelBO();
+      req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
+   }
+
+   pre_add += driBOPoolOffset(driBO);
+
+   if (batch->nr_relocs == batch->reloc_size)
+      intel_realloc_relocs(batch, batch->reloc_size * 2);
+
+   reloc = batch->reloc +
+      (I915_RELOC_HEADER + batch->nr_relocs * I915_RELOC0_STRIDE);
+
+   reloc[0] = ((uint8_t *) batch->ptr - batch->drmBOVirtual);
+   intel_batchbuffer_emit_dword(batch, req->presumed_offset + pre_add);
+   reloc[1] = pre_add;
+   reloc[2] = itemLoc;
+   reloc[3] = batch->dest_location;
+   batch->nr_relocs++;
+}
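intel_offset_relocation() replaces the old intel_batchbuffer_emit_reloc() (removed at the end of this diff): it adds the target to the validate list, emits the pointer dword itself, primed with presumed_offset + pre_add so the kernel only patches dwords whose BOs actually moved, and records a reloc entry. A hypothetical caller, sketched against the functions visible in this diff (the opcode value and the helper name are placeholders):

/* Hypothetical caller; opcode and helper name are made up. */
static void
emit_pointer_to(struct intel_batchbuffer *batch,
                struct _DriBufferObject *target, unsigned delta)
{
   /* First the command dword (placeholder opcode)... */
   intel_batchbuffer_emit_dword(batch, 0 /* opcode */);

   /* ...then the address dword, emitted by intel_offset_relocation()
    * itself along with the relocation record. */
   intel_offset_relocation(batch, delta, target,
                           DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
                           DRM_BO_MASK_MEM | DRM_BO_FLAG_READ);
}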
 
 static void
-intel_batch_ioctl(struct intel_context *intel,
-                  uint start_offset, uint used, boolean allow_unlock)
+i915_drm_copy_reply(const struct drm_bo_info_rep * rep, drmBO * buf)
 {
-   drmI830BatchBuffer batch;
-
-   batch.start = start_offset;
-   batch.used = used;
-   batch.cliprects = NULL; /* unused */
-   batch.num_cliprects = 0;
-   batch.DR1 = 0;
-   batch.DR4 = 0; /* still need this ? */
-
-   DBG(IOCTL, "%s: 0x%x..0x%x DR4: %x cliprects: %d\n",
-       __FUNCTION__,
-       batch.start,
-       batch.start + batch.used * 4, batch.DR4, batch.num_cliprects);
-
-   if (drmCommandWrite(intel->driFd, DRM_I830_BATCHBUFFER, &batch,
-                       sizeof(batch))) {
-      printf("DRM_I830_BATCHBUFFER: %d\n", -errno);
-      UNLOCK_HARDWARE(intel);
-      exit(1);
-   }
+   buf->handle = rep->handle;
+   buf->flags = rep->flags;
+   buf->size = rep->size;
+   buf->offset = rep->offset;
+   buf->mapHandle = rep->arg_handle;
+   buf->proposedFlags = rep->proposed_flags;
+   buf->start = rep->buffer_start;
+   buf->fenceFlags = rep->fence_flags;
+   buf->replyFlags = rep->rep_flags;
+   buf->pageAlignment = rep->page_alignment;
 }
 
+static int
+i915_execbuf(struct intel_batchbuffer *batch,
+             GLuint used,
+             GLboolean ignore_cliprects,
+             drmBOList *list,
+             struct drm_i915_execbuffer *ea)
+{
+   struct intel_context *intel = batch->intel;
+   drmBONode *node;
+   drmMMListHead *l;
+   struct drm_i915_op_arg *arg, *first;
+   struct drm_bo_op_req *req;
+   struct drm_bo_info_rep *rep;
+   uint64_t *prevNext = NULL;
+   drmBO *buf;
+   int ret = 0;
+   uint32_t count = 0;
+
+   first = NULL;
+   for (l = list->list.next; l != &list->list; l = l->next) {
+      node = DRMLISTENTRY(drmBONode, l, head);
+
+      arg = &node->bo_arg;
+      req = &arg->d.req;
+
+      if (!first)
+         first = arg;
+
+      if (prevNext)
+         *prevNext = (unsigned long) arg;
+
+      prevNext = &arg->next;
+      req->bo_req.handle = node->buf->handle;
+      req->op = drm_bo_validate;
+      req->bo_req.flags = node->arg0;
+      req->bo_req.mask = node->arg1;
+      req->bo_req.hint |= 0;
+      count++;
+   }
+
+   memset(ea, 0, sizeof(*ea));
+   ea->num_buffers = count;
+   ea->batch.start = batch->poolOffset;
+   ea->batch.used = used;
+#if 0 /* ZZZ JB: no cliprects used */
+   ea->batch.cliprects = intel->pClipRects;
+   ea->batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
+   ea->batch.DR1 = 0;
+   ea->batch.DR4 = 0;((((GLuint) intel->drawX) & 0xffff) |
+                      (((GLuint) intel->drawY) << 16));
+#else
+   ea->batch.cliprects = NULL;
+   ea->batch.num_cliprects = 0;
+   ea->batch.DR1 = 0;
+   ea->batch.DR4 = 0;
+#endif
+   ea->fence_arg.flags = DRM_I915_FENCE_FLAG_FLUSHED;
+   ea->ops_list = (unsigned long) first;
+   first->reloc_ptr = (unsigned long) batch->reloc;
+   batch->reloc[0] = batch->nr_relocs;
+
+   //return -EFAULT;
+   do {
+      ret = drmCommandWriteRead(intel->driFd, DRM_I915_EXECBUFFER, ea,
+                                sizeof(*ea));
+   } while (ret == -EAGAIN);
+
+   if (ret != 0)
+      return ret;
+
+   for (l = list->list.next; l != &list->list; l = l->next) {
+      node = DRMLISTENTRY(drmBONode, l, head);
+      arg = &node->bo_arg;
+      rep = &arg->d.rep.bo_info;
+
+      if (!arg->handled) {
+         return -EFAULT;
+      }
+      if (arg->d.rep.ret)
+         return arg->d.rep.ret;
+
+      buf = node->buf;
+      i915_drm_copy_reply(rep, buf);
+   }
+   return 0;
+}
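The first loop in i915_execbuf() strings every drm_i915_op_arg into a singly linked list through its 64-bit next field, so a single ioctl can hand the kernel the whole validate list: ea->ops_list points at the head and first->reloc_ptr at the reloc stream. The same chaining pattern in isolation, with hypothetical names, terminated explicitly rather than by the memset the driver relies on:

#include <stdint.h>

struct op_node {
   uint64_t next;                 /* user-space pointer the kernel walks */
   /* ...request/reply payload... */
};

/* Chain count nodes into a list rooted at *head, as the loop above
 * does with prevNext. */
static void
chain_ops(struct op_node *nodes, int count, uint64_t *head)
{
   uint64_t *prev_next = head;
   int i;

   for (i = 0; i < count; i++) {
      *prev_next = (unsigned long) &nodes[i];   /* patch the predecessor */
      prev_next = &nodes[i].next;               /* remember our own link */
   }
   *prev_next = 0;                              /* terminate the list */
}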
 
 /* TODO: Push this whole function into bufmgr.
  */
-static void
+static struct _DriFenceObject *
 do_flush_locked(struct intel_batchbuffer *batch,
-                uint used, boolean allow_unlock)
+                GLuint used,
+                GLboolean ignore_cliprects, GLboolean allow_unlock)
 {
-   uint *ptr;
-   uint i, fenceFlags;
+   struct intel_context *intel = batch->intel;
    struct _DriFenceObject *fo;
+   drmFence fence;
+   drmBOList *boList;
+   struct drm_i915_execbuffer ea;
+   int ret = 0;
+
+   driBOValidateUserList(batch->list);
+   boList = driGetdrmBOList(batch->list);
+
+#if 0 /* ZZZ JB Allways run */
+   if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
+#else
+   if (1) {
+#endif
+      ret = i915_execbuf(batch, used, ignore_cliprects, boList, &ea);
+   } else {
+      driPutdrmBOList(batch->list);
+      fo = NULL;
+      goto out;
+   }
+   driPutdrmBOList(batch->list);
+   if (ret)
+      abort();
 
-   driBOValidateList(batch->intel->driFd, &batch->list);
-
-   /* Apply the relocations.  This nasty map indicates to me that the
-    * whole task should be done internally by the memory manager, and
-    * that dma buffers probably need to be pinned within agp space.
-    */
-   ptr = (uint *) driBOMap(batch->buffer, DRM_BO_FLAG_WRITE,
-                           DRM_BO_HINT_ALLOW_UNFENCED_MAP);
+   if (ea.fence_arg.error != 0) {
 
-   for (i = 0; i < batch->nr_relocs; i++) {
-      struct buffer_reloc *r = &batch->reloc[i];
+      /*
+       * The hardware has been idled by the kernel.
+       * Don't fence the driBOs.
+       */
 
-      ptr[r->offset / 4] = driBOOffset(r->buf) + r->delta;
+      if (batch->last_fence)
+         driFenceUnReference(&batch->last_fence);
+#if 0 /* ZZZ JB: no _mesa_* funcs in gallium */
+      _mesa_printf("fence error\n");
+#endif
+      batch->last_fence = NULL;
+      fo = NULL;
+      goto out;
    }
 
-   if (0)
-      intel_dump_batchbuffer(0, ptr, used);
+   fence.handle = ea.fence_arg.handle;
+   fence.fence_class = ea.fence_arg.fence_class;
+   fence.type = ea.fence_arg.type;
+   fence.flags = ea.fence_arg.flags;
+   fence.signaled = ea.fence_arg.signaled;
 
-   driBOUnmap(batch->buffer);
-   batch->map = NULL;
-
-   intel_batch_ioctl(batch->intel,
-                     driBOOffset(batch->buffer),
-                     used, allow_unlock);
-
-   /*
-    * Kernel fencing. The flags tells the kernel that we've
-    * programmed an MI_FLUSH.
-    */
-   fenceFlags = DRM_I915_FENCE_FLAG_FLUSHED;
-   fo = driFenceBuffers(batch->intel->driFd, "Batch fence", fenceFlags);
+   fo = driBOFenceUserList(batch->intel->intelScreen->mgr, batch->list,
+                           "SuperFence", &fence);
 
+   if (driFenceType(fo) & DRM_I915_FENCE_TYPE_RW) {
+      if (batch->last_fence)
+         driFenceUnReference(&batch->last_fence);
       /*
-    * User space fencing.
-    */
-   driBOFence(batch->buffer, fo);
-
-   if (driFenceType(fo) == DRM_FENCE_TYPE_EXE) {
-      /*
-       * Oops.  We only validated a batch buffer.  This means we
-       * didn't do any proper rendering.  Discard this fence object.
-       */
-      driFenceUnReference(fo);
-   }
-   else {
-      driFenceUnReference(batch->last_fence);
-      batch->last_fence = fo;
-      for (i = 0; i < batch->nr_relocs; i++) {
-         struct buffer_reloc *r = &batch->reloc[i];
-         driBOFence(r->buf, fo);
-      }
-   }
+       * FIXME: Context last fence??
+       */
+      batch->last_fence = fo;
+      driFenceReference(fo);
+   }
+ out:
+#if 0 /* ZZZ JB: fix this */
+   intel->vtbl.lost_hardware(intel);
+#else
+   (void)intel;
+#endif
+   return fo;
 }
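do_flush_locked() now fences the whole validate list at once (the "SuperFence") and caches it in batch->last_fence only when the fence covers actual rendering (DRM_I915_FENCE_TYPE_RW); a pure EXE fence would only prove the batch was parsed. A small helper sketched from the fence calls this diff already uses elsewhere; the function name is hypothetical:

/* Block until everything covered by the batch's cached fence has
 * retired, then drop the reference. */
static void
intel_wait_last_fence(struct intel_batchbuffer *batch)
{
   if (!batch->last_fence)
      return;

   driFenceFinish(batch->last_fence,
                  driFenceType(batch->last_fence), GL_FALSE);
   driFenceUnReference(&batch->last_fence);
}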
@@ -263,29 +382,41 @@ struct _DriFenceObject *
 intel_batchbuffer_flush(struct intel_batchbuffer *batch)
 {
    struct intel_context *intel = batch->intel;
-   uint used = batch->ptr - batch->map;
-   const boolean was_locked = intel->locked;
+   GLuint used = batch->ptr - batch->map;
+   GLboolean was_locked = intel->locked;
+   struct _DriFenceObject *fence;
 
    if (used == 0)
       return batch->last_fence;
 
-#define MI_FLUSH ((0 << 29) | (4 << 23))
-
    /* Add the MI_BATCH_BUFFER_END.  Always add an MI_FLUSH - this is a
    * performance drain that we would like to avoid.
    */
+#if 0 /* ZZZ JB: what should we do here? */
    if (used & 4) {
-      ((int *) batch->ptr)[0] = MI_FLUSH;
+      ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
       ((int *) batch->ptr)[1] = 0;
       ((int *) batch->ptr)[2] = MI_BATCH_BUFFER_END;
       used += 12;
    }
    else {
-      ((int *) batch->ptr)[0] = MI_FLUSH;
+      ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
       ((int *) batch->ptr)[1] = MI_BATCH_BUFFER_END;
       used += 8;
    }
-
+#else
+   if (used & 4) {
+      ((int *) batch->ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
+      ((int *) batch->ptr)[1] = 0;
+      ((int *) batch->ptr)[2] = (0xA<<23); // MI_BATCH_BUFFER_END;
+      used += 12;
+   }
+   else {
+      ((int *) batch->ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
+      ((int *) batch->ptr)[1] = (0xA<<23); // MI_BATCH_BUFFER_END;
+      used += 8;
+   }
+#endif
 
    driBOUnmap(batch->buffer);
    batch->ptr = NULL;
    batch->map = NULL;
@@ -296,7 +427,8 @@ intel_batchbuffer_flush(struct intel_batchbuffer *batch)
    if (!was_locked)
       LOCK_HARDWARE(intel);
 
-   do_flush_locked(batch, used, GL_FALSE);
+   fence = do_flush_locked(batch, used, !(batch->flags & INTEL_BATCH_CLIPRECTS),
+                           GL_FALSE);
 
    if (!was_locked)
       UNLOCK_HARDWARE(intel);
 
@@ -304,52 +436,20 @@ intel_batchbuffer_flush(struct intel_batchbuffer *batch)
    /* Reset the buffer:
    */
    intel_batchbuffer_reset(batch);
-   return batch->last_fence;
+   return fence;
 }
 
-
 void
 intel_batchbuffer_finish(struct intel_batchbuffer *batch)
 {
    struct _DriFenceObject *fence = intel_batchbuffer_flush(batch);
-   if (fence) {
-      driFenceReference(fence);
-      driFenceFinish(fence,
-                     DRM_FENCE_TYPE_EXE | DRM_I915_FENCE_TYPE_RW,
-                     GL_FALSE);
-      driFenceUnReference(fence);
-   }
+   driFenceFinish(fence, driFenceType(fence), GL_FALSE);
+   driFenceUnReference(&fence);
 }
 
-
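The #else branch in intel_batchbuffer_flush() above hardcodes the two command encodings that the removed MI_FLUSH define spelled out: MI_FLUSH was defined in this file as (0 << 29) | (4 << 23), and the inline comments label 0xA << 23 as MI_BATCH_BUFFER_END. The tail logic exists because a batch must end quadword-aligned: a dword-misaligned batch gets a three-dword tail, an aligned one a two-dword tail. A standalone sketch with the names restored (the helper is hypothetical):

#include <stdint.h>

#define MI_FLUSH             ((0 << 29) | (4 << 23))
#define MI_BATCH_BUFFER_END  (0xA << 23)

/* Append MI_FLUSH + MI_BATCH_BUFFER_END, padding so the batch ends on
 * a quadword (8-byte) boundary; returns the new size in bytes. */
static unsigned
emit_batch_tail(uint32_t *ptr, unsigned used)
{
   if (used & 4) {               /* dword- but not quadword-aligned */
      ptr[0] = MI_FLUSH;
      ptr[1] = 0;                /* pad dword */
      ptr[2] = MI_BATCH_BUFFER_END;
      return used + 12;
   }
   ptr[0] = MI_FLUSH;
   ptr[1] = MI_BATCH_BUFFER_END;
   return used + 8;
}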
-/* This is the only way buffers get added to the validate list.
- */
-boolean
-intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
-                             struct _DriBufferObject *buffer,
-                             uint flags, uint mask, uint delta)
-{
-   assert(batch->nr_relocs < MAX_RELOCS);
-
-   driBOAddListItem(&batch->list, buffer, flags, mask);
-
-   {
-      struct buffer_reloc *r = &batch->reloc[batch->nr_relocs++];
-      driBOReference(buffer);
-      r->buf = buffer;
-      r->offset = batch->ptr - batch->map;
-      r->delta = delta;
-      *(uint *) batch->ptr = 0x12345678;
-   }
-
-   batch->ptr += 4;
-   return GL_TRUE;
-}
-
-
 void
 intel_batchbuffer_data(struct intel_batchbuffer *batch,
-                       const void *data, uint bytes, uint flags)
+                       const void *data, GLuint bytes, GLuint flags)
 {
    assert((bytes & 3) == 0);
 
    intel_batchbuffer_require_space(batch, bytes, flags);
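Taken together, the reworked interface is used roughly as sketched below. This is a hypothetical frame of reference only: intel_batchbuffer_require_space() and intel_batchbuffer_emit_dword() come from intel_batchbuffer.h, the opcode is a placeholder, and real callers keep the batchbuffer for the lifetime of the context rather than allocating one per submission.

static void
example_submit(struct intel_context *intel,
               struct _DriBufferObject *target)
{
   struct intel_batchbuffer *batch = intel_batchbuffer_alloc(intel);

   /* Two dwords: a placeholder command plus the relocated pointer. */
   intel_batchbuffer_require_space(batch, 8, 0);
   intel_batchbuffer_emit_dword(batch, 0 /* opcode */);
   intel_offset_relocation(batch, 0, target,
                           DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
                           DRM_BO_MASK_MEM | DRM_BO_FLAG_READ);

   /* flush() now returns the fence for this submission; finish()
    * flushes and waits on it. */
   intel_batchbuffer_finish(batch);
   intel_batchbuffer_free(batch);
}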