diff options
author | Eric Anholt <eric@anholt.net> | 2008-01-09 12:33:39 -0800 |
---|---|---|
committer | Eric Anholt <eric@anholt.net> | 2008-01-09 14:41:40 -0800 |
commit | beddf653a914903156712aa472b5deaddb7bbaed (patch) | |
tree | c6ef9aaa10a92f88b435fb332f6950cfd7c5186c /src/mesa/drivers/dri/intel/intel_batchbuffer.c | |
parent | 7ce12b0863f1cc03bdd7c65c0c0733b2ff903e40 (diff) |
[intel] Clean up cliprect handling in intel drivers.
In particular, batch buffers are no longer flushed when switching from
CLIPRECTS to NO_CLIPRECTS or vice versa, and 965 just uses DRM cliprect
handling for primitives instead of trying to sneak in its own to avoid the
DRM stuff. The disadvantage is that we will re-execute state updates per
cliprect, but the advantage is that we will be able to accumulate larger
batch buffers, which were proving to be a major overhead.
Diffstat (limited to 'src/mesa/drivers/dri/intel/intel_batchbuffer.c')
-rw-r--r-- | src/mesa/drivers/dri/intel/intel_batchbuffer.c | 29 |
1 file changed, 17 insertions, 12 deletions
diff --git a/src/mesa/drivers/dri/intel/intel_batchbuffer.c b/src/mesa/drivers/dri/intel/intel_batchbuffer.c index cbd6d729d6..7ad9a1a0a4 100644 --- a/src/mesa/drivers/dri/intel/intel_batchbuffer.c +++ b/src/mesa/drivers/dri/intel/intel_batchbuffer.c @@ -87,6 +87,7 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch) batch->ptr = batch->map; batch->dirty_state = ~0; batch->id = batch->intel->batch_id++; + batch->cliprects_enable = INTEL_BATCH_NO_CLIPRECTS; } struct intel_batchbuffer * @@ -124,8 +125,7 @@ intel_batchbuffer_free(struct intel_batchbuffer *batch) */ static void do_flush_locked(struct intel_batchbuffer *batch, - GLuint used, - GLboolean ignore_cliprects, GLboolean allow_unlock) + GLuint used, GLboolean allow_unlock) { struct intel_context *intel = batch->intel; void *start; @@ -136,28 +136,33 @@ do_flush_locked(struct intel_batchbuffer *batch, batch->map = NULL; batch->ptr = NULL; - batch->flags = 0; /* Throw away non-effective packets. Won't work once we have * hardware contexts which would preserve statechanges beyond a * single buffer. 
*/ - if (!(intel->numClipRects == 0 && !ignore_cliprects)) { + if (!(intel->numClipRects == 0 && + batch->cliprects_enable == INTEL_BATCH_CLIPRECTS)) { if (intel->ttm == GL_TRUE) { intel_exec_ioctl(batch->intel, - used, ignore_cliprects, allow_unlock, + used, + batch->cliprects_enable == INTEL_BATCH_NO_CLIPRECTS, + allow_unlock, start, count, &batch->last_fence); } else { intel_batch_ioctl(batch->intel, batch->buf->offset, - used, ignore_cliprects, allow_unlock); + used, + batch->cliprects_enable == INTEL_BATCH_NO_CLIPRECTS, + allow_unlock); } } dri_post_submit(batch->buf, &batch->last_fence); - if (intel->numClipRects == 0 && !ignore_cliprects) { + if (intel->numClipRects == 0 && + batch->cliprects_enable == INTEL_BATCH_CLIPRECTS) { if (allow_unlock) { /* If we are not doing any actual user-visible rendering, * do a sched_yield to keep the app from pegging the cpu while @@ -212,9 +217,8 @@ intel_batchbuffer_flush(struct intel_batchbuffer *batch) if (!was_locked) LOCK_HARDWARE(intel); - do_flush_locked(batch, used, !(batch->flags & INTEL_BATCH_CLIPRECTS), - GL_FALSE); - + do_flush_locked(batch, used, GL_FALSE); + if (!was_locked) UNLOCK_HARDWARE(intel); @@ -258,10 +262,11 @@ intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch, void intel_batchbuffer_data(struct intel_batchbuffer *batch, - const void *data, GLuint bytes, GLuint flags) + const void *data, GLuint bytes, + enum cliprects_enable cliprects_enable) { assert((bytes & 3) == 0); - intel_batchbuffer_require_space(batch, bytes, flags); + intel_batchbuffer_require_space(batch, bytes, cliprects_enable); __memcpy(batch->ptr, data, bytes); batch->ptr += bytes; } |