-rw-r--r--  .gitignore                        |    3
-rw-r--r--  configure.ac                      |   68
-rw-r--r--  libdrm/Makefile.am                |    8
-rw-r--r--  libdrm/dri_bufmgr.c               |  141
-rw-r--r--  libdrm/dri_bufmgr.h               |  174
-rw-r--r--  libdrm/intel/Makefile.am          |   38
-rw-r--r--  libdrm/intel/intel_bufmgr.h       |   95
-rw-r--r--  libdrm/intel/intel_bufmgr_fake.c  | 1218
-rw-r--r--  libdrm/intel/intel_bufmgr_gem.c   |  833
-rw-r--r--  libdrm/intel/mm.c                 |  281
-rw-r--r--  libdrm/intel/mm.h                 |   88
-rw-r--r--  libdrm/xf86drm.c                  |    2
-rw-r--r--  libdrm/xf86drm.h                  |    1
-rw-r--r--  libdrm/xf86mm.h                   |   12
-rw-r--r--  linux-core/Makefile               |    1
-rw-r--r--  linux-core/Makefile.kernel        |    6
-rw-r--r--  linux-core/drm-gem.txt            |  805
-rw-r--r--  linux-core/drmP.h                 |  227
-rw-r--r--  linux-core/drm_agpsupport.c       |   43
-rw-r--r--  linux-core/drm_bo.c               |   38
-rw-r--r--  linux-core/drm_bo_move.c          |    2
-rw-r--r--  linux-core/drm_drv.c              |   11
-rw-r--r--  linux-core/drm_fops.c             |    6
-rw-r--r--  linux-core/drm_gem.c              |  639
-rw-r--r--  linux-core/drm_irq.c              |    1
-rw-r--r--  linux-core/drm_lock.c             |   58
-rw-r--r--  linux-core/drm_memory.c           |    2
-rw-r--r--  linux-core/drm_memrange.c (renamed from linux-core/drm_mm.c) |  100
-rw-r--r--  linux-core/drm_objects.h          |   13
-rw-r--r--  linux-core/drm_proc.c             |   79
-rw-r--r--  linux-core/drm_sman.c             |   22
-rw-r--r--  linux-core/drm_sman.h             |    4
-rw-r--r--  linux-core/drm_stub.c             |   15
-rw-r--r--  linux-core/i915_drv.c             |    6
-rw-r--r--  linux-core/i915_gem.c             | 1759
-rw-r--r--  linux-core/nouveau_bo.c           |    2
-rw-r--r--  linux-core/nouveau_sgdma.c        |    2
-rw-r--r--  shared-core/drm.h                 |   98
-rw-r--r--  shared-core/i915_dma.c            |   70
-rw-r--r--  shared-core/i915_drm.h            |  154
-rw-r--r--  shared-core/i915_drv.h            |  184
-rw-r--r--  shared-core/i915_init.c           |   45
-rw-r--r--  shared-core/i915_irq.c            |  103
-rw-r--r--  shared-core/radeon_cp.c           |   26
-rw-r--r--  shared-core/radeon_drv.h          |    4
-rw-r--r--  tests/Makefile.am                 |    5
-rw-r--r--  tests/drmtest.c                   |    2
-rw-r--r--  tests/gem_basic.c                 |   97
-rw-r--r--  tests/gem_mmap.c                  |  131
-rw-r--r--  tests/gem_readwrite.c             |  125
50 files changed, 7655 insertions, 192 deletions
diff --git a/.gitignore b/.gitignore
index 0991da8c..c8a22ea3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -58,6 +58,9 @@ tests/getclient
tests/getstats
tests/getversion
tests/lock
+tests/gem_basic
+tests/gem_mmap
+tests/gem_readwrite
tests/openclose
tests/setversion
tests/updatedraw
diff --git a/configure.ac b/configure.ac
index 78203343..a8855684 100644
--- a/configure.ac
+++ b/configure.ac
@@ -35,9 +35,77 @@ AC_SYS_LARGEFILE
pkgconfigdir=${libdir}/pkgconfig
AC_SUBST(pkgconfigdir)
+
+dnl ===========================================================================
+dnl check compiler flags
+AC_DEFUN([LIBDRM_CC_TRY_FLAG], [
+ AC_MSG_CHECKING([whether $CC supports $1])
+
+ libdrm_save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $1"
+
+ AC_COMPILE_IFELSE([ ], [libdrm_cc_flag=yes], [libdrm_cc_flag=no])
+ CFLAGS="$libdrm_save_CFLAGS"
+
+ if test "x$libdrm_cc_flag" = "xyes"; then
+ ifelse([$2], , :, [$2])
+ else
+ ifelse([$3], , :, [$3])
+ fi
+ AC_MSG_RESULT([$libdrm_cc_flag])
+])
+
+dnl Use lots of warning flags with gcc and compatible compilers
+
+dnl Note: if you change the following variable, the cache is automatically
+dnl skipped and all flags rechecked. So there's no need to do anything
+dnl else. If for any reason you need to force a recheck, just change
+dnl MAYBE_WARN in an ignorable way (like adding whitespace)
+
+MAYBE_WARN="-Wall -Wextra \
+-Wsign-compare -Werror-implicit-function-declaration \
+-Wpointer-arith -Wwrite-strings -Wstrict-prototypes \
+-Wmissing-prototypes -Wmissing-declarations -Wnested-externs \
+-Wpacked -Wswitch-enum -Wmissing-format-attribute \
+-Wstrict-aliasing=2 -Winit-self -Wunsafe-loop-optimizations \
+-Wdeclaration-after-statement -Wold-style-definition \
+-Wno-missing-field-initializers -Wno-unused-parameter \
+-Wno-attributes -Wno-long-long -Winline"
+
+# invalidate cached value if MAYBE_WARN has changed
+if test "x$libdrm_cv_warn_maybe" != "x$MAYBE_WARN"; then
+ unset libdrm_cv_warn_cflags
+fi
+AC_CACHE_CHECK([for supported warning flags], libdrm_cv_warn_cflags, [
+ echo
+ WARN_CFLAGS=""
+
+ # Some warning options are not supported by all versions of
+ # gcc, so test all desired options against the current
+ # compiler.
+ #
+ # Note that there are some order dependencies
+ # here. Specifically, an option that disables a warning will
+ # have no net effect if a later option then enables that
+ # warning (perhaps implicitly). So we put some grouped
+ # options (-Wall and -Wextra) up front and the -Wno options
+ # last.
+
+ for W in $MAYBE_WARN; do
+ LIBDRM_CC_TRY_FLAG([$W], [WARN_CFLAGS="$WARN_CFLAGS $W"])
+ done
+
+ libdrm_cv_warn_cflags=$WARN_CFLAGS
+ libdrm_cv_warn_maybe=$MAYBE_WARN
+
+ AC_MSG_CHECKING([which warning flags were supported])])
+WARN_CFLAGS="$libdrm_cv_warn_cflags"
+
+AC_SUBST(WARN_CFLAGS)
AC_OUTPUT([
Makefile
libdrm/Makefile
+ libdrm/intel/Makefile
shared-core/Makefile
tests/Makefile
libdrm.pc])
diff --git a/libdrm/Makefile.am b/libdrm/Makefile.am
index 24c32038..624f6ffb 100644
--- a/libdrm/Makefile.am
+++ b/libdrm/Makefile.am
@@ -18,14 +18,18 @@
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+SUBDIRS = intel
+
libdrm_la_LTLIBRARIES = libdrm.la
libdrm_ladir = $(libdir)
libdrm_la_LDFLAGS = -version-number 2:3:0 -no-undefined
AM_CFLAGS = -I$(top_srcdir)/shared-core
-libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c xf86drmMode.c
+libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c \
+ xf86drmMode.c dri_bufmgr.c
+libdrm_la_LIBADD = intel/libdrm_intel.la
libdrmincludedir = ${includedir}
-libdrminclude_HEADERS = xf86drm.h xf86mm.h xf86drmMode.h
+libdrminclude_HEADERS = xf86drm.h xf86mm.h xf86drmMode.h dri_bufmgr.h
EXTRA_DIST = ChangeLog TODO
diff --git a/libdrm/dri_bufmgr.c b/libdrm/dri_bufmgr.c
new file mode 100644
index 00000000..7657df61
--- /dev/null
+++ b/libdrm/dri_bufmgr.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright © 2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include "dri_bufmgr.h"
+
+/** @file dri_bufmgr.c
+ *
+ * Convenience functions for buffer management methods.
+ */
+
+dri_bo *
+dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
+ unsigned int alignment)
+{
+ return bufmgr->bo_alloc(bufmgr, name, size, alignment);
+}
+
+void
+dri_bo_reference(dri_bo *bo)
+{
+ bo->bufmgr->bo_reference(bo);
+}
+
+void
+dri_bo_unreference(dri_bo *bo)
+{
+ if (bo == NULL)
+ return;
+
+ bo->bufmgr->bo_unreference(bo);
+}
+
+int
+dri_bo_map(dri_bo *buf, int write_enable)
+{
+ return buf->bufmgr->bo_map(buf, write_enable);
+}
+
+int
+dri_bo_unmap(dri_bo *buf)
+{
+ return buf->bufmgr->bo_unmap(buf);
+}
+
+int
+dri_bo_subdata(dri_bo *bo, unsigned long offset,
+ unsigned long size, const void *data)
+{
+ int ret;
+ if (bo->bufmgr->bo_subdata)
+ return bo->bufmgr->bo_subdata(bo, offset, size, data);
+ if (size == 0 || data == NULL)
+ return 0;
+
+ ret = dri_bo_map(bo, 1);
+ if (ret)
+ return ret;
+ memcpy((unsigned char *)bo->virtual + offset, data, size);
+ dri_bo_unmap(bo);
+ return 0;
+}
+
+int
+dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
+ unsigned long size, void *data)
+{
+ int ret;
+ if (bo->bufmgr->bo_get_subdata)
+ return bo->bufmgr->bo_get_subdata(bo, offset, size, data);
+
+ if (size == 0 || data == NULL)
+ return 0;
+
+ ret = dri_bo_map(bo, 0);
+ if (ret)
+ return ret;
+ memcpy(data, (unsigned char *)bo->virtual + offset, size);
+ dri_bo_unmap(bo);
+ return 0;
+}
+
+void
+dri_bo_wait_rendering(dri_bo *bo)
+{
+ bo->bufmgr->bo_wait_rendering(bo);
+}
+
+void
+dri_bufmgr_destroy(dri_bufmgr *bufmgr)
+{
+ bufmgr->destroy(bufmgr);
+}
+
+void *dri_process_relocs(dri_bo *batch_buf)
+{
+ return batch_buf->bufmgr->process_relocs(batch_buf);
+}
+
+void dri_post_submit(dri_bo *batch_buf)
+{
+ batch_buf->bufmgr->post_submit(batch_buf);
+}
+
+void
+dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug)
+{
+ bufmgr->debug = enable_debug;
+}
+
+int
+dri_bufmgr_check_aperture_space(dri_bo *bo)
+{
+ return bo->bufmgr->check_aperture_space(bo);
+}
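
A minimal usage sketch of the wrappers above (illustrative only, not part of
the patch; it assumes a bufmgr obtained from a backend constructor such as
intel_bufmgr_gem_init() and an already-open DRM fd):

    dri_bufmgr *bufmgr = intel_bufmgr_gem_init(fd, 4096);
    dri_bo *bo = dri_bo_alloc(bufmgr, "scratch", 16384, 4096);
    uint32_t data[4] = { 0, 1, 2, 3 };

    dri_bo_subdata(bo, 0, sizeof(data), data);  /* upload via map/memcpy/unmap */
    if (dri_bo_map(bo, 0) == 0) {               /* read-only mapping */
        /* inspect bo->virtual here */
        dri_bo_unmap(bo);
    }
    dri_bo_unreference(bo);
    dri_bufmgr_destroy(bufmgr);
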
diff --git a/libdrm/dri_bufmgr.h b/libdrm/dri_bufmgr.h
new file mode 100644
index 00000000..a5ae6c0f
--- /dev/null
+++ b/libdrm/dri_bufmgr.h
@@ -0,0 +1,174 @@
+/**************************************************************************
+ *
+ * Copyright © 2007 Intel Corporation
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#ifndef _DRI_BUFMGR_H_
+#define _DRI_BUFMGR_H_
+#include <xf86drm.h>
+
+typedef struct _dri_bufmgr dri_bufmgr;
+typedef struct _dri_bo dri_bo;
+
+struct _dri_bo {
+ /**
+ * Size in bytes of the buffer object.
+ *
+ * The size may be larger than the size originally requested for the
+ * allocation, such as being aligned to page size.
+ */
+ unsigned long size;
+ /**
+ * Card virtual address (offset from the beginning of the aperture) for the
+ * object. Only valid while validated.
+ */
+ unsigned long offset;
+ /**
+ * Virtual address for accessing the buffer data. Only valid while mapped.
+ */
+ void *virtual;
+ /** Buffer manager context associated with this buffer object */
+ dri_bufmgr *bufmgr;
+};
+
+/**
+ * Context for a buffer manager instance.
+ *
+ * Contains public methods followed by private storage for the buffer manager.
+ */
+struct _dri_bufmgr {
+ /**
+ * Allocate a buffer object.
+ *
+ * Buffer objects are not necessarily initially mapped into CPU virtual
+ * address space or graphics device aperture. They must be mapped using
+ * bo_map() for CPU access, and validated using bo_validate() before being
+ * used from the graphics device.
+ */
+ dri_bo *(*bo_alloc)(dri_bufmgr *bufmgr_ctx, const char *name,
+ unsigned long size, unsigned int alignment);
+
+ /** Takes a reference on a buffer object */
+ void (*bo_reference)(dri_bo *bo);
+
+ /**
+ * Releases a reference on a buffer object, freeing the data if
+ * no references remain.
+ */
+ void (*bo_unreference)(dri_bo *bo);
+
+ /**
+ * Maps the buffer into userspace.
+ *
+ * This function will block waiting for any existing execution on the
+ * buffer to complete, first. The resulting mapping is available at
+ * buf->virtual.
+ */
+ int (*bo_map)(dri_bo *buf, int write_enable);
+
+ /** Reduces the refcount on the userspace mapping of the buffer object. */
+ int (*bo_unmap)(dri_bo *buf);
+
+ /**
+ * Write data into an object.
+ *
+ * This is an optional function, if missing,
+ * dri_bo will map/memcpy/unmap.
+ */
+ int (*bo_subdata) (dri_bo *buf, unsigned long offset,
+ unsigned long size, const void *data);
+
+ /**
+ * Read data from an object
+ *
+ * This is an optional function, if missing,
+ * dri_bo will map/memcpy/unmap.
+ */
+ int (*bo_get_subdata) (dri_bo *bo, unsigned long offset,
+ unsigned long size, void *data);
+
+ /**
+ * Waits for rendering to an object by the GPU to have completed.
+ *
+ * This is not required for any access to the BO by bo_map, bo_subdata, etc.
+ * It is merely a way for the driver to implement glFinish.
+ */
+ void (*bo_wait_rendering) (dri_bo *bo);
+
+ /**
+ * Tears down the buffer manager instance.
+ */
+ void (*destroy)(dri_bufmgr *bufmgr);
+
+ /**
+ * Processes the relocations, either in userland or by converting the list
+ * for use in batchbuffer submission.
+ *
+ * Kernel-based implementations will return a pointer to the arguments
+ * to be handed with batchbuffer submission to the kernel. The userland
+ * implementation performs the buffer validation and emits relocations
+ * into them in the appropriate order.
+ *
+ * \param batch_buf buffer at the root of the tree of relocations
+ * \return argument to be completed and passed to the execbuffers ioctl
+ * (if any).
+ */
+ void *(*process_relocs)(dri_bo *batch_buf);
+
+ void (*post_submit)(dri_bo *batch_buf);
+
+ int (*check_aperture_space)(dri_bo *bo);
+ int debug; /**< Enables verbose debugging printouts */
+};
+
+dri_bo *dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
+ unsigned int alignment);
+void dri_bo_reference(dri_bo *bo);
+void dri_bo_unreference(dri_bo *bo);
+int dri_bo_map(dri_bo *buf, int write_enable);
+int dri_bo_unmap(dri_bo *buf);
+
+int dri_bo_subdata(dri_bo *bo, unsigned long offset,
+ unsigned long size, const void *data);
+int dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
+ unsigned long size, void *data);
+void dri_bo_wait_rendering(dri_bo *bo);
+
+void dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug);
+void dri_bufmgr_destroy(dri_bufmgr *bufmgr);
+
+void *dri_process_relocs(dri_bo *batch_buf);
+void dri_post_process_relocs(dri_bo *batch_buf);
+void dri_post_submit(dri_bo *batch_buf);
+int dri_bufmgr_check_aperture_space(dri_bo *bo);
+
+#endif
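
The backends added below (fake and GEM) implement this interface by embedding
struct _dri_bufmgr as the first member of a private struct and filling in the
function pointers; a minimal sketch of that pattern, with hypothetical names:

    /* dri_bufmgr must be the first member so the generic pointer can be
     * cast back to the backend's private type. */
    typedef struct _example_bufmgr {
        dri_bufmgr bufmgr;   /* public methods */
        int fd;              /* backend-private state follows */
    } example_bufmgr;

    static void
    example_destroy(dri_bufmgr *bufmgr)
    {
        example_bufmgr *eb = (example_bufmgr *)bufmgr;  /* safe downcast */
        free(eb);
    }

    dri_bufmgr *
    example_bufmgr_init(int fd)
    {
        example_bufmgr *eb = calloc(1, sizeof(*eb));

        eb->fd = fd;
        eb->bufmgr.destroy = example_destroy;
        /* ...assign the remaining methods the backend supports... */
        return &eb->bufmgr;
    }
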
diff --git a/libdrm/intel/Makefile.am b/libdrm/intel/Makefile.am
new file mode 100644
index 00000000..111204b1
--- /dev/null
+++ b/libdrm/intel/Makefile.am
@@ -0,0 +1,38 @@
+# Copyright © 2008 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+# Authors:
+# Eric Anholt <eric@anholt.net>
+
+AM_CFLAGS = \
+ $(WARN_CFLAGS) \
+ -I$(top_srcdir)/shared-core
+
+noinst_LTLIBRARIES = libdrm_intel.la
+
+libdrm_intel_la_SOURCES = \
+ intel_bufmgr_fake.c \
+ intel_bufmgr_gem.c \
+ mm.c \
+ mm.h
+
+libdrm_intelincludedir = ${includedir}
+libdrm_intelinclude_HEADERS = intel_bufmgr.h
diff --git a/libdrm/intel/intel_bufmgr.h b/libdrm/intel/intel_bufmgr.h
new file mode 100644
index 00000000..1cf0d518
--- /dev/null
+++ b/libdrm/intel/intel_bufmgr.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/**
+ * @file intel_bufmgr.h
+ *
+ * Public definitions of Intel-specific bufmgr functions.
+ */
+
+#ifndef INTEL_BUFMGR_GEM_H
+#define INTEL_BUFMGR_GEM_H
+
+#include "dri_bufmgr.h"
+
+/**
+ * Intel-specific bufmgr bits that follow immediately after the
+ * generic bufmgr structure.
+ */
+struct intel_bufmgr {
+ /**
+ * Add relocation entry in reloc_buf, which will be updated with the
+ * target buffer's real offset on command submission.
+ *
+ * Relocations remain in place for the lifetime of the buffer object.
+ *
+ * \param reloc_buf Buffer to write the relocation into.
+ * \param read_domains GEM read domains which the buffer will be read into
+ * by the command that this relocation is part of.
+ * \param write_domain GEM write domain in which the buffer will be dirtied
+ * by the command that this relocation is part of.
+ * \param delta Constant value to be added to the relocation target's
+ * offset.
+ * \param offset Byte offset within batch_buf of the relocated pointer.
+ * \param target Buffer whose offset should be written into the relocation
+ * entry.
+ */
+ int (*emit_reloc)(dri_bo *reloc_buf,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t delta, uint32_t offset, dri_bo *target);
+};
+
+/* intel_bufmgr_gem.c */
+dri_bufmgr *intel_bufmgr_gem_init(int fd, int batch_size);
+dri_bo *intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
+ unsigned int handle);
+void intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr);
+
+/* intel_bufmgr_fake.c */
+dri_bufmgr *intel_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
+ unsigned long size,
+ unsigned int (*fence_emit)(void *private),
+ int (*fence_wait)(void *private,
+ unsigned int cookie),
+ void *driver_priv);
+dri_bo *intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
+ unsigned long offset, unsigned long size,
+ void *virtual);
+
+void intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr);
+void intel_bo_fake_disable_backing_store(dri_bo *bo,
+ void (*invalidate_cb)(dri_bo *bo,
+ void *ptr),
+ void *ptr);
+void intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr);
+
+int intel_bo_emit_reloc(dri_bo *reloc_buf,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t delta, uint32_t offset, dri_bo *target_buf);
+
+#endif /* INTEL_BUFMGR_GEM_H */
+
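
For orientation (a hedged sketch, not part of the patch): a driver building a
batchbuffer writes the target's presumed offset into the batch and records the
relocation with intel_bo_emit_reloc(); the names below are hypothetical:

    static void
    emit_pointer(dri_bo *batch_bo, uint32_t *batch_map, uint32_t used_bytes,
                 dri_bo *target, uint32_t read_domains, uint32_t delta)
    {
        /* Write the last-known address; the bufmgr patches it at submit time. */
        batch_map[used_bytes / 4] = target->offset + delta;
        intel_bo_emit_reloc(batch_bo, read_domains, 0 /* write_domain */,
                            delta, used_bytes, target);
    }
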
diff --git a/libdrm/intel/intel_bufmgr_fake.c b/libdrm/intel/intel_bufmgr_fake.c
new file mode 100644
index 00000000..3f5a22d3
--- /dev/null
+++ b/libdrm/intel/intel_bufmgr_fake.c
@@ -0,0 +1,1218 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Originally a fake version of the buffer manager so that we could
+ * prototype the changes in a driver fairly quickly; it has since been
+ * fleshed out into a fully functional interim solution.
+ *
+ * Basically wraps the old style memory management in the new
+ * programming interface, but is more expressive and avoids many of
+ * the bugs in the old texture manager.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include "dri_bufmgr.h"
+#include "intel_bufmgr.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "mm.h"
+
+#define DBG(...) do { \
+ if (bufmgr_fake->bufmgr.debug) \
+ drmMsg(__VA_ARGS__); \
+} while (0)
+
+/* Internal flags:
+ */
+#define BM_NO_BACKING_STORE 0x00000001
+#define BM_NO_FENCE_SUBDATA 0x00000002
+#define BM_PINNED 0x00000004
+
+/* Wrapper around mm.c's mem_block, which understands that you must
+ * wait for fences to expire before memory can be freed. This is
+ * specific to our use of memcpy for uploads - an upload that was
+ * processed through the command queue wouldn't need to care about
+ * fences.
+ */
+#define MAX_RELOCS 4096
+
+struct fake_buffer_reloc
+{
+ /** Buffer object that the relocation points at. */
+ dri_bo *target_buf;
+ /** Offset of the relocation entry within reloc_buf. */
+ uint32_t offset;
+ /** Cached value of the offset when we last performed this relocation. */
+ uint32_t last_target_offset;
+ /** Value added to target_buf's offset to get the relocation entry. */
+ uint32_t delta;
+ /** Cache domains the target buffer is read into. */
+ uint32_t read_domains;
+ /** Cache domain the target buffer will have dirty cachelines in. */
+ uint32_t write_domain;
+};
+
+struct block {
+ struct block *next, *prev;
+ struct mem_block *mem; /* BM_MEM_AGP */
+
+ /**
+ * Marks that the block is currently in the aperture and has yet to be
+ * fenced.
+ */
+ unsigned on_hardware:1;
+ /**
+ * Marks that the block is currently fenced (being used by rendering) and
+ * can't be freed until @fence is passed.
+ */
+ unsigned fenced:1;
+
+ /** Fence cookie for the block. */
+ unsigned fence; /* Split to read_fence, write_fence */
+
+ dri_bo *bo;
+ void *virtual;
+};
+
+typedef struct _bufmgr_fake {
+ dri_bufmgr bufmgr;
+ struct intel_bufmgr intel_bufmgr;
+
+ unsigned long low_offset;
+ unsigned long size;
+ void *virtual;
+
+ struct mem_block *heap;
+
+ unsigned buf_nr; /* for generating ids */
+
+ /**
+ * List of blocks which are currently in the GART but haven't been
+ * fenced yet.
+ */
+ struct block on_hardware;
+ /**
+ * List of blocks which are in the GART and have an active fence on them.
+ */
+ struct block fenced;
+ /**
+ * List of blocks which have an expired fence and are ready to be evicted.
+ */
+ struct block lru;
+
+ unsigned int last_fence;
+
+ unsigned fail:1;
+ unsigned need_fence:1;
+ int thrashing;
+
+ /**
+ * Driver callback to emit a fence, returning the cookie.
+ *
+ * Currently, this also requires that a write flush be emitted before
+ * emitting the fence, but this should change.
+ */
+ unsigned int (*fence_emit)(void *private);
+ /** Driver callback to wait for a fence cookie to have passed. */
+ int (*fence_wait)(void *private, unsigned int fence_cookie);
+ /** Driver-supplied argument to driver callbacks */
+ void *driver_priv;
+
+ int debug;
+
+ int performed_rendering;
+
+ /* keep track of the current total size of objects we have relocs for */
+ unsigned long current_total_size;
+} dri_bufmgr_fake;
+
+typedef struct _dri_bo_fake {
+ dri_bo bo;
+
+ unsigned id; /* debug only */
+ const char *name;
+
+ unsigned dirty:1;
+ unsigned size_accounted:1; /* this buffer's size has been accounted against the aperture */
+ unsigned card_dirty:1; /* has the card written to this buffer - we may need to copy it back */
+ unsigned int refcount;
+ /* Flags may consist of any of the DRM_BO flags, plus
+ * BM_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the first two
+ * driver private flags.
+ */
+ uint64_t flags;
+ /** Cache domains the target buffer is read into. */
+ uint32_t read_domains;
+ /** Cache domain the target buffer will have dirty cachelines in. */
+ uint32_t write_domain;
+
+ unsigned int alignment;
+ int is_static, validated;
+ unsigned int map_count;
+
+ /** relocation list */
+ struct fake_buffer_reloc *relocs;
+ int nr_relocs;
+
+ struct block *block;
+ void *backing_store;
+ void (*invalidate_cb)(dri_bo *bo, void *ptr);
+ void *invalidate_ptr;
+} dri_bo_fake;
+
+static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
+ unsigned int fence_cookie);
+
+static int dri_fake_check_aperture_space(dri_bo *bo);
+
+#define MAXFENCE 0x7fffffff
+
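+/* Compare fence cookies allowing for wrap-around: FENCE_LTE(a, b) is true
+ * when fence 'a' was emitted at or before fence 'b'. For example, just
+ * after the counter wraps, a = MAXFENCE - 2 and b = 5 gives a > b
+ * numerically, yet MAXFENCE - a + b = 7 < (1 << 24), so 'a' still counts
+ * as the older fence.
+ */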
+static int FENCE_LTE( unsigned a, unsigned b )
+{
+ if (a == b)
+ return 1;
+
+ if (a < b && b - a < (1<<24))
+ return 1;
+
+ if (a > b && MAXFENCE - a + b < (1<<24))
+ return 1;
+
+ return 0;
+}
+
+static unsigned int
+_fence_emit_internal(dri_bufmgr_fake *bufmgr_fake)
+{
+ bufmgr_fake->last_fence = bufmgr_fake->fence_emit(bufmgr_fake->driver_priv);
+ return bufmgr_fake->last_fence;
+}
+
+static void
+_fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, unsigned int cookie)
+{
+ int ret;
+
+ ret = bufmgr_fake->fence_wait(bufmgr_fake->driver_priv, cookie);
+ if (ret != 0) {
+ drmMsg("%s:%d: Error %d waiting for fence.\n", __FILE__, __LINE__, ret);
+ abort();
+ }
+ clear_fenced(bufmgr_fake, cookie);
+}
+
+static int
+_fence_test(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
+{
+ /* Slight problem with wrap-around:
+ */
+ return fence == 0 || FENCE_LTE(fence, bufmgr_fake->last_fence);
+}
+
+/**
+ * Allocate a memory manager block for the buffer.
+ */
+static int
+alloc_block(dri_bo *bo)
+{
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ dri_bufmgr_fake *bufmgr_fake= (dri_bufmgr_fake *)bo->bufmgr;
+ struct block *block = (struct block *)calloc(sizeof *block, 1);
+ unsigned int align_log2 = ffs(bo_fake->alignment) - 1;
+ unsigned int sz;
+
+ if (!block)
+ return 0;
+
+ sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);
+
+ block->mem = drmmmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
+ if (!block->mem) {
+ free(block);
+ return 0;
+ }
+
+ DRMINITLISTHEAD(block);
+
+ /* Insert at head or at tail???
+ */
+ DRMLISTADDTAIL(block, &bufmgr_fake->lru);
+
+ block->virtual = (uint8_t *)bufmgr_fake->virtual +
+ block->mem->ofs - bufmgr_fake->low_offset;
+ block->bo = bo;
+
+ bo_fake->block = block;
+
+ return 1;
+}
+
+/* Release the card storage associated with buf:
+ */
+static void free_block(dri_bufmgr_fake *bufmgr_fake, struct block *block)
+{
+ dri_bo_fake *bo_fake;
+
+ if (!block)
+ return;
+
+ DBG("free block %p %08x %d %d\n", block, block->mem->ofs, block->on_hardware, block->fenced);
+
+ bo_fake = (dri_bo_fake *)block->bo;
+ if (!(bo_fake->flags & BM_NO_BACKING_STORE) && (bo_fake->card_dirty == 1)) {
+ memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
+ bo_fake->card_dirty = 1;
+ bo_fake->dirty = 1;
+ }
+
+ if (block->on_hardware) {
+ block->bo = NULL;
+ }
+ else if (block->fenced) {
+ block->bo = NULL;
+ }
+ else {
+ DBG(" - free immediately\n");
+ DRMLISTDEL(block);
+
+ drmmmFreeMem(block->mem);
+ free(block);
+ }
+}
+
+static void
+alloc_backing_store(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ assert(!bo_fake->backing_store);
+ assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
+
+ bo_fake->backing_store = malloc(bo->size);
+
+ DBG("alloc_backing - buf %d %p %d\n", bo_fake->id, bo_fake->backing_store, bo->size);
+ assert(bo_fake->backing_store);
+}
+
+static void
+free_backing_store(dri_bo *bo)
+{
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ if (bo_fake->backing_store) {
+ assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
+ free(bo_fake->backing_store);
+ bo_fake->backing_store = NULL;
+ }
+}
+
+static void
+set_dirty(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ if (bo_fake->flags & BM_NO_BACKING_STORE && bo_fake->invalidate_cb != NULL)
+ bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);
+
+ assert(!(bo_fake->flags & BM_PINNED));
+
+ DBG("set_dirty - buf %d\n", bo_fake->id);
+ bo_fake->dirty = 1;
+}
+
+static int
+evict_lru(dri_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
+{
+ struct block *block, *tmp;
+
+ DBG("%s\n", __FUNCTION__);
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+ dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
+
+ if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
+ continue;
+
+ if (block->fence && max_fence && !FENCE_LTE(block->fence, max_fence))
+ return 0;
+
+ set_dirty(&bo_fake->bo);
+ bo_fake->block = NULL;
+
+ free_block(bufmgr_fake, block);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+evict_mru(dri_bufmgr_fake *bufmgr_fake)
+{
+ struct block *block, *tmp;
+
+ DBG("%s\n", __FUNCTION__);
+
+ DRMLISTFOREACHSAFEREVERSE(block, tmp, &bufmgr_fake->lru) {
+ dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
+
+ if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
+ continue;
+
+ set_dirty(&bo_fake->bo);
+ bo_fake->block = NULL;
+
+ free_block(bufmgr_fake, block);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Removes all objects from the fenced list older than the given fence.
+ */
+static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
+ unsigned int fence_cookie)
+{
+ struct block *block, *tmp;
+ int ret = 0;
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->fenced) {
+ assert(block->fenced);
+
+ if (_fence_test(bufmgr_fake, block->fence)) {
+
+ block->fenced = 0;
+
+ if (!block->bo) {
+ DBG("delayed free: offset %x sz %x\n",
+ block->mem->ofs, block->mem->size);
+ DRMLISTDEL(block);
+ drmmmFreeMem(block->mem);
+ free(block);
+ }
+ else {
+ DBG("return to lru: offset %x sz %x\n",
+ block->mem->ofs, block->mem->size);
+ DRMLISTDEL(block);
+ DRMLISTADDTAIL(block, &bufmgr_fake->lru);
+ }
+
+ ret = 1;
+ }
+ else {
+ /* Blocks are ordered by fence, so if one fails, all from
+ * here will fail also:
+ */
+ DBG("fence not passed: offset %x sz %x %d %d \n",
+ block->mem->ofs, block->mem->size, block->fence, bufmgr_fake->last_fence);
+ break;
+ }
+ }
+
+ DBG("%s: %d\n", __FUNCTION__, ret);
+ return ret;
+}
+
+static void fence_blocks(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
+{
+ struct block *block, *tmp;
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
+ DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n", block,
+ block->mem->size, block->mem->ofs, block->bo, fence);
+ block->fence = fence;
+
+ block->on_hardware = 0;
+ block->fenced = 1;
+
+ /* Move to tail of pending list here
+ */
+ DRMLISTDEL(block);
+ DRMLISTADDTAIL(block, &bufmgr_fake->fenced);
+ }
+
+ assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+}
+
+static int evict_and_alloc_block(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ assert(bo_fake->block == NULL);
+
+ /* Search for already free memory:
+ */
+ if (alloc_block(bo))
+ return 1;
+
+ /* If we're not thrashing, allow lru eviction to dig deeper into
+ * recently used textures. We'll probably be thrashing soon:
+ */
+ if (!bufmgr_fake->thrashing) {
+ while (evict_lru(bufmgr_fake, 0))
+ if (alloc_block(bo))
+ return 1;
+ }
+
+ /* Keep thrashing counter alive?
+ */
+ if (bufmgr_fake->thrashing)
+ bufmgr_fake->thrashing = 20;
+
+ /* Wait on any already pending fences - here we are waiting for any
+ * freed memory that has been submitted to hardware and fenced to
+ * become available:
+ */
+ while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
+ uint32_t fence = bufmgr_fake->fenced.next->fence;
+ _fence_wait_internal(bufmgr_fake, fence);
+
+ if (alloc_block(bo))
+ return 1;
+ }
+
+ if (!DRMLISTEMPTY(&bufmgr_fake->on_hardware)) {
+ while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
+ uint32_t fence = bufmgr_fake->fenced.next->fence;
+ _fence_wait_internal(bufmgr_fake, fence);
+ }
+
+ if (!bufmgr_fake->thrashing) {
+ DBG("thrashing\n");
+ }
+ bufmgr_fake->thrashing = 20;
+
+ if (alloc_block(bo))
+ return 1;
+ }
+
+ while (evict_mru(bufmgr_fake))
+ if (alloc_block(bo))
+ return 1;
+
+ DBG("%s 0x%x bytes failed\n", __FUNCTION__, bo->size);
+
+ return 0;
+}
+
+/***********************************************************************
+ * Public functions
+ */
+
+/**
+ * Wait for hardware idle by emitting a fence and waiting for it.
+ */
+static void
+dri_bufmgr_fake_wait_idle(dri_bufmgr_fake *bufmgr_fake)
+{
+ unsigned int cookie;
+
+ cookie = bufmgr_fake->fence_emit(bufmgr_fake->driver_priv);
+ _fence_wait_internal(bufmgr_fake, cookie);
+}
+
+/**
+ * Wait for rendering to a buffer to complete.
+ *
+ * It is assumed that the batchbuffer which performed the rendering included
+ * the necessary flushing.
+ */
+static void
+dri_fake_bo_wait_rendering(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ if (bo_fake->block == NULL || !bo_fake->block->fenced)
+ return;
+
+ _fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
+}
+
+/* Specifically ignore texture memory sharing.
+ * -- just evict everything
+ * -- and wait for idle
+ */
+void
+intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+ struct block *block, *tmp;
+
+ bufmgr_fake->need_fence = 1;
+ bufmgr_fake->fail = 0;
+
+ /* Wait for hardware idle. We don't know where acceleration has been
+ * happening, so we'll need to wait anyway before letting anything get
+ * put on the card again.
+ */
+ dri_bufmgr_fake_wait_idle(bufmgr_fake);
+
+ /* Check that we hadn't released the lock without having fenced the last
+ * set of buffers.
+ */
+ assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
+ assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+ assert(_fence_test(bufmgr_fake, block->fence));
+ set_dirty(block->bo);
+ }
+}
+
+static dri_bo *
+dri_fake_bo_alloc(dri_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment)
+{
+ dri_bufmgr_fake *bufmgr_fake;
+ dri_bo_fake *bo_fake;
+
+ bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+
+ assert(size != 0);
+
+ bo_fake = calloc(1, sizeof(*bo_fake));
+ if (!bo_fake)
+ return NULL;
+
+ bo_fake->bo.size = size;
+ bo_fake->bo.offset = -1;
+ bo_fake->bo.virtual = NULL;
+ bo_fake->bo.bufmgr = bufmgr;
+ bo_fake->refcount = 1;
+
+ /* Alignment must be a power of two */
+ assert((alignment & (alignment - 1)) == 0);
+ if (alignment == 0)
+ alignment = 1;
+ bo_fake->alignment = alignment;
+ bo_fake->id = ++bufmgr_fake->buf_nr;
+ bo_fake->name = name;
+ bo_fake->flags = 0;
+ bo_fake->is_static = 0;
+
+ DBG("drm_bo_alloc: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+ bo_fake->bo.size / 1024);
+
+ return &bo_fake->bo;
+}
+
+dri_bo *
+intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
+ unsigned long offset, unsigned long size,
+ void *virtual)
+{
+ dri_bufmgr_fake *bufmgr_fake;
+ dri_bo_fake *bo_fake;
+
+ bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+
+ assert(size != 0);
+
+ bo_fake = calloc(1, sizeof(*bo_fake));
+ if (!bo_fake)
+ return NULL;
+
+ bo_fake->bo.size = size;
+ bo_fake->bo.offset = offset;
+ bo_fake->bo.virtual = virtual;
+ bo_fake->bo.bufmgr = bufmgr;
+ bo_fake->refcount = 1;
+ bo_fake->id = ++bufmgr_fake->buf_nr;
+ bo_fake->name = name;
+ bo_fake->flags = BM_PINNED | DRM_BO_FLAG_NO_MOVE;
+ bo_fake->is_static = 1;
+
+ DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+ bo_fake->bo.size / 1024);
+
+ return &bo_fake->bo;
+}
+
+static void
+dri_fake_bo_reference(dri_bo *bo)
+{
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ bo_fake->refcount++;
+}
+
+static void
+dri_fake_bo_unreference(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ int i;
+
+ if (!bo)
+ return;
+
+ if (--bo_fake->refcount == 0) {
+ assert(bo_fake->map_count == 0);
+ /* No remaining references, so free it */
+ if (bo_fake->block)
+ free_block(bufmgr_fake, bo_fake->block);
+ free_backing_store(bo);
+
+ for (i = 0; i < bo_fake->nr_relocs; i++)
+ dri_bo_unreference(bo_fake->relocs[i].target_buf);
+
+ DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id, bo_fake->name);
+
+ free(bo_fake->relocs);
+ free(bo);
+
+ return;
+ }
+}
+
+/**
+ * Set the buffer as not requiring backing store, and instead get the callback
+ * invoked whenever it would be set dirty.
+ */
+void intel_bo_fake_disable_backing_store(dri_bo *bo,
+ void (*invalidate_cb)(dri_bo *bo,
+ void *ptr),
+ void *ptr)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ if (bo_fake->backing_store)
+ free_backing_store(bo);
+
+ bo_fake->flags |= BM_NO_BACKING_STORE;
+
+ DBG("disable_backing_store set buf %d dirty\n", bo_fake->id);
+ bo_fake->dirty = 1;
+ bo_fake->invalidate_cb = invalidate_cb;
+ bo_fake->invalidate_ptr = ptr;
+
+ /* Note that it is invalid right from the start. Also note
+ * invalidate_cb is called with the bufmgr locked, so cannot
+ * itself make bufmgr calls.
+ */
+ if (invalidate_cb != NULL)
+ invalidate_cb(bo, ptr);
+}
+
+/**
+ * Map a buffer into bo->virtual, allocating either card memory space (If
+ * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
+ */
+static int
+dri_fake_bo_map(dri_bo *bo, int write_enable)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ /* Static buffers are always mapped. */
+ if (bo_fake->is_static)
+ return 0;
+
+ /* Allow recursive mapping. Mesa may recursively map buffers with
+ * nested display lists, and it is used internally in bufmgr_fake
+ * for relocation.
+ */
+ if (bo_fake->map_count++ != 0)
+ return 0;
+
+ {
+ DBG("drm_bo_map: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+ bo_fake->bo.size / 1024);
+
+ if (bo->virtual != NULL) {
+ drmMsg("%s: already mapped\n", __FUNCTION__);
+ abort();
+ }
+ else if (bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED)) {
+
+ if (!bo_fake->block && !evict_and_alloc_block(bo)) {
+ DBG("%s: alloc failed\n", __FUNCTION__);
+ bufmgr_fake->fail = 1;
+ return 1;
+ }
+ else {
+ assert(bo_fake->block);
+ bo_fake->dirty = 0;
+
+ if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) &&
+ bo_fake->block->fenced) {
+ dri_fake_bo_wait_rendering(bo);
+ }
+
+ bo->virtual = bo_fake->block->virtual;
+ }
+ }
+ else {
+ if (write_enable)
+ set_dirty(bo);
+
+ if (bo_fake->backing_store == 0)
+ alloc_backing_store(bo);
+
+ bo->virtual = bo_fake->backing_store;
+ }
+ }
+
+ return 0;
+}
+
+static int
+dri_fake_bo_unmap(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ /* Static buffers are always mapped. */
+ if (bo_fake->is_static)
+ return 0;
+
+ assert(bo_fake->map_count != 0);
+ if (--bo_fake->map_count != 0)
+ return 0;
+
+ DBG("drm_bo_unmap: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+ bo_fake->bo.size / 1024);
+
+ bo->virtual = NULL;
+
+ return 0;
+}
+
+static void
+dri_fake_kick_all(dri_bufmgr_fake *bufmgr_fake)
+{
+ struct block *block, *tmp;
+
+ bufmgr_fake->performed_rendering = 0;
+ /* Okay, for every BO that is on the HW, kick it off.
+ seriously not afraid of the POLICE right now */
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
+ dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
+
+ block->on_hardware = 0;
+ free_block(bufmgr_fake, block);
+ bo_fake->block = NULL;
+ bo_fake->validated = 0;
+ if (!(bo_fake->flags & BM_NO_BACKING_STORE))
+ bo_fake->dirty = 1;
+ }
+}
+
+static int
+dri_fake_bo_validate(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ /* XXX: Sanity-check whether we've already validated this one under
+ * different flags. See drmAddValidateItem().
+ */
+ bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+
+ DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+ bo_fake->bo.size / 1024);
+
+ /* Sanity check: Buffers should be unmapped before being validated.
+ * This is not so much of a problem for bufmgr_fake, but TTM refuses,
+ * and the problem is harder to debug there.
+ */
+ assert(bo_fake->map_count == 0);
+
+ if (bo_fake->is_static) {
+ /* Add it to the needs-fence list */
+ bufmgr_fake->need_fence = 1;
+ return 0;
+ }
+
+ /* reset size accounted */
+ bo_fake->size_accounted = 0;
+
+ /* Allocate the card memory */
+ if (!bo_fake->block && !evict_and_alloc_block(bo)) {
+ bufmgr_fake->fail = 1;
+ DBG("Failed to validate buf %d:%s\n", bo_fake->id, bo_fake->name);
+ return -1;
+ }
+
+ assert(bo_fake->block);
+ assert(bo_fake->block->bo == &bo_fake->bo);
+
+ bo->offset = bo_fake->block->mem->ofs;
+
+ /* Upload the buffer contents if necessary */
+ if (bo_fake->dirty) {
+ DBG("Upload dirty buf %d:%s, sz %d offset 0x%x\n", bo_fake->id,
+ bo_fake->name, bo->size, bo_fake->block->mem->ofs);
+
+ assert(!(bo_fake->flags &
+ (BM_NO_BACKING_STORE|BM_PINNED)));
+
+ /* Actually, should be able to just wait for a fence on the memory,
+ * which we would be tracking when we free it. Waiting for idle is
+ * a sufficiently large hammer for now.
+ */
+ dri_bufmgr_fake_wait_idle(bufmgr_fake);
+
+ /* We may never have mapped this BO, so it might not have any backing
+ * store; that should be rare, but zero the card memory
+ * in any case. */
+ if (bo_fake->backing_store)
+ memcpy(bo_fake->block->virtual, bo_fake->backing_store, bo->size);
+ else
+ memset(bo_fake->block->virtual, 0, bo->size);
+
+ bo_fake->dirty = 0;
+ }
+
+ bo_fake->block->fenced = 0;
+ bo_fake->block->on_hardware = 1;
+ DRMLISTDEL(bo_fake->block);
+ DRMLISTADDTAIL(bo_fake->block, &bufmgr_fake->on_hardware);
+
+ bo_fake->validated = 1;
+ bufmgr_fake->need_fence = 1;
+
+ return 0;
+}
+
+static void
+dri_fake_fence_validated(dri_bufmgr *bufmgr)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+ unsigned int cookie;
+
+ cookie = _fence_emit_internal(bufmgr_fake);
+ fence_blocks(bufmgr_fake, cookie);
+
+ DBG("drm_fence_validated: 0x%08x cookie\n", cookie);
+}
+
+static void
+dri_fake_destroy(dri_bufmgr *bufmgr)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+
+ drmmmDestroy(bufmgr_fake->heap);
+ free(bufmgr);
+}
+
+static int
+dri_fake_emit_reloc(dri_bo *reloc_buf,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t delta, uint32_t offset, dri_bo *target_buf)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)reloc_buf->bufmgr;
+ struct fake_buffer_reloc *r;
+ dri_bo_fake *reloc_fake = (dri_bo_fake *)reloc_buf;
+ dri_bo_fake *target_fake = (dri_bo_fake *)target_buf;
+ int i;
+
+ assert(reloc_buf);
+ assert(target_buf);
+
+ assert(target_fake->is_static || target_fake->size_accounted);
+
+ if (reloc_fake->relocs == NULL) {
+ reloc_fake->relocs = malloc(sizeof(struct fake_buffer_reloc) *
+ MAX_RELOCS);
+ }
+
+ r = &reloc_fake->relocs[reloc_fake->nr_relocs++];
+
+ assert(reloc_fake->nr_relocs <= MAX_RELOCS);
+
+ dri_bo_reference(target_buf);
+
+ r->target_buf = target_buf;
+ r->offset = offset;
+ r->last_target_offset = target_buf->offset;
+ r->delta = delta;
+ r->read_domains = read_domains;
+ r->write_domain = write_domain;
+
+ if (bufmgr_fake->debug) {
+ /* Check that a conflicting relocation hasn't already been emitted. */
+ for (i = 0; i < reloc_fake->nr_relocs - 1; i++) {
+ struct fake_buffer_reloc *r2 = &reloc_fake->relocs[i];
+
+ assert(r->offset != r2->offset);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Incorporates the validation flags associated with each relocation into
+ * the combined validation flags for the buffer on this batchbuffer submission.
+ */
+static void
+dri_fake_calculate_domains(dri_bo *bo)
+{
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ int i;
+
+ for (i = 0; i < bo_fake->nr_relocs; i++) {
+ struct fake_buffer_reloc *r = &bo_fake->relocs[i];
+ dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
+
+ /* Do the same for the tree of buffers we depend on */
+ dri_fake_calculate_domains(r->target_buf);
+
+ target_fake->read_domains |= r->read_domains;
+ if (target_fake->write_domain != 0)
+ target_fake->write_domain = r->write_domain;
+ }
+}
+
+
+static int
+dri_fake_reloc_and_validate_buffer(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ int i, ret;
+
+ assert(bo_fake->map_count == 0);
+
+ for (i = 0; i < bo_fake->nr_relocs; i++) {
+ struct fake_buffer_reloc *r = &bo_fake->relocs[i];
+ dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
+ uint32_t reloc_data;
+
+ /* Validate the target buffer if that hasn't been done. */
+ if (!target_fake->validated) {
+ ret = dri_fake_reloc_and_validate_buffer(r->target_buf);
+ if (ret != 0) {
+ if (bo->virtual != NULL)
+ dri_bo_unmap(bo);
+ return ret;
+ }
+ }
+
+ /* Calculate the value of the relocation entry. */
+ if (r->target_buf->offset != r->last_target_offset) {
+ reloc_data = r->target_buf->offset + r->delta;
+
+ if (bo->virtual == NULL)
+ dri_bo_map(bo, 1);
+
+ *(uint32_t *)((uint8_t *)bo->virtual + r->offset) = reloc_data;
+
+ r->last_target_offset = r->target_buf->offset;
+ }
+ }
+
+ if (bo->virtual != NULL)
+ dri_bo_unmap(bo);
+
+ if (bo_fake->write_domain != 0) {
+ if (!(bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED))) {
+ if (bo_fake->backing_store == 0)
+ alloc_backing_store(bo);
+
+ bo_fake->card_dirty = 1;
+ }
+ bufmgr_fake->performed_rendering = 1;
+ }
+
+ return dri_fake_bo_validate(bo);
+}
+
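+/* For the fake bufmgr there is no kernel execbuffer argument to hand back;
+ * relocations are written in place here, so this always returns NULL.
+ */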
+static void *
+dri_fake_process_relocs(dri_bo *batch_buf)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)batch_buf->bufmgr;
+ dri_bo_fake *batch_fake = (dri_bo_fake *)batch_buf;
+ int ret;
+ int retry_count = 0;
+
+ bufmgr_fake->performed_rendering = 0;
+
+ dri_fake_calculate_domains(batch_buf);
+
+ batch_fake->read_domains = DRM_GEM_DOMAIN_I915_COMMAND;
+
+ /* If we've run out of RAM, blow the whole lot away and retry. */
+ restart:
+ ret = dri_fake_reloc_and_validate_buffer(batch_buf);
+ if (bufmgr_fake->fail == 1) {
+ if (retry_count == 0) {
+ retry_count++;
+ dri_fake_kick_all(bufmgr_fake);
+ bufmgr_fake->fail = 0;
+ goto restart;
+ } else /* dump out the memory here */
+ drmmmDumpMemInfo(bufmgr_fake->heap);
+ }
+
+ assert(ret == 0);
+
+ bufmgr_fake->current_total_size = 0;
+ return NULL;
+}
+
+static void
+dri_bo_fake_post_submit(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ int i;
+
+ for (i = 0; i < bo_fake->nr_relocs; i++) {
+ struct fake_buffer_reloc *r = &bo_fake->relocs[i];
+ dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
+
+ if (target_fake->validated)
+ dri_bo_fake_post_submit(r->target_buf);
+
+ DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n",
+ bo_fake->name, (uint32_t)bo->offset, r->offset,
+ target_fake->name, (uint32_t)r->target_buf->offset, r->delta);
+ }
+
+ assert(bo_fake->map_count == 0);
+ bo_fake->validated = 0;
+ bo_fake->read_domains = 0;
+ bo_fake->write_domain = 0;
+}
+
+
+static void
+dri_fake_post_submit(dri_bo *batch_buf)
+{
+ dri_fake_fence_validated(batch_buf->bufmgr);
+
+ dri_bo_fake_post_submit(batch_buf);
+}
+
+static int
+dri_fake_check_aperture_space(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ unsigned int sz;
+
+ sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);
+
+ if (bo_fake->size_accounted || bo_fake->is_static)
+ return 0;
+
+ if (bufmgr_fake->current_total_size + sz > bufmgr_fake->size) {
+ DBG("check_space: %s bo %d %d overflowed bufmgr size %d\n", bo_fake->name, bo_fake->id, sz, bufmgr_fake->size);
+ return -1;
+ }
+
+ bufmgr_fake->current_total_size += sz;
+ bo_fake->size_accounted = 1;
+ DBG("drm_check_space: buf %d, %s %d %d\n", bo_fake->id, bo_fake->name, bo->size, bufmgr_fake->current_total_size);
+ return 0;
+}
+
+/**
+ * Evicts all buffers, waiting for fences to pass and copying contents out
+ * as necessary.
+ *
+ * Used by the X Server on LeaveVT, when the card memory is no longer our
+ * own.
+ */
+void
+intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+ struct block *block, *tmp;
+
+ bufmgr_fake->need_fence = 1;
+ bufmgr_fake->fail = 0;
+
+ /* Wait for hardware idle. We don't know where acceleration has been
+ * happening, so we'll need to wait anyway before letting anything get
+ * put on the card again.
+ */
+ dri_bufmgr_fake_wait_idle(bufmgr_fake);
+
+ /* Check that we hadn't released the lock without having fenced the last
+ * set of buffers.
+ */
+ assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
+ assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+ /* Releases the memory, and memcpys dirty contents out if necessary. */
+ free_block(bufmgr_fake, block);
+ }
+}
+
+dri_bufmgr *
+intel_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
+ unsigned long size,
+ unsigned int (*fence_emit)(void *private),
+ int (*fence_wait)(void *private, unsigned int cookie),
+ void *driver_priv)
+{
+ dri_bufmgr_fake *bufmgr_fake;
+
+ bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
+
+ /* Initialize allocator */
+ DRMINITLISTHEAD(&bufmgr_fake->fenced);
+ DRMINITLISTHEAD(&bufmgr_fake->on_hardware);
+ DRMINITLISTHEAD(&bufmgr_fake->lru);
+
+ bufmgr_fake->low_offset = low_offset;
+ bufmgr_fake->virtual = low_virtual;
+ bufmgr_fake->size = size;
+ bufmgr_fake->heap = drmmmInit(low_offset, size);
+
+ /* Hook in methods */
+ bufmgr_fake->bufmgr.bo_alloc = dri_fake_bo_alloc;
+ bufmgr_fake->bufmgr.bo_reference = dri_fake_bo_reference;
+ bufmgr_fake->bufmgr.bo_unreference = dri_fake_bo_unreference;
+ bufmgr_fake->bufmgr.bo_map = dri_fake_bo_map;
+ bufmgr_fake->bufmgr.bo_unmap = dri_fake_bo_unmap;
+ bufmgr_fake->bufmgr.bo_wait_rendering = dri_fake_bo_wait_rendering;
+ bufmgr_fake->bufmgr.destroy = dri_fake_destroy;
+ bufmgr_fake->bufmgr.process_relocs = dri_fake_process_relocs;
+ bufmgr_fake->bufmgr.post_submit = dri_fake_post_submit;
+ bufmgr_fake->bufmgr.check_aperture_space = dri_fake_check_aperture_space;
+ bufmgr_fake->bufmgr.debug = 0;
+ bufmgr_fake->intel_bufmgr.emit_reloc = dri_fake_emit_reloc;
+
+ bufmgr_fake->fence_emit = fence_emit;
+ bufmgr_fake->fence_wait = fence_wait;
+ bufmgr_fake->driver_priv = driver_priv;
+
+ return &bufmgr_fake->bufmgr;
+}
+
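
A hypothetical sketch (not part of the patch) of the driver glue that
intel_bufmgr_fake_init() expects; the struct and callback names are
illustrative only:

    struct my_driver { unsigned int last_fence; };

    static unsigned int
    my_fence_emit(void *priv)
    {
        struct my_driver *drv = priv;
        /* a real driver would also emit a flush and fence on the ring here */
        return ++drv->last_fence;
    }

    static int
    my_fence_wait(void *priv, unsigned int cookie)
    {
        (void)priv;
        (void)cookie;
        /* a real driver would block until the hardware passes 'cookie' */
        return 0;
    }

    /* ...then, given the GTT range the driver manages: */
    bufmgr = intel_bufmgr_fake_init(low_offset, low_virtual, size,
                                    my_fence_emit, my_fence_wait, &drv_state);
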
diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
new file mode 100644
index 00000000..a65ae982
--- /dev/null
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -0,0 +1,833 @@
+/**************************************************************************
+ *
+ * Copyright © 2007 Red Hat Inc.
+ * Copyright © 2007 Intel Corporation
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ */
+
+#include <xf86drm.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include "errno.h"
+#include "dri_bufmgr.h"
+#include "intel_bufmgr.h"
+#include "string.h"
+
+#include "i915_drm.h"
+
+#define DBG(...) do { \
+ if (bufmgr_gem->bufmgr.debug) \
+ fprintf(stderr, __VA_ARGS__); \
+} while (0)
+
+typedef struct _dri_bo_gem dri_bo_gem;
+
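+/* Each bucket keeps a free list of cached, unused buffers of one
+ * power-of-two size. 'head' is a singly-linked list through the buffers'
+ * 'next' pointers; 'tail' points at the last link so entries can be
+ * appended without walking the list.
+ */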
+struct dri_gem_bo_bucket {
+ dri_bo_gem *head, **tail;
+ /**
+ * Limit on the number of entries in this bucket.
+ *
+ * 0 means that this caching at this bucket size is disabled.
+ * -1 means that there is no limit to caching at this size.
+ */
+ int max_entries;
+ int num_entries;
+};
+
+/* Arbitrarily chosen; 16 buckets cover power-of-two sizes from one page up
+ * to 1 << 15 pages, or 128MB, the largest size we'll cache for reuse.
+ */
+#define INTEL_GEM_BO_BUCKETS 16
+typedef struct _dri_bufmgr_gem {
+ dri_bufmgr bufmgr;
+
+ struct intel_bufmgr intel_bufmgr;
+
+ int fd;
+
+ int max_relocs;
+
+ struct drm_i915_gem_exec_object *exec_objects;
+ dri_bo **exec_bos;
+ int exec_size;
+ int exec_count;
+
+ /** Array of lists of cached gem objects of power-of-two sizes */
+ struct dri_gem_bo_bucket cache_bucket[INTEL_GEM_BO_BUCKETS];
+
+ struct drm_i915_gem_execbuffer exec_arg;
+} dri_bufmgr_gem;
+
+struct _dri_bo_gem {
+ dri_bo bo;
+
+ int refcount;
+ /** Boolean whether the mmap ioctl has been called for this buffer yet. */
+ int mapped;
+ uint32_t gem_handle;
+ const char *name;
+
+ /**
+ * Index of the buffer within the validation list while preparing a
+ * batchbuffer execution.
+ */
+ int validate_index;
+
+ /**
+ * Boolean whether set_domain to CPU is current
+ * Set when set_domain has been called
+ * Cleared when a batch has been submitted
+ */
+ int cpu_domain_set;
+
+ /** Array passed to the DRM containing relocation information. */
+ struct drm_i915_gem_relocation_entry *relocs;
+ /** Array of bos corresponding to relocs[i].target_handle */
+ dri_bo **reloc_target_bo;
+ /** Number of entries in relocs */
+ int reloc_count;
+ /** Mapped address for the buffer */
+ void *virtual;
+
+ /** free list */
+ dri_bo_gem *next;
+};
+
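+/* Round up to the next power-of-two exponent, e.g. logbase2(4096) == 12 and
+ * logbase2(4097) == 13. Used below to round allocation sizes up to a power
+ * of two so they can fall into a cache bucket.
+ */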
+static int
+logbase2(int n)
+{
+ int i = 1;
+ int log2 = 0;
+
+ while (n > i) {
+ i *= 2;
+ log2++;
+ }
+
+ return log2;
+}
+
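+/* Map a power-of-two, page-aligned size to its cache bucket: bucket i holds
+ * objects of (1 << i) pages, so e.g. a 64kB (16-page) object lands in bucket
+ * ffs(16) - 1 = 4.
+ */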
+static struct dri_gem_bo_bucket *
+dri_gem_bo_bucket_for_size(dri_bufmgr_gem *bufmgr_gem, unsigned long size)
+{
+ int i;
+
+ /* We only do buckets in power of two increments */
+ if ((size & (size - 1)) != 0)
+ return NULL;
+
+ /* We should only see sizes rounded to pages. */
+ assert((size % 4096) == 0);
+
+ /* We always allocate in units of pages */
+ i = ffs(size / 4096) - 1;
+ if (i >= INTEL_GEM_BO_BUCKETS)
+ return NULL;
+
+ return &bufmgr_gem->cache_bucket[i];
+}
+
+
+static void dri_gem_dump_validation_list(dri_bufmgr_gem *bufmgr_gem)
+{
+ int i, j;
+
+ for (i = 0; i < bufmgr_gem->exec_count; i++) {
+ dri_bo *bo = bufmgr_gem->exec_bos[i];
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+ if (bo_gem->relocs == NULL) {
+ DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, bo_gem->name);
+ continue;
+ }
+
+ for (j = 0; j < bo_gem->reloc_count; j++) {
+ dri_bo *target_bo = bo_gem->reloc_target_bo[j];
+ dri_bo_gem *target_gem = (dri_bo_gem *)target_bo;
+
+ DBG("%2d: %d (%s)@0x%08llx -> %d (%s)@0x%08lx + 0x%08x\n",
+ i,
+ bo_gem->gem_handle, bo_gem->name, bo_gem->relocs[j].offset,
+ target_gem->gem_handle, target_gem->name, target_bo->offset,
+ bo_gem->relocs[j].delta);
+ }
+ }
+}
+
+/**
+ * Adds the given buffer to the list of buffers to be validated (moved into the
+ * appropriate memory type) with the next batch submission.
+ *
+ * If a buffer is validated multiple times in a batch submission, it ends up
+ * with the intersection of the memory type flags and the union of the
+ * access flags.
+ */
+static void
+intel_add_validate_buffer(dri_bo *bo)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ int index;
+
+ if (bo_gem->validate_index != -1)
+ return;
+
+ /* Extend the array of validation entries as necessary. */
+ if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
+ int new_size = bufmgr_gem->exec_size * 2;
+
+ if (new_size == 0)
+ new_size = 5;
+
+ bufmgr_gem->exec_objects =
+ realloc(bufmgr_gem->exec_objects,
+ sizeof(*bufmgr_gem->exec_objects) * new_size);
+ bufmgr_gem->exec_bos =
+ realloc(bufmgr_gem->exec_bos,
+ sizeof(*bufmgr_gem->exec_bos) * new_size);
+ bufmgr_gem->exec_size = new_size;
+ }
+
+ index = bufmgr_gem->exec_count;
+ bo_gem->validate_index = index;
+ /* Fill in array entry */
+ bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
+ bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
+ bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
+ bufmgr_gem->exec_objects[index].alignment = 0;
+ bufmgr_gem->exec_objects[index].offset = 0;
+ bufmgr_gem->exec_bos[index] = bo;
+ dri_bo_reference(bo);
+ bufmgr_gem->exec_count++;
+}
+
+
+#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
+ sizeof(uint32_t))
+
+static int
+intel_setup_reloc_list(dri_bo *bo)
+{
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+
+ bo_gem->relocs = malloc(bufmgr_gem->max_relocs *
+ sizeof(struct drm_i915_gem_relocation_entry));
+ bo_gem->reloc_target_bo = malloc(bufmgr_gem->max_relocs * sizeof(dri_bo *));
+
+ return 0;
+}
+
+static dri_bo *
+dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
+ dri_bo_gem *bo_gem;
+ unsigned int page_size = getpagesize();
+ int ret;
+ struct dri_gem_bo_bucket *bucket;
+ int alloc_from_cache = 0;
+ unsigned long bo_size;
+
+ /* Round the allocated size up to a power of two number of pages. */
+ bo_size = 1 << logbase2(size);
+ if (bo_size < page_size)
+ bo_size = page_size;
+ bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo_size);
+
+ /* If we don't have caching at this size, don't actually round the
+ * allocation up.
+ */
+ if (bucket == NULL || bucket->max_entries == 0) {
+ bo_size = size;
+ if (bo_size < page_size)
+ bo_size = page_size;
+ }
+
+ /* Get a buffer out of the cache if available */
+ if (bucket != NULL && bucket->num_entries > 0) {
+ struct drm_i915_gem_busy busy;
+
+ bo_gem = bucket->head;
+ busy.handle = bo_gem->gem_handle;
+
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
+ alloc_from_cache = (ret == 0 && busy.busy == 0);
+
+ if (alloc_from_cache) {
+ bucket->head = bo_gem->next;
+ if (bo_gem->next == NULL)
+ bucket->tail = &bucket->head;
+ bucket->num_entries--;
+ }
+ }
+
+ if (!alloc_from_cache) {
+ struct drm_gem_create create;
+
+ bo_gem = calloc(1, sizeof(*bo_gem));
+ if (!bo_gem)
+ return NULL;
+
+ bo_gem->bo.size = bo_size;
+ memset(&create, 0, sizeof(create));
+ create.size = bo_size;
+
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CREATE, &create);
+ bo_gem->gem_handle = create.handle;
+ if (ret != 0) {
+ free(bo_gem);
+ return NULL;
+ }
+ bo_gem->bo.bufmgr = bufmgr;
+ }
+
+ bo_gem->name = name;
+ bo_gem->refcount = 1;
+ bo_gem->validate_index = -1;
+
+ DBG("bo_create: buf %d (%s) %ldb\n",
+ bo_gem->gem_handle, bo_gem->name, size);
+
+ return &bo_gem->bo;
+}
+
+/**
+ * Returns a dri_bo wrapping the given buffer object handle.
+ *
+ * This can be used when one application needs to pass a buffer object
+ * to another.
+ */
+dri_bo *
+intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
+ unsigned int handle)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
+ dri_bo_gem *bo_gem;
+ int ret;
+ struct drm_gem_open open_arg;
+
+ bo_gem = calloc(1, sizeof(*bo_gem));
+ if (!bo_gem)
+ return NULL;
+
+ memset(&open_arg, 0, sizeof(open_arg));
+ open_arg.name = handle;
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
+ if (ret != 0) {
+ fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
+ name, handle, strerror(-ret));
+ free(bo_gem);
+ return NULL;
+ }
+ bo_gem->bo.size = open_arg.size;
+ bo_gem->bo.offset = 0;
+ bo_gem->bo.virtual = NULL;
+ bo_gem->bo.bufmgr = bufmgr;
+ bo_gem->name = name;
+ bo_gem->refcount = 1;
+ bo_gem->validate_index = -1;
+ bo_gem->gem_handle = open_arg.handle;
+
+ DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
+
+ return &bo_gem->bo;
+}
+
+static void
+dri_gem_bo_reference(dri_bo *bo)
+{
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+ bo_gem->refcount++;
+}
+
+static void
+dri_gem_bo_free(dri_bo *bo)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ struct drm_gem_close close;
+ int ret;
+
+ if (bo_gem->mapped)
+ munmap (bo_gem->virtual, bo_gem->bo.size);
+
+ /* Close this object */
+ close.handle = bo_gem->gem_handle;
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
+ if (ret != 0) {
+ fprintf(stderr,
+ "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
+ bo_gem->gem_handle, bo_gem->name, strerror(-ret));
+ }
+ free(bo);
+}
+
+static void
+dri_gem_bo_unreference(dri_bo *bo)
+{
+ dri_bufmgr_gem *bufmgr_gem;
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+ /* Check for NULL before touching the buffer. */
+ if (!bo)
+ return;
+
+ bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+
+ if (--bo_gem->refcount == 0) {
+ struct dri_gem_bo_bucket *bucket;
+
+ if (bo_gem->relocs != NULL) {
+ int i;
+
+ /* Unreference all the target buffers */
+ for (i = 0; i < bo_gem->reloc_count; i++)
+ dri_bo_unreference(bo_gem->reloc_target_bo[i]);
+ free(bo_gem->reloc_target_bo);
+ free(bo_gem->relocs);
+ }
+
+ DBG("bo_unreference final: %d (%s)\n",
+ bo_gem->gem_handle, bo_gem->name);
+
+ bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
+ /* Put the buffer into our internal cache for reuse if we can. */
+ if (bucket != NULL &&
+ (bucket->max_entries == -1 ||
+ (bucket->max_entries > 0 &&
+ bucket->num_entries < bucket->max_entries)))
+ {
+ bo_gem->name = 0;
+ bo_gem->validate_index = -1;
+ bo_gem->relocs = NULL;
+ bo_gem->reloc_target_bo = NULL;
+ bo_gem->reloc_count = 0;
+
+ bo_gem->next = NULL;
+ *bucket->tail = bo_gem;
+ bucket->tail = &bo_gem->next;
+ bucket->num_entries++;
+ } else {
+ dri_gem_bo_free(bo);
+ }
+
+ return;
+ }
+}
+
+static int
+dri_gem_bo_map(dri_bo *bo, int write_enable)
+{
+ dri_bufmgr_gem *bufmgr_gem;
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ struct drm_gem_set_domain set_domain;
+ int ret;
+
+ bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+
+ /* Allow recursive mapping. Mesa may recursively map buffers with
+ * nested display loops.
+ */
+ if (!bo_gem->mapped) {
+
+ assert(bo->virtual == NULL);
+
+ DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
+
+ if (bo_gem->virtual == NULL) {
+ struct drm_gem_mmap mmap_arg;
+
+ memset(&mmap_arg, 0, sizeof(mmap_arg));
+ mmap_arg.handle = bo_gem->gem_handle;
+ mmap_arg.offset = 0;
+ mmap_arg.size = bo->size;
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_MMAP, &mmap_arg);
+ if (ret != 0) {
+ fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n",
+ __FILE__, __LINE__,
+ bo_gem->gem_handle, bo_gem->name, strerror(errno));
+ }
+ bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
+ }
+ bo->virtual = bo_gem->virtual;
+ bo_gem->mapped = 1;
+ DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, bo_gem->virtual);
+ }
+
+ if (!bo_gem->cpu_domain_set) {
+ set_domain.handle = bo_gem->gem_handle;
+ set_domain.read_domains = DRM_GEM_DOMAIN_CPU;
+ set_domain.write_domain = write_enable ? DRM_GEM_DOMAIN_CPU : 0;
+ ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain);
+ if (ret != 0) {
+ fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
+ __FILE__, __LINE__,
+ bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain,
+ strerror (errno));
+ }
+ bo_gem->cpu_domain_set = 1;
+ }
+
+ return 0;
+}
+
+static int
+dri_gem_bo_unmap(dri_bo *bo)
+{
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+ if (bo == NULL)
+ return 0;
+
+ assert(bo_gem->mapped);
+
+ return 0;
+}
+
+static int
+dri_gem_bo_subdata (dri_bo *bo, unsigned long offset,
+ unsigned long size, const void *data)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ struct drm_gem_pwrite pwrite;
+ int ret;
+
+ memset (&pwrite, 0, sizeof (pwrite));
+ pwrite.handle = bo_gem->gem_handle;
+ pwrite.offset = offset;
+ pwrite.size = size;
+ pwrite.data_ptr = (uint64_t) (uintptr_t) data;
+ ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_PWRITE, &pwrite);
+ if (ret != 0) {
+ fprintf (stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
+ __FILE__, __LINE__,
+ bo_gem->gem_handle, (int) offset, (int) size,
+ strerror (errno));
+ }
+ return 0;
+}
+
+static int
+dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset,
+ unsigned long size, void *data)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ struct drm_gem_pread pread;
+ int ret;
+
+ memset (&pread, 0, sizeof (pread));
+ pread.handle = bo_gem->gem_handle;
+ pread.offset = offset;
+ pread.size = size;
+ pread.data_ptr = (uint64_t) (uintptr_t) data;
+ ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_PREAD, &pread);
+ if (ret != 0) {
+ fprintf (stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
+ __FILE__, __LINE__,
+ bo_gem->gem_handle, (int) offset, (int) size,
+ strerror (errno));
+ }
+ return 0;
+}
+
+static void
+dri_gem_bo_wait_rendering(dri_bo *bo)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ struct drm_gem_set_domain set_domain;
+ int ret;
+
+ set_domain.handle = bo_gem->gem_handle;
+ set_domain.read_domains = DRM_GEM_DOMAIN_CPU;
+ set_domain.write_domain = 0;
+ ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain);
+ if (ret != 0) {
+ fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
+ __FILE__, __LINE__,
+ bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain,
+ strerror (errno));
+ }
+}
+
+static void
+dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
+ int i;
+
+ free(bufmgr_gem->exec_objects);
+ free(bufmgr_gem->exec_bos);
+
+ /* Free any cached buffer objects we were going to reuse */
+ for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
+ struct dri_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
+ dri_bo_gem *bo_gem;
+
+ while ((bo_gem = bucket->head) != NULL) {
+ bucket->head = bo_gem->next;
+ if (bo_gem->next == NULL)
+ bucket->tail = &bucket->head;
+ bucket->num_entries--;
+
+ dri_gem_bo_free(&bo_gem->bo);
+ }
+ }
+
+ free(bufmgr);
+}
+
+/**
+ * Adds the target buffer to the validation list and adds the relocation
+ * to the reloc_buffer's relocation list.
+ *
+ * The relocation entry at the given offset must already contain the
+ * precomputed relocation value, because the kernel will optimize out
+ * the relocation entry write when the buffer hasn't moved from the
+ * last known offset in target_bo.
+ */
+static int
+dri_gem_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
+ uint32_t delta, uint32_t offset, dri_bo *target_bo)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ dri_bo_gem *target_bo_gem = (dri_bo_gem *)target_bo;
+
+ /* Create a new relocation list if needed */
+ if (bo_gem->relocs == NULL)
+ intel_setup_reloc_list(bo);
+
+ /* Check overflow */
+ assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
+
+ /* Check args */
+ assert (offset <= bo->size - 4);
+ assert ((write_domain & (write_domain-1)) == 0);
+
+ bo_gem->relocs[bo_gem->reloc_count].offset = offset;
+ bo_gem->relocs[bo_gem->reloc_count].delta = delta;
+ bo_gem->relocs[bo_gem->reloc_count].target_handle =
+ target_bo_gem->gem_handle;
+ bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
+ bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
+ bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
+
+ bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
+ dri_bo_reference(target_bo);
+
+ bo_gem->reloc_count++;
+ return 0;
+}
+
+/**
+ * Walk the tree of relocations rooted at BO and accumulate the list of
+ * validations to be performed and update the relocation buffers with
+ * index values into the validation list.
+ */
+static void
+dri_gem_bo_process_reloc(dri_bo *bo)
+{
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ int i;
+
+ if (bo_gem->relocs == NULL)
+ return;
+
+ for (i = 0; i < bo_gem->reloc_count; i++) {
+ dri_bo *target_bo = bo_gem->reloc_target_bo[i];
+
+ /* Continue walking the tree depth-first. */
+ dri_gem_bo_process_reloc(target_bo);
+
+ /* Add the target to the validate list */
+ intel_add_validate_buffer(target_bo);
+ }
+}
+
+static void *
+dri_gem_process_reloc(dri_bo *batch_buf)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *) batch_buf->bufmgr;
+
+ /* Update indices and set up the validate list. */
+ dri_gem_bo_process_reloc(batch_buf);
+
+ /* Add the batch buffer to the validation list. There are no relocations
+ * pointing to it.
+ */
+ intel_add_validate_buffer(batch_buf);
+
+ bufmgr_gem->exec_arg.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects;
+ bufmgr_gem->exec_arg.buffer_count = bufmgr_gem->exec_count;
+ bufmgr_gem->exec_arg.batch_start_offset = 0;
+ bufmgr_gem->exec_arg.batch_len = 0; /* written in intel_exec_ioctl */
+
+ return &bufmgr_gem->exec_arg;
+}
+
+static void
+intel_update_buffer_offsets (dri_bufmgr_gem *bufmgr_gem)
+{
+ int i;
+
+ for (i = 0; i < bufmgr_gem->exec_count; i++) {
+ dri_bo *bo = bufmgr_gem->exec_bos[i];
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+ /* Update the buffer offset */
+ if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
+ DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
+ bo_gem->gem_handle, bo_gem->name, bo->offset,
+ bufmgr_gem->exec_objects[i].offset);
+ bo->offset = bufmgr_gem->exec_objects[i].offset;
+ }
+ }
+}
+
+static void
+dri_gem_post_submit(dri_bo *batch_buf)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)batch_buf->bufmgr;
+ int i;
+
+ intel_update_buffer_offsets (bufmgr_gem);
+
+ if (bufmgr_gem->bufmgr.debug)
+ dri_gem_dump_validation_list(bufmgr_gem);
+
+ for (i = 0; i < bufmgr_gem->exec_count; i++) {
+ dri_bo *bo = bufmgr_gem->exec_bos[i];
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+ /* Need to call set_domain on next bo_map */
+ bo_gem->cpu_domain_set = 0;
+
+ /* Disconnect the buffer from the validate list */
+ bo_gem->validate_index = -1;
+ dri_bo_unreference(bo);
+ bufmgr_gem->exec_bos[i] = NULL;
+ }
+ bufmgr_gem->exec_count = 0;
+}
+
+/**
+ * Enables unlimited caching of buffer objects for reuse.
+ *
+ * This is potentially very memory expensive, as the cache at each bucket
+ * size is only bounded by how many buffers of that size we've managed to have
+ * in flight at once.
+ */
+void
+intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
+ int i;
+
+ for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
+ bufmgr_gem->cache_bucket[i].max_entries = -1;
+ }
+}
+
+/**
+ * Stub: aperture space checking is left to the kernel for GEM (see
+ * drm-gem.txt), so this always reports success.
+ */
+static int
+dri_gem_check_aperture_space(dri_bo *bo)
+{
+ return 0;
+}
+
+/**
+ * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
+ * and manage buffer objects.
+ *
+ * \param fd File descriptor of the opened DRM device.
+ * \param batch_size Size in bytes of batch buffers, used to size the
+ * per-buffer relocation lists.
+ */
+dri_bufmgr *
+intel_bufmgr_gem_init(int fd, int batch_size)
+{
+ dri_bufmgr_gem *bufmgr_gem;
+ int i;
+
+ bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
+ if (bufmgr_gem == NULL)
+ return NULL;
+
+ bufmgr_gem->fd = fd;
+
+ /* Let's go with one relocation per every 2 dwords (but round down a bit
+ * since a power of two will mean an extra page allocation for the reloc
+ * buffer).
+ *
+ * Every 4 was too few for the blender benchmark.
+ */
+ bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
+
+ bufmgr_gem->bufmgr.bo_alloc = dri_gem_bo_alloc;
+ bufmgr_gem->bufmgr.bo_reference = dri_gem_bo_reference;
+ bufmgr_gem->bufmgr.bo_unreference = dri_gem_bo_unreference;
+ bufmgr_gem->bufmgr.bo_map = dri_gem_bo_map;
+ bufmgr_gem->bufmgr.bo_unmap = dri_gem_bo_unmap;
+ bufmgr_gem->bufmgr.bo_subdata = dri_gem_bo_subdata;
+ bufmgr_gem->bufmgr.bo_get_subdata = dri_gem_bo_get_subdata;
+ bufmgr_gem->bufmgr.bo_wait_rendering = dri_gem_bo_wait_rendering;
+ bufmgr_gem->bufmgr.destroy = dri_bufmgr_gem_destroy;
+ bufmgr_gem->bufmgr.process_relocs = dri_gem_process_reloc;
+ bufmgr_gem->bufmgr.post_submit = dri_gem_post_submit;
+ bufmgr_gem->bufmgr.debug = 0;
+ bufmgr_gem->bufmgr.check_aperture_space = dri_gem_check_aperture_space;
+ bufmgr_gem->intel_bufmgr.emit_reloc = dri_gem_emit_reloc;
+ /* Initialize the linked lists for BO reuse cache. */
+ for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++)
+ bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head;
+
+ return &bufmgr_gem->bufmgr;
+}
+
+int
+intel_bo_emit_reloc(dri_bo *reloc_buf,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t delta, uint32_t offset, dri_bo *target_buf)
+{
+ struct intel_bufmgr *intel_bufmgr;
+
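+ /* The intel-specific function table is laid out immediately after the
+ * generic dri_bufmgr in dri_bufmgr_gem (see its definition above), so
+ * step past the base struct to reach it.
+ */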
+ intel_bufmgr = (struct intel_bufmgr *)(reloc_buf->bufmgr + 1);
+
+ return intel_bufmgr->emit_reloc(reloc_buf, read_domains, write_domain,
+ delta, offset, target_buf);
+}
diff --git a/libdrm/intel/mm.c b/libdrm/intel/mm.c
new file mode 100644
index 00000000..2605d8ec
--- /dev/null
+++ b/libdrm/intel/mm.c
@@ -0,0 +1,281 @@
+/*
+ * GLX Hardware Device Driver common code
+ * Copyright (C) 1999 Wittawat Yamwong
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * WITTAWAT YAMWONG, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "xf86drm.h"
+#include "mm.h"
+
+void
+drmmmDumpMemInfo(const struct mem_block *heap)
+{
+ drmMsg("Memory heap %p:\n", (void *)heap);
+ if (heap == 0) {
+ drmMsg(" heap == 0\n");
+ } else {
+ const struct mem_block *p;
+
+ for(p = heap->next; p != heap; p = p->next) {
+ drmMsg(" Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
+ p->free ? 'F':'.',
+ p->reserved ? 'R':'.');
+ }
+
+ drmMsg("\nFree list:\n");
+
+ for(p = heap->next_free; p != heap; p = p->next_free) {
+ drmMsg(" FREE Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
+ p->free ? 'F':'.',
+ p->reserved ? 'R':'.');
+ }
+
+ }
+ drmMsg("End of memory blocks\n");
+}
+
+struct mem_block *
+drmmmInit(int ofs, int size)
+{
+ struct mem_block *heap, *block;
+
+ if (size <= 0)
+ return NULL;
+
+ heap = (struct mem_block *) calloc(1, sizeof(struct mem_block));
+ if (!heap)
+ return NULL;
+
+ block = (struct mem_block *) calloc(1, sizeof(struct mem_block));
+ if (!block) {
+ free(heap);
+ return NULL;
+ }
+
+ heap->next = block;
+ heap->prev = block;
+ heap->next_free = block;
+ heap->prev_free = block;
+
+ block->heap = heap;
+ block->next = heap;
+ block->prev = heap;
+ block->next_free = heap;
+ block->prev_free = heap;
+
+ block->ofs = ofs;
+ block->size = size;
+ block->free = 1;
+
+ return heap;
+}
+
+
+static struct mem_block *
+SliceBlock(struct mem_block *p,
+ int startofs, int size,
+ int reserved, int alignment)
+{
+ struct mem_block *newblock;
+
+ /* break left [p, newblock, p->next], then p = newblock */
+ if (startofs > p->ofs) {
+ newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block));
+ if (!newblock)
+ return NULL;
+ newblock->ofs = startofs;
+ newblock->size = p->size - (startofs - p->ofs);
+ newblock->free = 1;
+ newblock->heap = p->heap;
+
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+
+ newblock->next_free = p->next_free;
+ newblock->prev_free = p;
+ p->next_free->prev_free = newblock;
+ p->next_free = newblock;
+
+ p->size -= newblock->size;
+ p = newblock;
+ }
+
+ /* break right, also [p, newblock, p->next] */
+ if (size < p->size) {
+ newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block));
+ if (!newblock)
+ return NULL;
+ newblock->ofs = startofs + size;
+ newblock->size = p->size - size;
+ newblock->free = 1;
+ newblock->heap = p->heap;
+
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+
+ newblock->next_free = p->next_free;
+ newblock->prev_free = p;
+ p->next_free->prev_free = newblock;
+ p->next_free = newblock;
+
+ p->size = size;
+ }
+
+ /* p = middle block */
+ p->free = 0;
+
+ /* Remove p from the free list:
+ */
+ p->next_free->prev_free = p->prev_free;
+ p->prev_free->next_free = p->next_free;
+
+ p->next_free = 0;
+ p->prev_free = 0;
+
+ p->reserved = reserved;
+ return p;
+}
+
+
+struct mem_block *
+drmmmAllocMem(struct mem_block *heap, int size, int align2, int startSearch)
+{
+ struct mem_block *p;
+ const int mask = (1 << align2)-1;
+ int startofs = 0;
+ int endofs;
+
+ if (!heap || align2 < 0 || size <= 0)
+ return NULL;
+
+ for (p = heap->next_free; p != heap; p = p->next_free) {
+ assert(p->free);
+
+ startofs = (p->ofs + mask) & ~mask;
+ if ( startofs < startSearch ) {
+ startofs = startSearch;
+ }
+ endofs = startofs+size;
+ if (endofs <= (p->ofs+p->size))
+ break;
+ }
+
+ if (p == heap)
+ return NULL;
+
+ assert(p->free);
+ p = SliceBlock(p,startofs,size,0,mask+1);
+
+ return p;
+}
+
+
+struct mem_block *
+drmmmFindBlock(struct mem_block *heap, int start)
+{
+ struct mem_block *p;
+
+ for (p = heap->next; p != heap; p = p->next) {
+ if (p->ofs == start)
+ return p;
+ }
+
+ return NULL;
+}
+
+
+static int
+Join2Blocks(struct mem_block *p)
+{
+ /* XXX there should be some assertions here */
+
+ /* NOTE: heap->free == 0 */
+
+ if (p->free && p->next->free) {
+ struct mem_block *q = p->next;
+
+ assert(p->ofs + p->size == q->ofs);
+ p->size += q->size;
+
+ p->next = q->next;
+ q->next->prev = p;
+
+ q->next_free->prev_free = q->prev_free;
+ q->prev_free->next_free = q->next_free;
+
+ free(q);
+ return 1;
+ }
+ return 0;
+}
+
+int
+drmmmFreeMem(struct mem_block *b)
+{
+ if (!b)
+ return 0;
+
+ if (b->free) {
+ drmMsg("block already free\n");
+ return -1;
+ }
+ if (b->reserved) {
+ drmMsg("block is reserved\n");
+ return -1;
+ }
+
+ b->free = 1;
+ b->next_free = b->heap->next_free;
+ b->prev_free = b->heap;
+ b->next_free->prev_free = b;
+ b->prev_free->next_free = b;
+
+ Join2Blocks(b);
+ if (b->prev != b->heap)
+ Join2Blocks(b->prev);
+
+ return 0;
+}
+
+
+void
+drmmmDestroy(struct mem_block *heap)
+{
+ struct mem_block *p;
+
+ if (!heap)
+ return;
+
+ for (p = heap->next; p != heap; ) {
+ struct mem_block *next = p->next;
+ free(p);
+ p = next;
+ }
+
+ free(heap);
+}
diff --git a/libdrm/intel/mm.h b/libdrm/intel/mm.h
new file mode 100644
index 00000000..965bb0cd
--- /dev/null
+++ b/libdrm/intel/mm.h
@@ -0,0 +1,88 @@
+/*
+ * GLX Hardware Device Driver common code
+ * Copyright (C) 1999 Wittawat Yamwong
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * KEITH WHITWELL, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/**
+ * Memory manager code. Primarily used by device drivers to manage texture
+ * heaps, etc.
+ */
+
+
+#ifndef MM_H
+#define MM_H
+
+struct mem_block {
+ struct mem_block *next, *prev;
+ struct mem_block *next_free, *prev_free;
+ struct mem_block *heap;
+ int ofs,size;
+ unsigned int free:1;
+ unsigned int reserved:1;
+};
+
+
+
+/**
+ * input: total size in bytes
+ * return: a heap pointer if OK, NULL if error
+ */
+extern struct mem_block *drmmmInit(int ofs, int size);
+
+/**
+ * Allocate 'size' bytes with 2^align2 bytes alignment, restricting the
+ * search to free memory after 'startSearch'. Depth and back buffers should
+ * be in different 4MB banks to get better page hits if possible.
+ * input: size = size of block
+ * align2 = 2^align2 bytes alignment
+ * startSearch = linear offset from start of heap to begin search
+ * return: pointer to the allocated block, 0 if error
+ */
+extern struct mem_block *drmmmAllocMem(struct mem_block *heap, int size,
+ int align2, int startSearch);
+
+/**
+ * Free a previously allocated block.
+ * input: pointer to a block
+ * return: 0 if OK, -1 if error
+ */
+extern int drmmmFreeMem(struct mem_block *b);
+
+/**
+ * Find the block starting at the given offset.
+ * input: pointer to a heap, start offset
+ * return: pointer to a block
+ */
+extern struct mem_block *drmmmFindBlock(struct mem_block *heap, int start);
+
+/**
+ * destroy MM
+ */
+extern void drmmmDestroy(struct mem_block *mmInit);
+
+/**
+ * For debugging purposes.
+ */
+extern void drmmmDumpMemInfo(const struct mem_block *mmInit);
+
+#endif
diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index 7b678138..e44706ed 100644
--- a/libdrm/xf86drm.c
+++ b/libdrm/xf86drm.c
@@ -113,7 +113,7 @@ static int drmDebugPrint(const char *format, va_list ap)
static int (*drm_debug_print)(const char *format, va_list ap) = drmDebugPrint;
-static void
+void
drmMsg(const char *format, ...)
{
va_list ap;
diff --git a/libdrm/xf86drm.h b/libdrm/xf86drm.h
index 35780aca..b29b3e5d 100644
--- a/libdrm/xf86drm.h
+++ b/libdrm/xf86drm.h
@@ -659,6 +659,7 @@ extern int drmSLLookupNeighbors(void *l, unsigned long key,
extern int drmOpenOnce(void *unused, const char *BusID, int *newlyopened);
extern void drmCloseOnce(int fd);
+extern void drmMsg(const char *format, ...);
extern int drmSetMaster(int fd);
extern int drmDropMaster(int fd);
diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h
index bb573407..a31de424 100644
--- a/libdrm/xf86mm.h
+++ b/libdrm/xf86mm.h
@@ -94,6 +94,18 @@ typedef struct _drmMMListHead
#define DRMLISTENTRY(__type, __item, __field) \
((__type *)(((char *) (__item)) - offsetof(__type, __field)))
+#define DRMLISTEMPTY(__item) ((__item)->next == (__item))
+
+#define DRMLISTFOREACHSAFE(__item, __temp, __list) \
+ for ((__item) = (__list)->next, (__temp) = (__item)->next; \
+ (__item) != (__list); \
+ (__item) = (__temp), (__temp) = (__item)->next)
+
+#define DRMLISTFOREACHSAFEREVERSE(__item, __temp, __list) \
+ for ((__item) = (__list)->prev, (__temp) = (__item)->prev; \
+ (__item) != (__list); \
+ (__item) = (__temp), (__temp) = (__item)->prev)
+
typedef struct _drmFence
{
unsigned handle;
diff --git a/linux-core/Makefile b/linux-core/Makefile
index b9405bbb..846386a5 100644
--- a/linux-core/Makefile
+++ b/linux-core/Makefile
@@ -30,6 +30,7 @@
#
# make DRM_MODULES="r128 radeon"
#
+DRM_MODULES=i915
SHELL=/bin/sh
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index ac9baf02..29503004 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -12,16 +12,16 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
- drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
+ drm_hashtab.o drm_memrange.o drm_object.o drm_compat.o \
drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_crtc.o \
drm_edid.o drm_modes.o drm_bo_lock.o drm_regman.o \
- drm_vm_nopage_compat.o drm_crtc_helper.o
+ drm_vm_nopage_compat.o drm_crtc_helper.o drm_gem.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs := i810_drv.o i810_dma.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
- i915_buffer.o i915_execbuf.o \
+ i915_buffer.o i915_execbuf.o i915_gem.o \
intel_display.o intel_crt.o intel_lvds.o intel_bios.o \
intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o \
intel_tv.o i915_compat.o intel_dvo.o dvo_ch7xxx.o \
diff --git a/linux-core/drm-gem.txt b/linux-core/drm-gem.txt
new file mode 100644
index 00000000..5cda87f8
--- /dev/null
+++ b/linux-core/drm-gem.txt
@@ -0,0 +1,805 @@
+ The Graphics Execution Manager
+ Part of the Direct Rendering Manager
+ ==============================
+
+ Keith Packard <keithp@keithp.com>
+ Eric Anholt <eric@anholt.net>
+ 2008-5-9
+
+Contents:
+
+ 1. GEM Overview
+ 2. API overview and conventions
+ 3. Object Creation/Destruction
+ 4. Reading/writing contents
+ 5. Mapping objects to userspace
+ 6. Memory Domains
+ 7. Execution (Intel specific)
+ 8. Other misc Intel-specific functions
+
+1. Graphics Execution Manager Overview
+
+Gem is designed to manage graphics memory, control access to the graphics
+device execution context and handle the essentially NUMA environment unique
+to modern graphics hardware. Gem allows multiple applications to share
+graphics device resources without the need to constantly reload the entire
+graphics card. Data may be shared between multiple applications with gem
+ensuring that the correct memory synchronization occurs.
+
+Graphics data can consume arbitrary amounts of memory, with 3D applications
+constructing ever larger sets of textures and vertices. With graphics cards
+memory space growing larger every year, and graphics APIs growing more
+complex, we can no longer insist that each application save a complete copy
+of their graphics state so that the card can be re-initialized from user
+space at each context switch. Ensuring that graphics data remains persistent
+across context switches allows applications significant new functionality
+while also improving performance for existing APIs.
+
+Modern Linux desktops include significant 3D rendering as a fundamental
+component of the desktop image construction process. 2D and 3D applications
+paint their content to offscreen storage and the central 'compositing
+manager' constructs the final screen image from those window contents. This
+means that pixel image data from these applications must move within reach
+of the compositing manager and be used as source operands for screen image
+rendering operations.
+
+Gem provides simple mechanisms to manage graphics data and control execution
+flow within the linux operating system. Using many existing kernel
+subsystems, it does this with a modest amount of code.
+
+2. API Overview and Conventions
+
+All APIs here are defined in terms of ioctls applied to the DRM file
+descriptor. To create and manipulate objects, an application must be
+'authorized' using the DRI or DRI2 protocols with the X server. To relax
+that, we will need to implement some better access control mechanisms within
+the hardware portion of the driver to prevent inappropriate
+cross-application data access.
+
+Any DRM driver which does not support GEM will return -ENODEV for all of
+these ioctls. Invalid object handles return -EINVAL. Invalid object names
+return -ENOENT. Other errors are as documented in the specific API below.
+
+To avoid the need to translate ioctl contents on mixed-size systems (with
+32-bit user space running on a 64-bit kernel), the ioctl data structures
+contain explicitly sized objects, using 64-bits for all size and pointer
+data and 32-bits for identifiers. In addition, the 64-bit objects are all
+carefully aligned on 64-bit boundaries. Because of this, all pointers in the
+ioctl data structures are passed as uint64_t values. Suitable casts will
+be necessary.
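+
+For example, a user-space buffer pointer is stored in one of these 64-bit
+fields with a double cast ('args.data_ptr' and 'buf' below are illustrative
+names only):
+
+ args.data_ptr = (uint64_t) (uintptr_t) buf;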
+
+One significant operation which is explicitly left out of this API is object
+locking. Applications are expected to perform locking of shared objects
+outside of the GEM api. This kind of locking is not necessary to safely
+manipulate the graphics engine, and with multiple objects interacting in
+unknown ways, per-object locking would likely introduce all kinds of
+lock-order issues. Punting this to the application seems like the only
+sensible plan. Given that DRM already offers a global lock on the hardware,
+this doesn't change the current situation.
+
+3. Object Creation and Destruction
+
+Gem provides explicit memory management primitives. System pages are
+allocated when the object is created, either as the fundamental storage for
+hardware where system memory is used by the graphics processor directly, or
+as backing store for graphics-processor resident memory.
+
+Objects are referenced from user space using handles. These are, for all
+intents and purposes, equivalent to file descriptors. We could simply use
+file descriptors were it not for the small limit (1024) of file descriptors
+available to applications, and for the fact that the X server (a rather
+significant user of this API) uses 'select' and has a limited maximum file
+descriptor for that operation. Given the ability to allocate more file
+descriptors, and given the ability to place these 'higher' in the file
+descriptor space, we'd love to simply use file descriptors.
+
+Objects may be published with a name so that other applications can access
+them. The name remains valid as long as the object exists. Right now, our
+DRI APIs use 32-bit integer names, so that's what we expose here.
+
+ A. Creation
+
+ struct drm_gem_create {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object
+ * will be returned.
+ */
+ uint64_t size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ uint32_t handle;
+ uint32_t pad;
+ };
+
+ /* usage */
+ create.size = 16384;
+ ret = ioctl (fd, DRM_IOCTL_GEM_CREATE, &create);
+ if (ret == 0)
+ return create.handle;
+
+ Note that the size is rounded up to a page boundary, and that
+ the rounded-up size is returned in 'size'. No name is assigned to
+ this object, making it local to this process.
+
+ If insufficient memory is available, -ENOMEM will be returned.
+
+ B. Closing
+
+ struct drm_gem_close {
+ /** Handle of the object to be closed. */
+ uint32_t handle;
+ uint32_t pad;
+ };
+
+
+ /* usage */
+ close.handle = <handle>;
+ ret = ioctl (fd, DRM_IOCTL_GEM_CLOSE, &close);
+
+ This call makes the specified handle invalid, and if no other
+ applications are using the object, any necessary graphics hardware
+ synchronization is performed and the resources used by the object
+ released.
+
+ C. Naming
+
+ struct drm_gem_flink {
+ /** Handle for the object being named */
+ uint32_t handle;
+
+ /** Returned global name */
+ uint32_t name;
+ };
+
+ /* usage */
+ flink.handle = <handle>;
+ ret = ioctl (fd, DRM_IOCTL_GEM_FLINK, &flink);
+ if (ret == 0)
+ return flink.name;
+
+ Flink creates a name for the object and returns it to the
+ application. This name can be used by other applications to gain
+ access to the same object.
+
+ D. Opening by name
+
+ struct drm_gem_open {
+ /** Name of object being opened */
+ uint32_t name;
+
+ /** Returned handle for the object */
+ uint32_t handle;
+
+ /** Returned size of the object */
+ uint64_t size;
+ };
+
+ /* usage */
+ open.name = <name>;
+ ret = ioctl (fd, DRM_IOCTL_GEM_OPEN, &open);
+ if (ret == 0) {
+ *sizep = open.size;
+ return open.handle;
+ }
+
+ Open accesses an existing object and returns a handle for it. If the
+ object doesn't exist, -ENOENT is returned. The size of the object is
+ also returned. This handle has all the same capabilities as the
+ handle used to create the object. In particular, the object is not
+ destroyed until all handles are closed.
+
+4. Basic read/write operations
+
+By default, gem objects are not mapped into the application's address space;
+getting data in and out of them is done with I/O operations instead. This
+allows the data to reside in otherwise unmapped pages, including pages in
+video memory on an attached discrete graphics card. In addition, using
+explicit I/O operations allows better control over cache contents: because
+graphics devices are generally not cache coherent with the CPU, mapping
+pages used for graphics into an application address space requires the use
+of expensive cache flushing operations. Providing direct control over
+graphics data access ensures that data are handled in the most efficient
+possible fashion.
+
+ A. Reading
+
+ struct drm_gem_pread {
+ /** Handle for the object being read. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to read from */
+ uint64_t offset;
+ /** Length of data to read */
+ uint64_t size;
+ /** Pointer to write the data into. */
+ uint64_t data_ptr; /* void * */
+ };
+
+ This copies data out of the specified object, starting at the
+ specified offset, into the user's memory. Any necessary graphics
+ device synchronization and flushing will be done automatically.
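+
+ A usage sketch, following the conventions of the examples above
+ ('handle', 'buf' and 'len' stand for values the caller already has):
+
+ /* usage */
+ pread.handle = <handle>;
+ pread.offset = <offset>;
+ pread.size = <len>;
+ pread.data_ptr = (uint64_t) (uintptr_t) buf;
+ ret = ioctl (fd, DRM_IOCTL_GEM_PREAD, &pread);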
+
+ struct drm_gem_pwrite {
+ /** Handle for the object being written to. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to write to */
+ uint64_t offset;
+ /** Length of data to write */
+ uint64_t size;
+ /** Pointer to read the data from. */
+ uint64_t data_ptr; /* void * */
+ };
+
+ This copies data from user memory into the specified object at
+ the specified offset. Again, device synchronization will
+ be handled by the kernel to ensure user space sees a
+ consistent view of the graphics device.
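+
+ A corresponding sketch for writing ('handle', 'buf' and 'len' again
+ stand for values the caller already has):
+
+ /* usage */
+ pwrite.handle = <handle>;
+ pwrite.offset = <offset>;
+ pwrite.size = <len>;
+ pwrite.data_ptr = (uint64_t) (uintptr_t) buf;
+ ret = ioctl (fd, DRM_IOCTL_GEM_PWRITE, &pwrite);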
+
+5. Mapping objects to user space
+
+For most objects, reading/writing is the preferred interaction mode.
+However, when the CPU is involved in rendering to cover deficiencies in
+hardware support for particular operations, the CPU will want to directly
+access the relevant objects.
+
+Because mmap is fairly heavyweight, we allow applications to retain maps to
+objects persistently and then update how they're using the memory through a
+separate interface. Applications which fail to use this separate interface
+may exhibit unpredictable behaviour as memory consistency will not be
+preserved.
+
+ A. Mapping
+
+ struct drm_gem_mmap {
+ /** Handle for the object being mapped. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset in the object to map. */
+ uint64_t offset;
+ /**
+ * Length of data to map.
+ *
+ * The value will be page-aligned.
+ */
+ uint64_t size;
+ /** Returned pointer the data was mapped at */
+ uint64_t addr_ptr; /* void * */
+ };
+
+ /* usage */
+ mmap.handle = <handle>;
+ mmap.offset = <offset>;
+ mmap.size = <size>;
+ ret = ioctl (fd, DRM_IOCTL_GEM_MMAP, &mmap);
+ if (ret == 0)
+ return (void *) (uintptr_t) mmap.addr_ptr;
+
+
+ B. Unmapping
+
+ munmap (addr, length);
+
+ Nothing strange here, just use the normal munmap syscall.
+
+6. Memory Domains
+
+Graphics devices remain a strong bastion of non cache-coherent memory. As a
+result, accessing data through one functional unit will end up loading that
+cache with data which then needs to be manually synchronized when that data
+is used with another functional unit.
+
+Tracking where data are resident is done by identifying how functional units
+deal with caches. Each cache is labeled as a separate memory domain. Then,
+each sequence of operations is expected to load data into various read
+domains and leave data in at most one write domain. Gem tracks the read and
+write memory domains of each object and performs the necessary
+synchronization operations when objects move from one domain set to another.
+
+For example, if operation 'A' constructs an image that is immediately used
+by operation 'B', and the read domain for 'B' is not the same as the
+write domain for 'A', then the write domain must be flushed and the read
+domain invalidated. If these two operations are both executed in the same
+command queue, then the flush operation can go in between them in the same
+queue, avoiding any kind of CPU-based synchronization and leaving the GPU to
+do the work itself.
+
+6.1 Memory Domains (GPU-independent)
+
+ * DRM_GEM_DOMAIN_CPU.
+
+ Objects in this domain are using caches which are connected to the CPU.
+ Moving objects from non-CPU domains into the CPU domain can involve waiting
+ for the GPU to finish with operations using this object. Moving objects
+ from this domain to a GPU domain can involve flushing CPU caches and chipset
+ buffers.
+
+6.2 GPU-independent memory domain ioctl
+
+This ioctl is independent of the GPU in use. So far, no use other than
+synchronizing objects to the CPU domain has been found; if that turns out
+to be generally true, this ioctl may be simplified further.
+
+ A. Explicit domain control
+
+ struct drm_gem_set_domain {
+ /** Handle for the object */
+ uint32_t handle;
+
+ /** New read domains */
+ uint32_t read_domains;
+
+ /** New write domain */
+ uint32_t write_domain;
+ };
+
+ /* usage */
+ set_domain.handle = <handle>;
+ set_domain.read_domains = <read_domains>;
+ set_domain.write_domain = <write_domain>;
+ ret = ioctl (fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain);
+
+ When the application wants to explicitly manage memory domains for
+ an object, it can use this function. Usually, this is only used
+ when the application wants to synchronize object contents between
+ the GPU and CPU-based application rendering. In that case,
+ the <read_domains> would be set to DRM_GEM_DOMAIN_CPU, and if the
+ application were going to write to the object, the <write_domain>
+ would also be set to DRM_GEM_DOMAIN_CPU. After the call, gem
+ guarantees that all previous rendering operations involving this
+ object are complete. The application is then free to access the
+ object through the address returned by the mmap call. Afterwards,
+ when the application again uses the object through the GPU, any
+ necessary CPU flushing will occur and the object will be correctly
+ synchronized with the GPU.
+
+ Note that this synchronization is not required for any accesses
+ going through the driver itself. The pread, pwrite and execbuffer
+ ioctls all perform the necessary domain management internally.
+ Explicit synchronization is only necessary when accessing the object
+ through the mmap'd address.
+
+7. Execution (Intel specific)
+
+Managing the command buffers is inherently chip-specific, so the core of gem
+doesn't have any intrinsic functions. Rather, execution is left to the
+device-specific portions of the driver.
+
+The Intel DRM_I915_GEM_EXECBUFFER ioctl takes a list of gem objects, all of
+which are mapped to the graphics device. The last object in the list is the
+command buffer.
+
+7.1. Relocations
+
+Command buffers often refer to other objects, and to allow the kernel driver
+to move objects around, a sequence of relocations is associated with each
+object. Device-specific relocation operations are used to place the
+target-object relative value into the object.
+
+The Intel driver has a single relocation type:
+
+ struct drm_i915_gem_relocation_entry {
+ /**
+ * Handle of the buffer being pointed to by this
+ * relocation entry.
+ *
+ * It's appealing to make this be an index into the
+ * mm_validate_entry list to refer to the buffer,
+ * but this allows the driver to create a relocation
+ * list for state buffers and not re-write it per
+ * exec using the buffer.
+ */
+ uint32_t target_handle;
+
+ /**
+ * Value to be added to the offset of the target
+ * buffer to make up the relocation entry.
+ */
+ uint32_t delta;
+
+ /**
+ * Offset in the buffer the relocation entry will be
+ * written into
+ */
+ uint64_t offset;
+
+ /**
+ * Offset value of the target buffer that the
+ * relocation entry was last written as.
+ *
+ * If the buffer has the same offset as last time, we
+ * can skip syncing and writing the relocation. This
+ * value is written back out by the execbuffer ioctl
+ * when the relocation is written.
+ */
+ uint64_t presumed_offset;
+
+ /**
+ * Target memory domains read by this operation.
+ */
+ uint32_t read_domains;
+
+ /*
+ * Target memory domains written by this operation.
+ *
+ * Note that only one domain may be written by the
+ * whole execbuffer operation, so that where there are
+ * conflicts, the application will get -EINVAL back.
+ */
+ uint32_t write_domain;
+ };
+
+ 'target_handle' is the handle to the target object. This object must
+ be one of the objects listed in the execbuffer request or
+ bad things will happen. The kernel doesn't check for this.
+
+ 'offset' is where, in the source object, the relocation data
+ are written. Each relocation value is a 32-bit value consisting
+ of the location of the target object in the GPU memory space plus
+ the 'delta' value included in the relocation.
+
+ 'presumed_offset' is where user-space believes the target object
+ lies in GPU memory space. If this value matches where the object
+ actually is, then no relocation data are written; the kernel
+ assumes that user space has set up data in the source object
+ using this presumption. This offers a fairly important optimization,
+ as writing relocation data requires mapping of the source object
+ into the kernel memory space.
+
+ 'read_domains' and 'write_domain' list the usage by the source
+ object of the target object. The kernel unions all of the domain
+ information from all relocations in the execbuffer request. No more
+ than one write_domain is allowed, otherwise an -EINVAL error is
+ returned. read_domains must contain write_domain. This domain
+ information is used to synchronize buffer contents as described
+ above in the section on domains.
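+
+ As a sketch, user space might fill one relocation entry like this
+ ('reloc' and the <...> placeholders are illustrative; libdrm's GEM
+ buffer manager does the equivalent in dri_gem_emit_reloc):
+
+ reloc.target_handle = <handle of the referenced object>;
+ reloc.delta = 0;
+ reloc.offset = <byte offset of the pointer within this buffer>;
+ reloc.presumed_offset = <last known offset of the target>;
+ reloc.read_domains = <read_domains>;
+ reloc.write_domain = <write_domain>;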
+
+7.1.1 Memory Domains (Intel specific)
+
+The Intel GPU has several internal caches which are not coherent and hence
+require explicit synchronization. Memory domains provide the necessary data
+to synchronize what is needed while leaving other cache contents intact.
+
+ * DRM_GEM_DOMAIN_I915_RENDER.
+ The GPU 3D and 2D rendering operations use a unified rendering cache, so
+ operations doing 3D painting and 2D blts will use this domain.
+
+ * DRM_GEM_DOMAIN_I915_SAMPLER
+ Textures are loaded by the sampler through a separate cache, so
+ any texture reading will use this domain. Note that the sampler
+ and renderer use different caches, so moving an object from render target
+ to texture source will require a domain transfer.
+
+ * DRM_GEM_DOMAIN_I915_COMMAND
+ The command buffer doesn't have an explicit cache (although it does
+ read ahead quite a bit), so this domain just indicates that the object
+ needs to be flushed to the GPU.
+
+ * DRM_GEM_DOMAIN_I915_INSTRUCTION
+ All of the programs on Gen4 and later chips use an instruction cache to
+ speed program execution. It must be explicitly flushed when new programs
+ are written to memory by the CPU.
+
+ * DRM_GEM_DOMAIN_I915_VERTEX
+ Vertex data uses two different vertex caches, but they're
+ both flushed with the same instruction.
+
+7.2 Execution object list (Intel specific)
+
+ struct drm_i915_gem_exec_object {
+ /**
+ * User's handle for a buffer to be bound into the GTT
+ * for this operation.
+ */
+ uint32_t handle;
+
+ /**
+ * List of relocations to be performed on this buffer
+ */
+ uint32_t relocation_count;
+ /* struct drm_i915_gem_relocation_entry *relocs */
+ uint64_t relocs_ptr;
+
+ /**
+ * Required alignment in graphics aperture
+ */
+ uint64_t alignment;
+
+ /**
+ * Returned value of the updated offset of the object,
+ * for future presumed_offset writes.
+ */
+ uint64_t offset;
+ };
+
+ Each object involved in a particular execution operation must be
+ listed using one of these structures.
+
+ 'handle' references the object.
+
+ 'relocs_ptr' is a user-mode pointer to an array of 'relocation_count'
+ drm_i915_gem_relocation_entry structs (see above) that
+ define the relocations necessary in this buffer. Note that all
+ relocations must reference other exec_object structures in the same
+ execbuffer ioctl and that those other buffers must come earlier in
+ the exec_object array. In other words, the dependencies mapped by the
+ exec_object relocations must form a directed acyclic graph.
+
+ 'alignment' is the byte alignment necessary for this buffer. Each
+ object has specific alignment requirements; as the kernel doesn't
+ know what each object is being used for, those requirements must be
+ provided by user mode. If an object is used in two different ways,
+ it's quite possible that the alignment requirements will differ.
+
+ 'offset' is a return value, receiving the location of the object
+ during this execbuffer operation. The application should use this
+ as the presumed offset in future operations; if the object does not
+ move, then the kernel need not write relocation data.
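+
+ A sketch of filling one entry ('exec' and 'relocs' are illustrative
+ names; the batch buffer must be the last entry in the array):
+
+ exec[i].handle = <handle>;
+ exec[i].relocation_count = <number of relocations for this buffer>;
+ exec[i].relocs_ptr = (uint64_t) (uintptr_t) relocs;
+ exec[i].alignment = 0;
+ exec[i].offset = 0;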
+
+7.3 Execbuffer ioctl (Intel specific)
+
+ struct drm_i915_gem_execbuffer {
+ /**
+ * List of buffers to be validated with their
+ * relocations to be performed on them.
+ *
+ * These buffers must be listed in an order such that
+ * all relocations a buffer is performing refer to
+ * buffers that have already appeared in the validate
+ * list.
+ */
+ /* struct drm_i915_gem_validate_entry *buffers */
+ uint64_t buffers_ptr;
+ uint32_t buffer_count;
+
+ /**
+ * Offset in the batchbuffer to start execution from.
+ */
+ uint32_t batch_start_offset;
+
+ /**
+ * Bytes used in batchbuffer from batch_start_offset
+ */
+ uint32_t batch_len;
+ uint32_t DR1;
+ uint32_t DR4;
+ uint32_t num_cliprects;
+ uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */
+ };
+
+
+ 'buffers_ptr' is a user-mode pointer to an array of 'buffer_count'
+ drm_i915_gem_exec_object structures which contains the complete set
+ of objects required for this execbuffer operation. The last entry in
+ this array, the 'batch buffer', is the buffer of commands which will
+ be linked to the ring and executed.
+
+ 'batch_start_offset' is the byte offset within the batch buffer which
+ contains the first command to execute. So far, we haven't found a
+ reason to use anything other than '0' here, but the thought was that
+ some space might be allocated for additional initialization which
+ could be skipped in some cases. This must be a multiple of 4.
+
+ 'batch_len' is the length, in bytes, of the data to be executed
+ (i.e., the amount of data after batch_start_offset). This must
+ be a multiple of 4.
+
+ 'num_cliprects' and 'cliprects_ptr' reference an array of
+ drm_clip_rect structures that is num_cliprects long. The entire
+ batch buffer will be executed multiple times, once for each
+ rectangle in this list. If num_cliprects is 0, then no clipping
+ rectangle will be set.
+
+ 'DR1' and 'DR4' are portions of the 3DSTATE_DRAWING_RECTANGLE
+ command which will be queued when this operation is clipped
+ (num_cliprects != 0).
+
+ DR1 bit definition
+ 31 Fast Scissor Clip Disable (debug only).
+ Disables a hardware optimization that
+ improves performance. This should have
+ no visible effect, other than reducing
+ performance
+
+ 30 Depth Buffer Coordinate Offset Disable.
+ This disables the addition of the
+ depth buffer offset bits which are used
+ to change the location of the depth buffer
+ relative to the front buffer.
+
+ 27:26 X Dither Offset. Specifies the X pixel
+ offset to use when accessing the dither table
+
+ 25:24 Y Dither Offset. Specifies the Y pixel
+ offset to use when accessing the dither
+ table.
+
+ DR4 bit definition
+ 31:16 Drawing Rectangle Origin Y. Specifies the Y
+ origin of coordinates relative to the
+ draw buffer.
+
+ 15:0 Drawing Rectangle Origin X. Specifies the X
+ origin of coordinates relative to the
+ draw buffer.
+
+ As you can see, these two fields are necessary for correctly
+ offsetting drawing within a buffer which contains multiple surfaces.
+ Note that DR1 is only used on Gen3 and earlier hardware and that
+ newer hardware sticks the dither offset elsewhere.
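+
+ A usage sketch, assuming 'exec' is the exec_object array described
+ in section 7.2 with the batch buffer as its last entry (the ioctl
+ name follows the same pattern as the other Intel-specific calls):
+
+ /* usage */
+ execbuf.buffers_ptr = (uint64_t) (uintptr_t) exec;
+ execbuf.buffer_count = <count>;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = <bytes of commands in the batch buffer>;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = 0;
+ execbuf.num_cliprects = 0;
+ execbuf.cliprects_ptr = 0;
+ ret = ioctl (fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);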
+
+7.3.1 Detailed Execution Description
+
+ Execution of a single batch buffer requires several preparatory
+ steps to make the objects visible to the graphics engine and resolve
+ relocations to account for their current addresses.
+
+ A. Mapping and Relocation
+
+ Each exec_object structure in the array is examined in turn.
+
+ If the object is not already bound to the GTT, it is assigned a
+ location in the graphics address space. If no space is available in
+ the GTT, some other object will be evicted. This may require waiting
+ for previous execbuffer requests to complete before that object can
+ be unmapped. With the location assigned, the pages for the object
+ are pinned in memory using find_or_create_page and the GTT entries
+ updated to point at the relevant pages using drm_agp_bind_pages.
+
+ Then the array of relocations is traversed. Each relocation record
+ looks up the target object and, if the presumed offset does not
+ match the current offset (remember that this buffer has already been
+ assigned an address as it must have been mapped earlier), the
+ relocation value is computed using the current offset. If the
+ object is currently in use by the graphics engine, writing the data
+ out must be preceded by a delay while the object is still busy.
+ Once it is idle, then the page containing the relocation is mapped
+ by the CPU and the updated relocation data written out.
+
+ The read_domains and write_domain entries in each relocation are
+ used to compute the new read_domains and write_domain values for the
+ target buffers. The actual execution of the domain changes must wait
+ until all of the exec_object entries have been evaluated as the
+ complete set of domain information will not be available until then.
+
+ B. Memory Domain Resolution
+
+ After all of the new memory domain data has been pulled out of the
+ relocations and computed for each object, the list of objects is
+ again traversed and the new memory domains compared against the
+ current memory domains. There are two basic operations involved here:
+
+ * Flushing the current write domain. If the new read domains
+ are not equal to the current write domain, then the current
+ write domain must be flushed. Otherwise, reads will not see data
+ present in the write domain cache. In addition, any new read domains
+ other than the current write domain must be invalidated to ensure
+ that the flushed data are re-read into their caches.
+
+ * Invalidating new read domains. Any domains which were not currently
+ used for this object must be invalidated as old objects which
+ were mapped at the same location may have stale data in the new
+ domain caches.
+
+ If the CPU cache is being invalidated and some GPU cache is being
+ flushed, then we'll have to wait for rendering to complete so that
+ any pending GPU writes will be complete before we flush the GPU
+ cache.
+
+ If the CPU cache is being flushed, then we use 'clflush' to get data
+ written from the CPU.
+
+ Because the GPU caches cannot be partially flushed or invalidated,
+ we don't actually flush them during this traversal stage. Rather, we
+ gather the invalidate and flush bits up in the device structure.
+
+ Once all of the object domain changes have been evaluated, then the
+ gathered invalidate and flush bits are examined. For any GPU flush
+ operations, we emit a single MI_FLUSH command that performs all of
+ the necessary flushes. We then look to see if the CPU cache was
+ flushed. If so, we use the chipset flush magic (writing to a special
+ page) to get the data out of the chipset and into memory.
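+
+	A minimal sketch of this accumulation (illustrative only; the real
+	logic lives in the i915 execbuffer path and handles more cases):
+
+		/* per object, during the traversal */
+		if (obj->write_domain &&
+		    (obj->pending_read_domains & ~obj->write_domain)) {
+			dev->flush_domains |= obj->write_domain;
+			dev->invalidate_domains |=
+				obj->pending_read_domains & ~obj->write_domain;
+		}
+
+		/* once, after all objects have been examined */
+		if (dev->flush_domains & ~DRM_GEM_DOMAIN_CPU)
+			emit_mi_flush(dev);	/* illustrative name */
+		if (dev->flush_domains & DRM_GEM_DOMAIN_CPU)
+			chipset_flush(dev);	/* illustrative name */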
+
+ C. Queuing Batch Buffer to the Ring
+
+ With all of the objects resident in graphics memory space, and all
+ of the caches prepared with appropriate data, the batch buffer
+ object can be queued to the ring. If there are clip rectangles, then
+ the buffer is queued once per rectangle, with suitable clipping
+ inserted into the ring just before the batch buffer.
+
+ D. Creating an IRQ Cookie
+
+ Right after the batch buffer is placed in the ring, a request to
+ generate an IRQ is added to the ring along with a command to write a
+ marker into memory. When the IRQ fires, the driver can look at the
+	memory location to see how far along the ring the GPU has progressed.
+	This magic cookie value is stored in each object used in this
+	execbuffer command; it is used wherever 'wait for rendering' appears
+	above in this document.
+
+ E. Writing back the new object offsets
+
+ So that the application has a better idea what to use for
+ 'presumed_offset' values later, the current object offsets are
+ written back to the exec_object structures.
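+
+	For example (illustrative), a user-space buffer manager can record
+	the returned offsets so that later submissions start with accurate
+	presumed offsets:
+
+		for (i = 0; i < exec.buffer_count; i++)
+			bufs[i]->presumed_offset = exec_objects[i].offset;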
+
+
+8. Miscellaneous Intel-specific functions.
+
+To complete the driver, a few other functions were necessary.
+
+8.1 Initialization from the X server
+
+As the X server is currently responsible for apportioning memory between 2D
+and 3D, it must tell the kernel which region of the GTT aperture is
+available for 3D objects to be mapped into.
+
+ struct drm_i915_gem_init {
+ /**
+ * Beginning offset in the GTT to be managed by the
+ * DRM memory manager.
+ */
+ uint64_t gtt_start;
+ /**
+ * Ending offset in the GTT to be managed by the DRM
+ * memory manager.
+ */
+ uint64_t gtt_end;
+ };
+ /* usage */
+ init.gtt_start = <gtt_start>;
+ init.gtt_end = <gtt_end>;
+ ret = ioctl (fd, DRM_IOCTL_I915_GEM_INIT, &init);
+
+ The GTT aperture between gtt_start and gtt_end will be used to map
+ objects. This also tells the kernel that the ring can be used,
+ pulling the ring addresses from the device registers.
+
+8.2 Pinning objects in the GTT
+
+For scan-out buffers and the current shared depth and back buffers, we need
+to keep them resident in the GTT at all times, at least for now. Pinning
+locks their pages in memory and keeps the object at a fixed offset in the
+graphics aperture. These operations are available only to root.
+
+ struct drm_i915_gem_pin {
+ /** Handle of the buffer to be pinned. */
+ uint32_t handle;
+ uint32_t pad;
+
+ /** alignment required within the aperture */
+ uint64_t alignment;
+
+ /** Returned GTT offset of the buffer. */
+ uint64_t offset;
+ };
+
+ /* usage */
+ pin.handle = <handle>;
+ pin.alignment = <alignment>;
+ ret = ioctl (fd, DRM_IOCTL_I915_GEM_PIN, &pin);
+ if (ret == 0)
+ return pin.offset;
+
+ Pinning an object ensures that it will not be evicted from the GTT
+ or moved. It will stay resident until destroyed or unpinned.
+
+ struct drm_i915_gem_unpin {
+ /** Handle of the buffer to be unpinned. */
+ uint32_t handle;
+ uint32_t pad;
+ };
+
+ /* usage */
+ unpin.handle = <handle>;
+ ret = ioctl (fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
+
+	Unpinning an object makes it possible to evict it from the GTT.  It
+	does not guarantee that the object will be evicted, only that it may
+	be.
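+
+	For example (illustrative), a driver switching scan-out to a new
+	buffer might pin the new one and unpin the old one:
+
+		pin.handle = new_fb_handle;
+		pin.alignment = 256 * 1024;	/* hardware-dependent */
+		ret = ioctl (fd, DRM_IOCTL_I915_GEM_PIN, &pin);
+		/* ... program the display base to pin.offset ... */
+		unpin.handle = old_fb_handle;
+		ret = ioctl (fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);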
+
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 5b2d7829..c18159f8 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -54,6 +54,7 @@
#include <linux/smp_lock.h> /* For (un)lock_kernel */
#include <linux/dma-mapping.h>
#include <linux/mm.h>
+#include <linux/kref.h>
#include <linux/pagemap.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
#include <linux/mutex.h>
@@ -89,6 +90,10 @@
struct drm_device;
struct drm_file;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+typedef unsigned long uintptr_t;
+#endif
+
/* If you want the memory alloc debug functionality, change define below */
/* #define DEBUG_MEMORY */
@@ -108,7 +113,7 @@ struct drm_file;
#define DRIVER_DMA_QUEUE 0x100
#define DRIVER_FB_DMA 0x200
#define DRIVER_MODESET 0x400
-
+#define DRIVER_GEM 0x800
/*@}*/
@@ -427,6 +432,11 @@ struct drm_file {
struct list_head refd_objects;
+ /** Mapping of mm object handles to object pointers. */
+ struct idr object_idr;
+ /** Lock for synchronization of access to object_idr. */
+ spinlock_t table_lock;
+
struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
struct file *filp;
void *driver_priv;
@@ -469,6 +479,11 @@ struct drm_lock_data {
uint32_t kernel_waiters;
uint32_t user_waiters;
int idle_has_lock;
+ /**
+ * Boolean signaling that the lock is held on behalf of the
+ * file_priv client by the kernel in an ioctl handler.
+ */
+ int kernel_held;
};
/**
@@ -544,17 +559,17 @@ struct drm_sigdata {
* Generic memory manager structs
*/
-struct drm_mm_node {
+struct drm_memrange_node {
struct list_head fl_entry;
struct list_head ml_entry;
int free;
unsigned long start;
unsigned long size;
- struct drm_mm *mm;
+ struct drm_memrange *mm;
void *private;
};
-struct drm_mm {
+struct drm_memrange {
struct list_head fl_entry;
struct list_head ml_entry;
};
@@ -568,9 +583,9 @@ struct drm_map_list {
struct drm_hash_item hash;
struct drm_map *map; /**< mapping */
uint64_t user_token;
- struct drm_mm_node *file_offset_node;
struct drm_master *master; /** if this map is associated with a specific
master */
+ struct drm_memrange_node *file_offset_node;
};
typedef struct drm_map drm_local_map_t;
@@ -618,6 +633,56 @@ struct drm_ati_pcigart_info {
int table_size;
};
+/**
+ * This structure defines the drm_mm memory object, which will be used by the
+ * DRM for its buffer objects.
+ */
+struct drm_gem_object {
+ /** Reference count of this object */
+ struct kref refcount;
+
+ /** Handle count of this object. Each handle also holds a reference */
+ struct kref handlecount;
+
+ /** Related drm device */
+ struct drm_device *dev;
+
+ /** File representing the shmem storage */
+ struct file *filp;
+
+ /**
+ * Size of the object, in bytes. Immutable over the object's
+ * lifetime.
+ */
+ size_t size;
+
+ /**
+ * Global name for this object, starts at 1. 0 means unnamed.
+ * Access is covered by the object_name_lock in the related drm_device
+ */
+ int name;
+
+ /**
+ * Memory domains. These monitor which caches contain read/write data
+ * related to the object. When transitioning from one set of domains
+ * to another, the driver is called to ensure that caches are suitably
+ * flushed and invalidated
+ */
+ uint32_t read_domains;
+ uint32_t write_domain;
+
+ /**
+ * While validating an exec operation, the
+ * new read/write domain values are computed here.
+ * They will be transferred to the above values
+ * at the point that any cache flushing occurs
+ */
+ uint32_t pending_read_domains;
+ uint32_t pending_write_domain;
+
+ void *driver_private;
+};
+
#include "drm_objects.h"
#include "drm_crtc.h"
@@ -745,6 +810,29 @@ struct drm_driver {
/* Master routines */
int (*master_create)(struct drm_device *dev, struct drm_master *master);
void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
+ /**
+ * Driver-specific constructor for drm_gem_objects, to set up
+ * obj->driver_private.
+ *
+ * Returns 0 on success.
+ */
+ int (*gem_init_object) (struct drm_gem_object *obj);
+ void (*gem_free_object) (struct drm_gem_object *obj);
+
+ /**
+ * Driver-specific callback to set memory domains from userspace
+ */
+ int (*gem_set_domain) (struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ uint32_t read_domains,
+ uint32_t write_domain);
+
+ /**
+ * Driver-specific callback to flush pwrite through chipset
+ */
+ int (*gem_flush_pwrite) (struct drm_gem_object *obj,
+ uint64_t offset,
+ uint64_t size);
struct drm_fence_driver *fence_driver;
struct drm_bo_driver *bo_driver;
@@ -827,7 +915,7 @@ struct drm_device {
struct list_head maplist; /**< Linked list of regions */
int map_count; /**< Number of mappable regions */
struct drm_open_hash map_hash; /**< User token hash table for maps */
- struct drm_mm offset_manager; /**< User token manager */
+ struct drm_memrange offset_manager; /**< User token manager */
struct drm_open_hash object_hash; /**< User token hash table for objects */
struct address_space *dev_mapping; /**< For unmap_mapping_range() */
struct page *ttm_dummy_page;
@@ -943,6 +1031,15 @@ struct drm_device {
/* DRM mode setting */
struct drm_mode_config mode_config;
+
+ /** \name GEM information */
+ /*@{ */
+ spinlock_t object_name_lock;
+ struct idr object_name_idr;
+ atomic_t object_count;
+ uint32_t invalidate_domains; /* domains pending invalidation */
+ uint32_t flush_domains; /* domains pending flush */
+ /*@} */
};
#if __OS_HAS_AGP
@@ -1069,6 +1166,10 @@ extern void drm_free_pages(unsigned long address, int order, int area);
extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
+extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
+ struct page **pages,
+ unsigned long num_pages,
+ uint32_t gtt_offset);
extern int drm_unbind_agp(DRM_AGP_MEM * handle);
extern void drm_free_memctl(size_t size);
@@ -1151,6 +1252,10 @@ extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
extern void drm_idlelock_take(struct drm_lock_data *lock_data);
extern void drm_idlelock_release(struct drm_lock_data *lock_data);
+extern int drm_client_lock_take(struct drm_device *dev,
+ struct drm_file *file_priv);
+extern void drm_client_lock_release(struct drm_device *dev,
+ struct drm_file *file_priv);
/*
* These are exported to drivers so that they can implement fencing using
@@ -1317,27 +1422,107 @@ extern int drm_sysfs_connector_add(struct drm_connector *connector);
extern void drm_sysfs_connector_remove(struct drm_connector *connector);
/*
- * Basic memory manager support (drm_mm.c)
+ * Basic memory manager support (drm_memrange.c)
*/
-extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size,
- unsigned alignment);
-extern void drm_mm_put_block(struct drm_mm_node *cur);
-extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
- unsigned alignment, int best_match);
-extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
-extern void drm_mm_takedown(struct drm_mm *mm);
-extern int drm_mm_clean(struct drm_mm *mm);
-extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
-extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
-extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
-extern void drm_mm_print(struct drm_mm *mm, const char *name);
-
-static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
+extern struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * parent,
+ unsigned long size,
+ unsigned alignment);
+extern void drm_memrange_put_block(struct drm_memrange_node *cur);
+extern struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange *mm,
+ unsigned long size,
+ unsigned alignment, int best_match);
+extern int drm_memrange_init(struct drm_memrange *mm,
+ unsigned long start, unsigned long size);
+extern void drm_memrange_takedown(struct drm_memrange *mm);
+extern int drm_memrange_clean(struct drm_memrange *mm);
+extern unsigned long drm_memrange_tail_space(struct drm_memrange *mm);
+extern int drm_memrange_remove_space_from_tail(struct drm_memrange *mm,
+ unsigned long size);
+extern int drm_memrange_add_space_to_tail(struct drm_memrange *mm,
+ unsigned long size);
+static inline struct drm_memrange *drm_get_mm(struct drm_memrange_node *block)
{
return block->mm;
}
+/* Graphics Execution Manager library functions (drm_gem.c) */
+int
+drm_gem_init (struct drm_device *dev);
+
+void
+drm_gem_object_free (struct kref *kref);
+
+void
+drm_gem_object_handle_free (struct kref *kref);
+
+static inline void drm_gem_object_reference(struct drm_gem_object *obj)
+{
+ kref_get(&obj->refcount);
+}
+
+static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
+{
+ if (obj == NULL)
+ return;
+
+ kref_put (&obj->refcount, drm_gem_object_free);
+}
+
+static inline void drm_gem_object_handle_reference (struct drm_gem_object *obj)
+{
+ drm_gem_object_reference (obj);
+ kref_get(&obj->handlecount);
+}
+
+static inline void drm_gem_object_handle_unreference (struct drm_gem_object *obj)
+{
+ if (obj == NULL)
+ return;
+
+ /*
+ * Must bump handle count first as this may be the last
+ * ref, in which case the object would disappear before we
+ * checked for a name
+ */
+ kref_put (&obj->handlecount, drm_gem_object_handle_free);
+ drm_gem_object_unreference (obj);
+}
+
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+ int handle);
+int drm_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_close_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_open_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
+void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
+
+
+/*
+ * Given the new read/write domains for an object,
+ * compute the invalidate/flush domains for the whole device.
+ *
+ */
+int drm_gem_object_set_domain (struct drm_gem_object *object,
+ uint32_t read_domains,
+ uint32_t write_domains);
+
+
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index 0aa94a75..d6594b87 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -484,7 +484,50 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
return agp_unbind_memory(handle);
}
+/**
+ * Binds a collection of pages into AGP memory at the given offset, returning
+ * the AGP memory structure containing them.
+ *
+ * No reference is held on the pages during this time -- it is up to the
+ * caller to handle that.
+ */
+DRM_AGP_MEM *
+drm_agp_bind_pages(struct drm_device *dev,
+ struct page **pages,
+ unsigned long num_pages,
+ uint32_t gtt_offset)
+{
+ DRM_AGP_MEM *mem;
+ int ret, i;
+
+ DRM_DEBUG("drm_agp_populate_ttm\n");
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+ mem = drm_agp_allocate_memory(num_pages, AGP_USER_MEMORY);
+#else
+ mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
+ AGP_USER_MEMORY);
+#endif
+ if (mem == NULL) {
+ DRM_ERROR("Failed to allocate memory for %ld pages\n",
+ num_pages);
+ return NULL;
+ }
+
+ for (i = 0; i < num_pages; i++)
+ mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
+ mem->page_count = num_pages;
+
+ mem->is_flushed = TRUE;
+ ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
+ if (ret != 0) {
+ DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
+ agp_free_memory(mem);
+ return NULL;
+ }
+ return mem;
+}
+EXPORT_SYMBOL(drm_agp_bind_pages);
/*
* AGP ttm backend interface.
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index f2d3cebf..2e0d1243 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -418,14 +418,14 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
if (!bo->fence) {
list_del_init(&bo->lru);
if (bo->mem.mm_node) {
- drm_mm_put_block(bo->mem.mm_node);
+ drm_memrange_put_block(bo->mem.mm_node);
if (bo->pinned_node == bo->mem.mm_node)
bo->pinned_node = NULL;
bo->mem.mm_node = NULL;
}
list_del_init(&bo->pinned_lru);
if (bo->pinned_node) {
- drm_mm_put_block(bo->pinned_node);
+ drm_memrange_put_block(bo->pinned_node);
bo->pinned_node = NULL;
}
list_del_init(&bo->ddestroy);
@@ -790,7 +790,7 @@ out:
mutex_lock(&dev->struct_mutex);
if (evict_mem.mm_node) {
if (evict_mem.mm_node != bo->pinned_node)
- drm_mm_put_block(evict_mem.mm_node);
+ drm_memrange_put_block(evict_mem.mm_node);
evict_mem.mm_node = NULL;
}
drm_bo_add_to_lru(bo);
@@ -809,7 +809,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev,
struct drm_bo_mem_reg *mem,
uint32_t mem_type, int no_wait)
{
- struct drm_mm_node *node;
+ struct drm_memrange_node *node;
struct drm_buffer_manager *bm = &dev->bm;
struct drm_buffer_object *entry;
struct drm_mem_type_manager *man = &bm->man[mem_type];
@@ -819,7 +819,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev,
mutex_lock(&dev->struct_mutex);
do {
- node = drm_mm_search_free(&man->manager, num_pages,
+ node = drm_memrange_search_free(&man->manager, num_pages,
mem->page_alignment, 1);
if (node)
break;
@@ -845,7 +845,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev,
return -ENOMEM;
}
- node = drm_mm_get_block(node, num_pages, mem->page_alignment);
+ node = drm_memrange_get_block(node, num_pages, mem->page_alignment);
if (unlikely(!node)) {
mutex_unlock(&dev->struct_mutex);
return -ENOMEM;
@@ -923,7 +923,7 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,
int type_found = 0;
int type_ok = 0;
int has_eagain = 0;
- struct drm_mm_node *node = NULL;
+ struct drm_memrange_node *node = NULL;
int ret;
mem->mm_node = NULL;
@@ -951,10 +951,10 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,
mutex_lock(&dev->struct_mutex);
if (man->has_type && man->use_type) {
type_found = 1;
- node = drm_mm_search_free(&man->manager, mem->num_pages,
+ node = drm_memrange_search_free(&man->manager, mem->num_pages,
mem->page_alignment, 1);
if (node)
- node = drm_mm_get_block(node, mem->num_pages,
+ node = drm_memrange_get_block(node, mem->num_pages,
mem->page_alignment);
}
mutex_unlock(&dev->struct_mutex);
@@ -1339,7 +1339,7 @@ out_unlock:
if (ret || !move_unfenced) {
if (mem.mm_node) {
if (mem.mm_node != bo->pinned_node)
- drm_mm_put_block(mem.mm_node);
+ drm_memrange_put_block(mem.mm_node);
mem.mm_node = NULL;
}
drm_bo_add_to_lru(bo);
@@ -1431,7 +1431,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
if (bo->pinned_node != bo->mem.mm_node) {
if (bo->pinned_node != NULL)
- drm_mm_put_block(bo->pinned_node);
+ drm_memrange_put_block(bo->pinned_node);
bo->pinned_node = bo->mem.mm_node;
}
@@ -1442,7 +1442,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
mutex_lock(&dev->struct_mutex);
if (bo->pinned_node != bo->mem.mm_node)
- drm_mm_put_block(bo->pinned_node);
+ drm_memrange_put_block(bo->pinned_node);
list_del_init(&bo->pinned_lru);
bo->pinned_node = NULL;
@@ -2081,7 +2081,7 @@ static int drm_bo_leave_list(struct drm_buffer_object *bo,
if (bo->pinned_node == bo->mem.mm_node)
bo->pinned_node = NULL;
if (bo->pinned_node != NULL) {
- drm_mm_put_block(bo->pinned_node);
+ drm_memrange_put_block(bo->pinned_node);
bo->pinned_node = NULL;
}
mutex_unlock(&dev->struct_mutex);
@@ -2222,8 +2222,8 @@ int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean)
drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
- if (drm_mm_clean(&man->manager)) {
- drm_mm_takedown(&man->manager);
+ if (drm_memrange_clean(&man->manager)) {
+ drm_memrange_takedown(&man->manager);
} else {
ret = -EBUSY;
}
@@ -2294,7 +2294,7 @@ int drm_bo_init_mm(struct drm_device *dev, unsigned type,
DRM_ERROR("Zero size memory manager type %d\n", type);
return ret;
}
- ret = drm_mm_init(&man->manager, p_offset, p_size);
+ ret = drm_memrange_init(&man->manager, p_offset, p_size);
if (ret)
return ret;
}
@@ -2721,7 +2721,7 @@ static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
list->user_token = 0;
}
if (list->file_offset_node) {
- drm_mm_put_block(list->file_offset_node);
+ drm_memrange_put_block(list->file_offset_node);
list->file_offset_node = NULL;
}
@@ -2764,7 +2764,7 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
atomic_inc(&bo->usage);
map->handle = (void *)bo;
- list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
+ list->file_offset_node = drm_memrange_search_free(&dev->offset_manager,
bo->mem.num_pages, 0, 0);
if (unlikely(!list->file_offset_node)) {
@@ -2772,7 +2772,7 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
return -ENOMEM;
}
- list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+ list->file_offset_node = drm_memrange_get_block(list->file_offset_node,
bo->mem.num_pages, 0);
if (unlikely(!list->file_offset_node)) {
diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c
index 5c290af2..9147a475 100644
--- a/linux-core/drm_bo_move.c
+++ b/linux-core/drm_bo_move.c
@@ -41,7 +41,7 @@ static void drm_bo_free_old_node(struct drm_buffer_object *bo)
if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
mutex_lock(&bo->dev->struct_mutex);
- drm_mm_put_block(old_mem->mm_node);
+ drm_memrange_put_block(old_mem->mm_node);
mutex_unlock(&bo->dev->struct_mutex);
}
old_mem->mm_node = NULL;
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 82e5af57..df09e72b 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -175,6 +175,15 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CREATE, drm_gem_create_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_PREAD, drm_gem_pread_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_PWRITE, drm_gem_pwrite_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_MMAP, drm_gem_mmap_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_SET_DOMAIN, drm_gem_set_domain_ioctl, DRM_AUTH),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@@ -421,7 +430,7 @@ static void drm_cleanup(struct drm_device * dev)
drm_ctxbitmap_cleanup(dev);
drm_ht_remove(&dev->map_hash);
- drm_mm_takedown(&dev->offset_manager);
+ drm_memrange_takedown(&dev->offset_manager);
drm_ht_remove(&dev->object_hash);
drm_put_minor(&dev->primary);
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index 03881ee6..3b3a0a3c 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -262,6 +262,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
goto out_free;
}
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_open(dev, priv);
+
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
@@ -462,6 +465,9 @@ int drm_release(struct inode *inode, struct file *filp)
}
}
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_release(dev, file_priv);
+
drm_fasync(-1, filp, 0);
mutex_lock(&dev->ctxlist_mutex);
diff --git a/linux-core/drm_gem.c b/linux-core/drm_gem.c
new file mode 100644
index 00000000..b726e598
--- /dev/null
+++ b/linux-core/drm_gem.c
@@ -0,0 +1,639 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include "drmP.h"
+
+/** @file drm_gem.c
+ *
+ * This file provides some of the base ioctls and library routines for
+ * the graphics memory manager implemented by each device driver.
+ *
+ * Because various devices have different requirements in terms of
+ * synchronization and migration strategies, implementing that is left up to
+ * the driver, and all that the general API provides should be generic --
+ * allocating objects, reading/writing data with the cpu, freeing objects.
+ * Even there, platform-dependent optimizations for reading/writing data with
+ * the CPU mean we'll likely hook those out to driver-specific calls. However,
+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
+ *
+ * The goal was to have swap-backed object allocation managed through
+ * struct file. However, file descriptors as handles to a struct file have
+ * two major failings:
+ * - Process limits prevent more than 1024 or so being used at a time by
+ * default.
+ * - Inability to allocate high fds will aggravate the X Server's select()
+ * handling, and likely that of many GL client applications as well.
+ *
+ * This led to a plan of using our own integer IDs (called handles, following
+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
+ * ioctls. The objects themselves will still include the struct file so
+ * that we can transition to fds if the required kernel infrastructure shows
+ * up at a later date, and as our interface with shmfs for memory allocation.
+ */
+
+/**
+ * Initialize the GEM device fields
+ */
+
+int
+drm_gem_init(struct drm_device *dev)
+{
+ spin_lock_init(&dev->object_name_lock);
+ idr_init(&dev->object_name_idr);
+ atomic_set(&dev->object_count, 0);
+ return 0;
+}
+
+/**
+ * Allocate a GEM object of the specified size with shmfs backing store
+ */
+static struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_object *obj;
+
+ BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+ obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
+
+ obj->dev = dev;
+ obj->filp = shmem_file_setup("drm mm object", size, 0);
+ if (IS_ERR(obj->filp)) {
+ kfree(obj);
+ return NULL;
+ }
+
+ kref_init(&obj->refcount);
+ kref_init(&obj->handlecount);
+ obj->size = size;
+
+ /*
+ * We've just allocated pages from the kernel,
+ * so they've just been written by the CPU with
+ * zeros. They'll need to be clflushed before we
+ * use them with the GPU.
+ */
+ obj->write_domain = DRM_GEM_DOMAIN_CPU;
+ obj->read_domains = DRM_GEM_DOMAIN_CPU;
+ if (dev->driver->gem_init_object != NULL &&
+ dev->driver->gem_init_object(obj) != 0) {
+ fput(obj->filp);
+ kfree(obj);
+ return NULL;
+ }
+ atomic_inc(&dev->object_count);
+ return obj;
+}
+
+/**
+ * Removes the mapping from handle to filp for this object.
+ */
+static int
+drm_gem_handle_delete(struct drm_file *filp, int handle)
+{
+ struct drm_device *dev;
+ struct drm_gem_object *obj;
+
+ /* This is gross. The idr system doesn't let us try a delete and
+ * return an error code. It just spews if you fail at deleting.
+ * So, we have to grab a lock around finding the object and then
+ * doing the delete on it and dropping the refcount, or the user
+ * could race us to double-decrement the refcount and cause a
+ * use-after-free later. Given the frequency of our handle lookups,
+ * we may want to use ida for number allocation and a hash table
+ * for the pointers, anyway.
+ */
+ spin_lock(&filp->table_lock);
+
+ /* Check if we currently have a reference on the object */
+ obj = idr_find(&filp->object_idr, handle);
+ if (obj == NULL) {
+ spin_unlock(&filp->table_lock);
+ return -EINVAL;
+ }
+ dev = obj->dev;
+
+ /* Release reference and decrement refcount. */
+ idr_remove(&filp->object_idr, handle);
+ spin_unlock(&filp->table_lock);
+
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+static int
+drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ int *handlep)
+{
+ int ret;
+
+ /*
+ * Get the user-visible handle using idr.
+ */
+again:
+ /* ensure there is space available to allocate a handle */
+ if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
+ return -ENOMEM;
+
+ /* do the allocation under our spinlock */
+ spin_lock(&file_priv->table_lock);
+ ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
+ spin_unlock(&file_priv->table_lock);
+ if (ret == -EAGAIN)
+ goto again;
+
+ if (ret != 0)
+ return ret;
+
+ drm_gem_object_handle_reference(obj);
+ return 0;
+}
+
+/** Returns a reference to the object named by the handle. */
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+ int handle)
+{
+ struct drm_gem_object *obj;
+
+ spin_lock(&filp->table_lock);
+
+ /* Check if we currently have a reference on the object */
+ obj = idr_find(&filp->object_idr, handle);
+ if (obj == NULL) {
+ spin_unlock(&filp->table_lock);
+ return NULL;
+ }
+
+ drm_gem_object_reference(obj);
+
+ spin_unlock(&filp->table_lock);
+
+ return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+drm_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_create *args = data;
+ struct drm_gem_object *obj;
+ int handle, ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ args->size = roundup(args->size, PAGE_SIZE);
+
+ /* Allocate the new object */
+ obj = drm_gem_object_alloc(dev, args->size);
+ if (obj == NULL)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file_priv, obj, &handle);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+
+ return 0;
+}
+
+/**
+ * Releases the handle to an mm object.
+ */
+int
+drm_gem_close_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_close *args = data;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ ret = drm_gem_handle_delete(file_priv, args->handle);
+
+ return ret;
+}
+
+/**
+ * Reads data from the object referenced by handle.
+ *
+ * On error, the contents of *data are undefined.
+ */
+int
+drm_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_pread *args = data;
+ struct drm_gem_object *obj;
+ ssize_t read;
+ loff_t offset;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+ if (dev->driver->gem_set_domain) {
+ ret = dev->driver->gem_set_domain(obj, file_priv,
+ DRM_GEM_DOMAIN_CPU,
+ 0);
+ if (ret) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ }
+ offset = args->offset;
+
+ read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
+ args->size, &offset);
+ if (read != args->size) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (read < 0)
+ return read;
+ else
+ return -EINVAL;
+ }
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+/**
+ * Maps the contents of an object, returning the address it is mapped
+ * into.
+ *
+ * While the mapping holds a reference on the contents of the object, it doesn't
+ * imply a ref on the object itself.
+ */
+int
+drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_mmap *args = data;
+ struct drm_gem_object *obj;
+ loff_t offset;
+ unsigned long addr;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ offset = args->offset;
+
+ down_write(&current->mm->mmap_sem);
+ addr = do_mmap(obj->filp, 0, args->size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ args->offset);
+ up_write(&current->mm->mmap_sem);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR((void *)addr))
+ return addr;
+
+ args->addr_ptr = (uint64_t) addr;
+
+ return 0;
+}
+
+/**
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
+ */
+int
+drm_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_pwrite *args = data;
+ struct drm_gem_object *obj;
+ ssize_t written;
+ loff_t offset;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+ if (dev->driver->gem_set_domain) {
+ ret = dev->driver->gem_set_domain(obj, file_priv,
+ DRM_GEM_DOMAIN_CPU,
+ DRM_GEM_DOMAIN_CPU);
+ if (ret) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ }
+ offset = args->offset;
+
+ written = vfs_write(obj->filp,
+ (char __user *)(uintptr_t) args->data_ptr,
+ args->size, &offset);
+
+ if (written != args->size) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (written < 0)
+ return written;
+ else
+ return -EINVAL;
+ }
+
+ if (dev->driver->gem_flush_pwrite)
+ dev->driver->gem_flush_pwrite(obj,
+ args->offset,
+ args->size);
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+/**
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+ * is freed, the name goes away.
+ */
+int
+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_flink *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+again:
+ if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
+ return -ENOMEM;
+
+ spin_lock(&dev->object_name_lock);
+ if (obj->name) {
+ spin_unlock(&dev->object_name_lock);
+ return -EEXIST;
+ }
+ ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
+ &obj->name);
+ spin_unlock(&dev->object_name_lock);
+ if (ret == -EAGAIN)
+ goto again;
+
+ if (ret != 0) {
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ /*
+ * Leave the reference from the lookup around as the
+ * name table now holds one
+ */
+ args->name = (uint64_t) obj->name;
+
+ return 0;
+}
+
+/**
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
+int
+drm_gem_open_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_open *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+ int handle;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ spin_lock(&dev->object_name_lock);
+ obj = idr_find(&dev->object_name_idr, (int) args->name);
+ if (obj)
+ drm_gem_object_reference(obj);
+ spin_unlock(&dev->object_name_lock);
+ if (!obj)
+ return -ENOENT;
+
+ ret = drm_gem_handle_create(file_priv, obj, &handle);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ args->size = obj->size;
+
+ return 0;
+}
+
+/**
+ * Called when user space prepares to use an object
+ */
+int
+drm_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_set_domain *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+ if (dev->driver->gem_set_domain) {
+ ret = dev->driver->gem_set_domain(obj, file_priv,
+ args->read_domains,
+ args->write_domain);
+ } else {
+ obj->read_domains = args->read_domains;
+ obj->write_domain = args->write_domain;
+ ret = 0;
+ }
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/**
+ * Called at device open time, sets up the structure for handling refcounting
+ * of mm objects.
+ */
+void
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
+{
+ idr_init(&file_private->object_idr);
+ spin_lock_init(&file_private->table_lock);
+}
+
+/**
+ * Called at device close to release the file's
+ * handle references on objects.
+ */
+static int
+drm_gem_object_release_handle(int id, void *ptr, void *data)
+{
+ struct drm_gem_object *obj = ptr;
+
+ drm_gem_object_handle_unreference(obj);
+
+ return 0;
+}
+
+/**
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp.
+ */
+void
+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
+{
+ mutex_lock(&dev->struct_mutex);
+ idr_for_each(&file_private->object_idr,
+ &drm_gem_object_release_handle, NULL);
+
+ idr_destroy(&file_private->object_idr);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Called after the last reference to the object has been lost.
+ *
+ * Frees the object
+ */
+void
+drm_gem_object_free(struct kref *kref)
+{
+ struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+ struct drm_device *dev = obj->dev;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (dev->driver->gem_free_object != NULL)
+ dev->driver->gem_free_object(obj);
+
+ fput(obj->filp);
+ atomic_dec(&dev->object_count);
+ kfree(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free);
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+void
+drm_gem_object_handle_free(struct kref *kref)
+{
+ struct drm_gem_object *obj = container_of(kref,
+ struct drm_gem_object,
+ handlecount);
+ struct drm_device *dev = obj->dev;
+
+ /* Remove any name for this object */
+ spin_lock(&dev->object_name_lock);
+ if (obj->name) {
+ idr_remove(&dev->object_name_idr, obj->name);
+ spin_unlock(&dev->object_name_lock);
+ /*
+ * The object name held a reference to this object, drop
+ * that now.
+ */
+ drm_gem_object_unreference(obj);
+ } else
+ spin_unlock(&dev->object_name_lock);
+
+}
+EXPORT_SYMBOL(drm_gem_object_handle_free);
+
diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c
index e1c93054..c6024d95 100644
--- a/linux-core/drm_irq.c
+++ b/linux-core/drm_irq.c
@@ -133,6 +133,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
(unsigned long)dev);
+ init_timer_deferrable(&dev->vblank_disable_timer);
spin_lock_init(&dev->vbl_lock);
atomic_set(&dev->vbl_signal_pending, 0);
dev->num_crtcs = num_crtcs;
diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c
index 6bbf1444..d2fb1feb 100644
--- a/linux-core/drm_lock.c
+++ b/linux-core/drm_lock.c
@@ -384,6 +384,64 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
}
EXPORT_SYMBOL(drm_idlelock_release);
+/**
+ * Takes the lock on behalf of the client if needed, using the kernel context.
+ *
+ * This allows us to hide the hardware lock when it's required for protection
+ * of data structures (such as command ringbuffer) shared with the X Server, and
+ * a way for us to transition to lockless for those requests when the X Server
+ * stops accessing the ringbuffer directly, without having to update the
+ * other userland clients.
+ */
+int drm_client_lock_take(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_master *master = file_priv->master;
+ int ret;
+ unsigned long irqflags;
+
+ /* If the client has the lock, we're already done. */
+ if (drm_i_have_hw_lock(dev, file_priv))
+ return 0;
+
+ mutex_unlock (&dev->struct_mutex);
+ /* Client doesn't hold the lock. Block taking the lock with the kernel
+ * context on behalf of the client, and return whether we were
+ * successful.
+ */
+ spin_lock_irqsave(&master->lock.spinlock, irqflags);
+ master->lock.user_waiters++;
+ spin_unlock_irqrestore(&master->lock.spinlock, irqflags);
+ ret = wait_event_interruptible(master->lock.lock_queue,
+ drm_lock_take(&master->lock,
+ DRM_KERNEL_CONTEXT));
+ spin_lock_irqsave(&master->lock.spinlock, irqflags);
+ master->lock.user_waiters--;
+ if (ret != 0) {
+ spin_unlock_irqrestore(&master->lock.spinlock, irqflags);
+ } else {
+ master->lock.file_priv = file_priv;
+ master->lock.lock_time = jiffies;
+ master->lock.kernel_held = 1;
+ file_priv->lock_count++;
+ spin_unlock_irqrestore(&master->lock.spinlock, irqflags);
+ }
+ mutex_lock (&dev->struct_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(drm_client_lock_take);
+
+void drm_client_lock_release(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_master *master = file_priv->master;
+
+ if (master->lock.kernel_held) {
+ master->lock.kernel_held = 0;
+ master->lock.file_priv = NULL;
+ drm_lock_free(&master->lock, DRM_KERNEL_CONTEXT);
+ }
+}
+EXPORT_SYMBOL(drm_client_lock_release);
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c
index 75f5b521..4b494f9c 100644
--- a/linux-core/drm_memory.c
+++ b/linux-core/drm_memory.c
@@ -310,6 +310,7 @@ int drm_free_agp(DRM_AGP_MEM * handle, int pages)
{
return drm_agp_free_memory(handle) ? 0 : -EINVAL;
}
+EXPORT_SYMBOL(drm_free_agp);
/** Wrapper around agp_bind_memory() */
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
@@ -322,6 +323,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
{
return drm_agp_unbind_memory(handle);
}
+EXPORT_SYMBOL(drm_unbind_agp);
#else /* __OS_HAS_AGP*/
static void *agp_remap(unsigned long offset, unsigned long size,
diff --git a/linux-core/drm_mm.c b/linux-core/drm_memrange.c
index 28726a65..5921eff8 100644
--- a/linux-core/drm_mm.c
+++ b/linux-core/drm_memrange.c
@@ -44,26 +44,26 @@
#include "drmP.h"
#include <linux/slab.h>
-unsigned long drm_mm_tail_space(struct drm_mm *mm)
+unsigned long drm_memrange_tail_space(struct drm_memrange *mm)
{
struct list_head *tail_node;
- struct drm_mm_node *entry;
+ struct drm_memrange_node *entry;
tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+ entry = list_entry(tail_node, struct drm_memrange_node, ml_entry);
if (!entry->free)
return 0;
return entry->size;
}
-int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
+int drm_memrange_remove_space_from_tail(struct drm_memrange *mm, unsigned long size)
{
struct list_head *tail_node;
- struct drm_mm_node *entry;
+ struct drm_memrange_node *entry;
tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+ entry = list_entry(tail_node, struct drm_memrange_node, ml_entry);
if (!entry->free)
return -ENOMEM;
@@ -75,13 +75,13 @@ int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
}
-static int drm_mm_create_tail_node(struct drm_mm *mm,
+static int drm_memrange_create_tail_node(struct drm_memrange *mm,
unsigned long start,
unsigned long size)
{
- struct drm_mm_node *child;
+ struct drm_memrange_node *child;
- child = (struct drm_mm_node *)
+ child = (struct drm_memrange_node *)
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return -ENOMEM;
@@ -98,26 +98,26 @@ static int drm_mm_create_tail_node(struct drm_mm *mm,
}
-int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
+int drm_memrange_add_space_to_tail(struct drm_memrange *mm, unsigned long size)
{
struct list_head *tail_node;
- struct drm_mm_node *entry;
+ struct drm_memrange_node *entry;
tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+ entry = list_entry(tail_node, struct drm_memrange_node, ml_entry);
if (!entry->free) {
- return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
+ return drm_memrange_create_tail_node(mm, entry->start + entry->size, size);
}
entry->size += size;
return 0;
}
-static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
+static struct drm_memrange_node *drm_memrange_split_at_start(struct drm_memrange_node *parent,
unsigned long size)
{
- struct drm_mm_node *child;
+ struct drm_memrange_node *child;
- child = (struct drm_mm_node *)
+ child = (struct drm_memrange_node *)
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return NULL;
@@ -137,19 +137,19 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
return child;
}
-struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
+struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * parent,
unsigned long size, unsigned alignment)
{
- struct drm_mm_node *align_splitoff = NULL;
- struct drm_mm_node *child;
+ struct drm_memrange_node *align_splitoff = NULL;
+ struct drm_memrange_node *child;
unsigned tmp = 0;
if (alignment)
tmp = parent->start % alignment;
if (tmp) {
- align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
+ align_splitoff = drm_memrange_split_at_start(parent, alignment - tmp);
if (!align_splitoff)
return NULL;
}
@@ -159,40 +159,41 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
parent->free = 0;
return parent;
} else {
- child = drm_mm_split_at_start(parent, size);
+ child = drm_memrange_split_at_start(parent, size);
}
if (align_splitoff)
- drm_mm_put_block(align_splitoff);
+ drm_memrange_put_block(align_splitoff);
return child;
}
+EXPORT_SYMBOL(drm_memrange_get_block);
/*
* Put a block. Merge with the previous and / or next block if they are free.
* Otherwise add to the free stack.
*/
-void drm_mm_put_block(struct drm_mm_node * cur)
+void drm_memrange_put_block(struct drm_memrange_node * cur)
{
- struct drm_mm *mm = cur->mm;
+ struct drm_memrange *mm = cur->mm;
struct list_head *cur_head = &cur->ml_entry;
struct list_head *root_head = &mm->ml_entry;
- struct drm_mm_node *prev_node = NULL;
- struct drm_mm_node *next_node;
+ struct drm_memrange_node *prev_node = NULL;
+ struct drm_memrange_node *next_node;
int merged = 0;
if (cur_head->prev != root_head) {
- prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
+ prev_node = list_entry(cur_head->prev, struct drm_memrange_node, ml_entry);
if (prev_node->free) {
prev_node->size += cur->size;
merged = 1;
}
}
if (cur_head->next != root_head) {
- next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
+ next_node = list_entry(cur_head->next, struct drm_memrange_node, ml_entry);
if (next_node->free) {
if (merged) {
prev_node->size += next_node->size;
@@ -215,16 +216,16 @@ void drm_mm_put_block(struct drm_mm_node * cur)
drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM);
}
}
-EXPORT_SYMBOL(drm_mm_put_block);
+EXPORT_SYMBOL(drm_memrange_put_block);
-struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
+struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange * mm,
unsigned long size,
unsigned alignment, int best_match)
{
struct list_head *list;
const struct list_head *free_stack = &mm->fl_entry;
- struct drm_mm_node *entry;
- struct drm_mm_node *best;
+ struct drm_memrange_node *entry;
+ struct drm_memrange_node *best;
unsigned long best_size;
unsigned wasted;
@@ -232,7 +233,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
best_size = ~0UL;
list_for_each(list, free_stack) {
- entry = list_entry(list, struct drm_mm_node, fl_entry);
+ entry = list_entry(list, struct drm_memrange_node, fl_entry);
wasted = 0;
if (entry->size < size)
@@ -257,30 +258,31 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
return best;
}
+EXPORT_SYMBOL(drm_memrange_search_free);
-int drm_mm_clean(struct drm_mm * mm)
+int drm_memrange_clean(struct drm_memrange * mm)
{
struct list_head *head = &mm->ml_entry;
return (head->next->next == head);
}
-int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+int drm_memrange_init(struct drm_memrange * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->ml_entry);
INIT_LIST_HEAD(&mm->fl_entry);
- return drm_mm_create_tail_node(mm, start, size);
+ return drm_memrange_create_tail_node(mm, start, size);
}
-EXPORT_SYMBOL(drm_mm_init);
+EXPORT_SYMBOL(drm_memrange_init);
-void drm_mm_takedown(struct drm_mm * mm)
+void drm_memrange_takedown(struct drm_memrange * mm)
{
struct list_head *bnode = mm->fl_entry.next;
- struct drm_mm_node *entry;
+ struct drm_memrange_node *entry;
- entry = list_entry(bnode, struct drm_mm_node, fl_entry);
+ entry = list_entry(bnode, struct drm_memrange_node, fl_entry);
if (entry->ml_entry.next != &mm->ml_entry ||
entry->fl_entry.next != &mm->fl_entry) {
@@ -292,20 +294,4 @@ void drm_mm_takedown(struct drm_mm * mm)
list_del(&entry->ml_entry);
drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM);
}
-
-EXPORT_SYMBOL(drm_mm_takedown);
-
-void drm_mm_print(struct drm_mm *mm, const char *name)
-{
- struct list_head *list;
- const struct list_head *mm_stack = &mm->ml_entry;
- struct drm_mm_node *entry;
-
- DRM_DEBUG("Memory usage for '%s'\n", name ? name : "unknown");
- list_for_each(list, mm_stack) {
- entry = list_entry(list, struct drm_mm_node, ml_entry);
- DRM_DEBUG("\t0x%08lx %li %s pages\n", entry->start, entry->size,
- entry->free ? "free" : "used");
- }
-}
-EXPORT_SYMBOL(drm_mm_print);
+EXPORT_SYMBOL(drm_memrange_takedown);
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index 7feacd33..d0c34ca3 100644
--- a/linux-core/drm_objects.h
+++ b/linux-core/drm_objects.h
@@ -300,7 +300,12 @@ struct drm_ttm_backend_func {
void (*destroy) (struct drm_ttm_backend *backend);
};
-
+/**
+ * This structure associates a set of flags and methods with a drm_ttm
+ * object, and will also be subclassed by the particular backend.
+ *
+ * \sa #drm_agp_ttm_backend
+ */
struct drm_ttm_backend {
struct drm_device *dev;
uint32_t flags;
@@ -412,7 +417,7 @@ extern int drm_ttm_destroy(struct drm_ttm *ttm);
*/
struct drm_bo_mem_reg {
- struct drm_mm_node *mm_node;
+ struct drm_memrange_node *mm_node;
unsigned long size;
unsigned long num_pages;
uint32_t page_alignment;
@@ -493,7 +498,7 @@ struct drm_buffer_object {
unsigned long num_pages;
/* For pinned buffers */
- struct drm_mm_node *pinned_node;
+ struct drm_memrange_node *pinned_node;
uint32_t pinned_mem_type;
struct list_head pinned_lru;
@@ -528,7 +533,7 @@ struct drm_mem_type_manager {
int has_type;
int use_type;
int kern_init_type;
- struct drm_mm manager;
+ struct drm_memrange manager;
struct list_head lru;
struct list_head pinned;
uint32_t flags;
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index 7f185209..690e081c 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -51,6 +51,10 @@ static int drm_bufs_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_objects_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
+static int drm_gem_name_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data);
+static int drm_gem_object_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data);
#if DRM_DEBUG_CODE
static int drm_vma_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
@@ -70,6 +74,8 @@ static struct drm_proc_list {
{"queues", drm_queues_info},
{"bufs", drm_bufs_info},
{"objects", drm_objects_info},
+ {"gem_names", drm_gem_name_info},
+ {"gem_objects", drm_gem_object_info},
#if DRM_DEBUG_CODE
{"vma", drm_vma_info},
#endif
@@ -586,6 +592,79 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
return ret;
}
+struct drm_gem_name_info_data {
+ int len;
+ char *buf;
+ int eof;
+};
+
+static int drm_gem_one_name_info (int id, void *ptr, void *data)
+{
+ struct drm_gem_object *obj = ptr;
+ struct drm_gem_name_info_data *nid = data;
+
+ DRM_INFO ("name %d size %d\n", obj->name, obj->size);
+ if (nid->eof)
+ return 0;
+
+ nid->len += sprintf (&nid->buf[nid->len],
+ "%6d%9d%8d%9d\n",
+ obj->name, obj->size,
+ atomic_read(&obj->handlecount.refcount),
+ atomic_read(&obj->refcount.refcount));
+ if (nid->len > DRM_PROC_LIMIT) {
+ nid->eof = 1;
+ return 0;
+ }
+ return 0;
+}
+
+static int drm_gem_name_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ struct drm_gem_name_info_data nid;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ nid.len = sprintf (buf, " name size handles refcount\n");
+ nid.buf = buf;
+ nid.eof = 0;
+ idr_for_each (&dev->object_name_idr, drm_gem_one_name_info, &nid);
+
+ *start = &buf[offset];
+ *eof = 0;
+ if (nid.len > request + offset)
+ return request;
+ *eof = 1;
+ return nid.len - offset;
+}
+
+static int drm_gem_object_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT ("%d objects\n", atomic_read (&dev->object_count));
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
#if DRM_DEBUG_CODE
static int drm__vma_info(char *buf, char **start, off_t offset, int request,
diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c
index 8421a939..7c16f685 100644
--- a/linux-core/drm_sman.c
+++ b/linux-core/drm_sman.c
@@ -88,34 +88,34 @@ EXPORT_SYMBOL(drm_sman_init);
static void *drm_sman_mm_allocate(void *private, unsigned long size,
unsigned alignment)
{
- struct drm_mm *mm = (struct drm_mm *) private;
- struct drm_mm_node *tmp;
+ struct drm_memrange *mm = (struct drm_memrange *) private;
+ struct drm_memrange_node *tmp;
- tmp = drm_mm_search_free(mm, size, alignment, 1);
+ tmp = drm_memrange_search_free(mm, size, alignment, 1);
if (!tmp) {
return NULL;
}
- tmp = drm_mm_get_block(tmp, size, alignment);
+ tmp = drm_memrange_get_block(tmp, size, alignment);
return tmp;
}
static void drm_sman_mm_free(void *private, void *ref)
{
- struct drm_mm_node *node = (struct drm_mm_node *) ref;
+ struct drm_memrange_node *node = (struct drm_memrange_node *) ref;
- drm_mm_put_block(node);
+ drm_memrange_put_block(node);
}
static void drm_sman_mm_destroy(void *private)
{
- struct drm_mm *mm = (struct drm_mm *) private;
- drm_mm_takedown(mm);
+ struct drm_memrange *mm = (struct drm_memrange *) private;
+ drm_memrange_takedown(mm);
drm_free(mm, sizeof(*mm), DRM_MEM_MM);
}
static unsigned long drm_sman_mm_offset(void *private, void *ref)
{
- struct drm_mm_node *node = (struct drm_mm_node *) ref;
+ struct drm_memrange_node *node = (struct drm_memrange_node *) ref;
return node->start;
}
@@ -124,7 +124,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
unsigned long start, unsigned long size)
{
struct drm_sman_mm *sman_mm;
- struct drm_mm *mm;
+ struct drm_memrange *mm;
int ret;
BUG_ON(manager >= sman->num_managers);
@@ -135,7 +135,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
return -ENOMEM;
}
sman_mm->private = mm;
- ret = drm_mm_init(mm, start, size);
+ ret = drm_memrange_init(mm, start, size);
if (ret) {
drm_free(mm, sizeof(*mm), DRM_MEM_MM);
diff --git a/linux-core/drm_sman.h b/linux-core/drm_sman.h
index 39a39fef..0299776c 100644
--- a/linux-core/drm_sman.h
+++ b/linux-core/drm_sman.h
@@ -45,7 +45,7 @@
/*
* A class that is an abstraction of a simple memory allocator.
* The sman implementation provides a default such allocator
- * using the drm_mm.c implementation. But the user can replace it.
+ * using the drm_memrange.c implementation. But the user can replace it.
* See the SiS implementation, which may use the SiS FB kernel module
* for memory management.
*/
@@ -116,7 +116,7 @@ extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
unsigned int user_order, unsigned int owner_order);
/*
- * Initialize a drm_mm.c allocator. Should be called only once for each
+ * Initialize a drm_memrange.c allocator. Should be called only once for each
* manager unless a customized allocator is used.
*/
diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c
index 45b8f386..dc853b23 100644
--- a/linux-core/drm_stub.c
+++ b/linux-core/drm_stub.c
@@ -201,15 +201,15 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER))
return -ENOMEM;
- if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
- DRM_FILE_PAGE_OFFSET_SIZE)) {
+ if (drm_memrange_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE)) {
drm_ht_remove(&dev->map_hash);
return -ENOMEM;
}
if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
drm_ht_remove(&dev->map_hash);
- drm_mm_takedown(&dev->offset_manager);
+ drm_memrange_takedown(&dev->offset_manager);
return -ENOMEM;
}
@@ -249,7 +249,16 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
goto error_out_unreg;
}
+ if (driver->driver_features & DRIVER_GEM) {
+ retcode = drm_gem_init (dev);
+ if (retcode) {
+ DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
+ goto error_out_unreg;
+ }
+ }
+
drm_fence_manager_init(dev);
+
return 0;
error_out_unreg:
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index f755dcd4..8718bd10 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -575,7 +575,7 @@ static struct drm_driver driver = {
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
.load = i915_driver_load,
.unload = i915_driver_unload,
.firstopen = i915_driver_firstopen,
@@ -597,6 +597,10 @@ static struct drm_driver driver = {
.master_create = i915_master_create,
.master_destroy = i915_master_destroy,
.ioctls = i915_ioctls,
+ .gem_init_object = i915_gem_init_object,
+ .gem_free_object = i915_gem_free_object,
+ .gem_set_domain = i915_gem_set_domain,
+ .gem_flush_pwrite = i915_gem_flush_pwrite,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
diff --git a/linux-core/i915_gem.c b/linux-core/i915_gem.c
new file mode 100644
index 00000000..b2870893
--- /dev/null
+++ b/linux-core/i915_gem.c
@@ -0,0 +1,1759 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#define WATCH_COHERENCY 0
+#define WATCH_BUF 0
+#define WATCH_EXEC 0
+#define WATCH_LRU 0
+#define WATCH_RELOC 0
+
+static int
+i915_gem_object_set_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain);
+
+int
+i915_gem_init_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_init *args = data;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (args->gtt_start >= args->gtt_end ||
+ (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
+ (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ drm_memrange_init(&dev_priv->mm.gtt_space, args->gtt_start,
+ args->gtt_end - args->gtt_start);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static void
+i915_gem_object_free_page_list(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int page_count = obj->size / PAGE_SIZE;
+ int i;
+
+ if (obj_priv->page_list == NULL)
+ return;
+
+ for (i = 0; i < page_count; i++)
+ if (obj_priv->page_list[i] != NULL)
+ page_cache_release(obj_priv->page_list[i]);
+
+ drm_free(obj_priv->page_list,
+ page_count * sizeof(struct page *),
+ DRM_MEM_DRIVER);
+ obj_priv->page_list = NULL;
+}
+
+static void
+i915_gem_object_move_to_active(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ /* Add a reference if we're newly entering the active list. */
+ if (!obj_priv->active) {
+ drm_gem_object_reference(obj);
+ obj_priv->active = 1;
+ }
+ /* Move from whatever list we were on to the tail of execution. */
+ list_move_tail(&obj_priv->list,
+ &dev_priv->mm.active_list);
+}
+
+static void
+i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ if (obj_priv->pin_count != 0)
+ list_del_init(&obj_priv->list);
+ else
+ list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
+ if (obj_priv->active) {
+ obj_priv->active = 0;
+ drm_gem_object_unreference(obj);
+ }
+}
+
+/**
+ * Creates a new sequence number, emitting a write of it to the status page
+ * plus an interrupt, which will trigger i915_user_interrupt_handler.
+ *
+ * Must be called with struct_lock held.
+ *
+ * Returned sequence numbers are nonzero on success.
+ */
+static uint32_t
+i915_add_request(struct drm_device *dev, uint32_t flush_domains)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *request;
+ uint32_t seqno;
+ RING_LOCALS;
+
+ request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
+ if (request == NULL)
+ return 0;
+
+ /* Grab the seqno we're going to make this request be, and bump the
+ * next (skipping 0 so it can be the reserved no-seqno value).
+ */
+ seqno = dev_priv->mm.next_gem_seqno;
+ dev_priv->mm.next_gem_seqno++;
+ if (dev_priv->mm.next_gem_seqno == 0)
+ dev_priv->mm.next_gem_seqno++;
+
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(seqno);
+
+ OUT_RING(GFX_OP_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+
+ DRM_DEBUG("%d\n", seqno);
+
+ request->seqno = seqno;
+ request->emitted_jiffies = jiffies;
+ request->flush_domains = flush_domains;
+ if (list_empty(&dev_priv->mm.request_list))
+ mod_timer(&dev_priv->mm.retire_timer, jiffies + HZ);
+
+ list_add_tail(&request->list, &dev_priv->mm.request_list);
+
+ return seqno;
+}
+
+/**
+ * Command execution barrier
+ *
+ * Ensures that all commands in the ring are finished
+ * before signalling the CPU
+ */
+
+uint32_t
+i915_retire_commands(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ uint32_t flush_domains = 0;
+ RING_LOCALS;
+
+ /* The sampler always gets flushed on i965 (sigh) */
+ if (IS_I965G(dev))
+ flush_domains |= DRM_GEM_DOMAIN_I915_SAMPLER;
+ BEGIN_LP_RING(2);
+ OUT_RING(cmd);
+ OUT_RING(0); /* noop */
+ ADVANCE_LP_RING();
+ return flush_domains;
+}
+
+/**
+ * Moves buffers associated only with the given active seqno from the active
+ * to inactive list, potentially freeing them.
+ */
+static void
+i915_gem_retire_request(struct drm_device *dev,
+ struct drm_i915_gem_request *request)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (request->flush_domains != 0) {
+ struct drm_i915_gem_object *obj_priv, *next;
+
+ /* First clear any buffers that were only waiting for a flush
+ * matching the one just retired.
+ */
+
+ list_for_each_entry_safe(obj_priv, next,
+ &dev_priv->mm.flushing_list, list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+
+ if (obj->write_domain & request->flush_domains) {
+ obj->write_domain = 0;
+ i915_gem_object_move_to_inactive(obj);
+ }
+ }
+
+ }
+
+ /* Move any buffers on the active list that are no longer referenced
+ * by the ringbuffer to the flushing/inactive lists as appropriate.
+ */
+ while (!list_empty(&dev_priv->mm.active_list)) {
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = list_first_entry(&dev_priv->mm.active_list,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+
+ /* If the seqno being retired doesn't match the oldest in the
+ * list, then the oldest in the list must still be newer than
+ * this seqno.
+ */
+ if (obj_priv->last_rendering_seqno != request->seqno)
+ return;
+#if WATCH_LRU
+ DRM_INFO("%s: retire %d moves to inactive list %p\n",
+ __func__, request->seqno, obj);
+#endif
+
+ if (obj->write_domain != 0) {
+ list_move_tail(&obj_priv->list,
+ &dev_priv->mm.flushing_list);
+ } else {
+ i915_gem_object_move_to_inactive(obj);
+ }
+ }
+}
+
+/**
+ * Returns true if seq1 is later than seq2.
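+ *
+ * The comparison uses a signed 32-bit subtraction so it keeps working
+ * across sequence-number wraparound.  A worked example with hypothetical
+ * values: seq1 = 2 and seq2 = 0xfffffffe give seq1 - seq2 = 4, which is
+ * >= 0 as a signed value, so seq1 is (correctly) treated as later.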
+ */
+static int
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+ return (int32_t)(seq1 - seq2) >= 0;
+}
+
+static uint32_t
+i915_get_gem_seqno(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+}
+
+/**
+ * This function clears the request list as sequence numbers are passed.
+ */
+void
+i915_gem_retire_requests(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t seqno;
+
+ seqno = i915_get_gem_seqno(dev);
+
+ while (!list_empty(&dev_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+ uint32_t retiring_seqno;
+
+ request = list_first_entry(&dev_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ list);
+ retiring_seqno = request->seqno;
+
+ if (i915_seqno_passed(seqno, retiring_seqno)) {
+ i915_gem_retire_request(dev, request);
+
+ list_del(&request->list);
+ drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
+ } else
+ break;
+ }
+}
+
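+/*
+ * Timer callback: runs in interrupt context, so it only schedules the
+ * retire work item; the actual retiring (which needs struct_mutex) is
+ * done by i915_gem_retire_handler() below.
+ */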
+void
+i915_gem_retire_timeout(unsigned long data)
+{
+ struct drm_device *dev = (struct drm_device *) data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ schedule_work(&dev_priv->mm.retire_task);
+}
+
+void
+i915_gem_retire_handler(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv;
+ struct drm_device *dev;
+
+ dev_priv = container_of(work, struct drm_i915_private,
+ mm.retire_task);
+ dev = dev_priv->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_retire_requests(dev);
+ if (!list_empty(&dev_priv->mm.request_list))
+ mod_timer(&dev_priv->mm.retire_timer, jiffies + HZ);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_request(struct drm_device *dev, uint32_t seqno)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = 0;
+
+ BUG_ON(seqno == 0);
+
+ if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+ i915_user_irq_on(dev);
+ ret = wait_event_interruptible(dev_priv->irq_queue,
+ i915_seqno_passed(i915_get_gem_seqno(dev),
+ seqno));
+ i915_user_irq_off(dev);
+ }
+ if (ret)
+ DRM_ERROR ("%s returns %d (awaiting %d at %d)\n",
+ __func__, ret, seqno, i915_get_gem_seqno(dev));
+
+ /* Directly dispatch request retiring. While we have the work queue
+ * to handle this, the waiter on a request often wants an associated
+ * buffer to have made it to the inactive list, and we would need
+ * a separate wait queue to handle that.
+ */
+ if (ret == 0)
+ i915_gem_retire_requests(dev);
+
+ return ret;
+}
+
+static void
+i915_gem_flush(struct drm_device *dev,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t cmd;
+ RING_LOCALS;
+
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
+ invalidate_domains, flush_domains);
+#endif
+
+ if (flush_domains & DRM_GEM_DOMAIN_CPU)
+ drm_agp_chipset_flush(dev);
+
+ if ((invalidate_domains|flush_domains) & ~DRM_GEM_DOMAIN_CPU) {
+ /*
+ * read/write caches:
+ *
+ * DRM_GEM_DOMAIN_I915_RENDER is always invalidated, but is
+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
+ * also flushed at 2d versus 3d pipeline switches.
+ *
+ * read-only caches:
+ *
+ * DRM_GEM_DOMAIN_I915_SAMPLER is flushed on pre-965 if
+ * MI_READ_FLUSH is set, and is always flushed on 965.
+ *
+ * DRM_GEM_DOMAIN_I915_COMMAND may not exist?
+ *
+ * DRM_GEM_DOMAIN_I915_INSTRUCTION, which exists on 965, is
+ * invalidated when MI_EXE_FLUSH is set.
+ *
+ * DRM_GEM_DOMAIN_I915_VERTEX, which exists on 965, is
+ * invalidated with every MI_FLUSH.
+ *
+ * TLBs:
+ *
+ * On 965, TLBs associated with DRM_GEM_DOMAIN_I915_COMMAND
+ * and DRM_GEM_DOMAIN_CPU are invalidated at PTE write and
+ * DRM_GEM_DOMAIN_I915_RENDER and DRM_GEM_DOMAIN_I915_SAMPLER
+ * are flushed at any MI_FLUSH.
+ */
+
+ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ if ((invalidate_domains|flush_domains) &
+ DRM_GEM_DOMAIN_I915_RENDER)
+ cmd &= ~MI_NO_WRITE_FLUSH;
+ if (!IS_I965G(dev)) {
+ /*
+ * On the 965, the sampler cache always gets flushed
+ * and this bit is reserved.
+ */
+ if (invalidate_domains & DRM_GEM_DOMAIN_I915_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+ }
+ if (invalidate_domains & DRM_GEM_DOMAIN_I915_INSTRUCTION)
+ cmd |= MI_EXE_FLUSH;
+
+#if WATCH_EXEC
+ DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
+#endif
+ BEGIN_LP_RING(2);
+ OUT_RING(cmd);
+ OUT_RING(0); /* noop */
+ ADVANCE_LP_RING();
+ }
+}
+
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+static int
+i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret;
+
+ /* If there are writes queued to the buffer, flush and
+ * create a new seqno to wait for.
+ */
+ if (obj->write_domain & ~(DRM_GEM_DOMAIN_CPU)) {
+ uint32_t write_domain = obj->write_domain;
+#if WATCH_BUF
+ DRM_INFO("%s: flushing object %p from write domain %08x\n",
+ __func__, obj, write_domain);
+#endif
+ i915_gem_flush(dev, 0, write_domain);
+ obj->write_domain = 0;
+
+ i915_gem_object_move_to_active(obj);
+ obj_priv->last_rendering_seqno = i915_add_request(dev,
+ write_domain);
+ BUG_ON(obj_priv->last_rendering_seqno == 0);
+#if WATCH_LRU
+ DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
+#endif
+ }
+ /* If there is rendering queued on the buffer being evicted, wait for
+ * it.
+ */
+ if (obj_priv->active) {
+#if WATCH_BUF
+ DRM_INFO("%s: object %p wait for seqno %08x\n",
+ __func__, obj, obj_priv->last_rendering_seqno);
+#endif
+ ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * Unbinds an object from the GTT aperture.
+ */
+static int
+i915_gem_object_unbind(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret = 0;
+
+#if WATCH_BUF
+ DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
+ DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
+#endif
+ if (obj_priv->gtt_space == NULL)
+ return 0;
+
+ /* Move the object to the CPU domain to ensure that
+ * any possible CPU writes while it's not in the GTT
+ * are flushed when we go to remap it. This will
+ * also ensure that all pending GPU writes are finished
+ * before we unbind.
+ */
+ ret = i915_gem_object_set_domain (obj, DRM_GEM_DOMAIN_CPU,
+ DRM_GEM_DOMAIN_CPU);
+ if (ret)
+ return ret;
+
+ if (obj_priv->agp_mem != NULL) {
+ drm_unbind_agp(obj_priv->agp_mem);
+ drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
+ obj_priv->agp_mem = NULL;
+ }
+
+ i915_gem_object_free_page_list(obj);
+
+ drm_memrange_put_block(obj_priv->gtt_space);
+ obj_priv->gtt_space = NULL;
+
+ /* Remove ourselves from the LRU list if present. */
+ if (!list_empty(&obj_priv->list)) {
+ list_del_init(&obj_priv->list);
+ if (obj_priv->active) {
+ DRM_ERROR("Failed to wait on buffer when unbinding, "
+ "continued anyway.\n");
+ obj_priv->active = 0;
+ drm_gem_object_unreference(obj);
+ }
+ }
+ return 0;
+}
+
+#if WATCH_BUF | WATCH_EXEC
+static void
+i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
+ uint32_t bias, uint32_t mark)
+{
+ uint32_t *mem = kmap_atomic(page, KM_USER0);
+ int i;
+ for (i = start; i < end; i += 4)
+ DRM_INFO("%08x: %08x%s\n",
+ (int) (bias + i), mem[i / 4],
+ (bias + i == mark) ? " ********" : "");
+ kunmap_atomic(mem, KM_USER0);
+ /* give syslog time to catch up */
+ msleep(1);
+}
+
+static void
+i915_gem_dump_object(struct drm_gem_object *obj, int len,
+ const char *where, uint32_t mark)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int page;
+
+ DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
+ for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
+ int page_len, chunk, chunk_len;
+
+ page_len = len - page * PAGE_SIZE;
+ if (page_len > PAGE_SIZE)
+ page_len = PAGE_SIZE;
+
+ for (chunk = 0; chunk < page_len; chunk += 128) {
+ chunk_len = page_len - chunk;
+ if (chunk_len > 128)
+ chunk_len = 128;
+ i915_gem_dump_page(obj_priv->page_list[page],
+ chunk, chunk + chunk_len,
+ obj_priv->gtt_offset +
+ page * PAGE_SIZE,
+ mark);
+ }
+ }
+}
+#endif
+
+#if WATCH_LRU
+static void
+i915_dump_lru(struct drm_device *dev, const char *where)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+
+ DRM_INFO("active list %s {\n", where);
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+ list)
+ {
+ DRM_INFO(" %p: %08x\n", obj_priv,
+ obj_priv->last_rendering_seqno);
+ }
+ DRM_INFO("}\n");
+ DRM_INFO("flushing list %s {\n", where);
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
+ list)
+ {
+ DRM_INFO(" %p: %08x\n", obj_priv,
+ obj_priv->last_rendering_seqno);
+ }
+ DRM_INFO("}\n");
+ DRM_INFO("inactive %s {\n", where);
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ DRM_INFO(" %p: %08x\n", obj_priv,
+ obj_priv->last_rendering_seqno);
+ }
+ DRM_INFO("}\n");
+}
+#endif
+
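+/*
+ * Make room in the GTT: grab an inactive buffer if one is available,
+ * otherwise wait on the oldest outstanding request, or emit a flush for
+ * buffers stuck on the flushing list, and retry.  Returns -ENOMEM only
+ * when there is nothing left that could ever become evictable.
+ */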
+static int
+i915_gem_evict_something(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ for (;;) {
+ /* If there's an inactive buffer available now, grab it
+ * and be done.
+ */
+ if (!list_empty(&dev_priv->mm.inactive_list)) {
+ obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+ BUG_ON(obj_priv->pin_count != 0);
+ break;
+ }
+
+ /* If we didn't get anything, but the ring is still processing
+ * things, wait for one of those things to finish and hopefully
+ * leave us a buffer to evict.
+ */
+ if (!list_empty(&dev_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+ int ret;
+
+ request = list_first_entry(&dev_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ ret = i915_wait_request(dev, request->seqno);
+ if (ret != 0)
+ return ret;
+
+ continue;
+ }
+
+ /* If we didn't have anything on the request list but there
+ * are buffers awaiting a flush, emit one and try again.
+ * When we wait on it, those buffers waiting for that flush
+ * will get moved to inactive.
+ */
+ if (!list_empty(&dev_priv->mm.flushing_list)) {
+ obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+
+ i915_gem_flush(dev,
+ obj->write_domain,
+ obj->write_domain);
+ i915_add_request(dev, obj->write_domain);
+
+ obj = NULL;
+ continue;
+ }
+
+ /* If we didn't do any of the above, there's nothing to be done
+ * and we just can't fit it in.
+ */
+ return -ENOMEM;
+ }
+
+#if WATCH_LRU
+ DRM_INFO("%s: evicting %p\n", __func__, obj);
+#endif
+
+ BUG_ON(obj_priv->active);
+
+ /* Wait on the rendering and unbind the buffer. */
+ ret = i915_gem_object_unbind(obj);
+
+ return ret;
+}
+
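+/*
+ * Populate obj_priv->page_list with the object's backing pages, taken
+ * from the shmem mapping behind obj->filp.  Each page is pinned here and
+ * released again by i915_gem_object_free_page_list().
+ */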
+static int
+i915_gem_object_get_page_list(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int page_count, i;
+ if (obj_priv->page_list)
+ return 0;
+
+ /* Get the list of pages out of our struct file. They'll be pinned
+ * at this point until we release them.
+ */
+ page_count = obj->size / PAGE_SIZE;
+ BUG_ON(obj_priv->page_list != NULL);
+ obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
+ DRM_MEM_DRIVER);
+ if (obj_priv->page_list == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < page_count; i++) {
+ obj_priv->page_list[i] =
+ find_or_create_page(obj->filp->f_mapping, i, GFP_HIGHUSER);
+
+ if (obj_priv->page_list[i] == NULL) {
+ i915_gem_object_free_page_list(obj);
+ return -ENOMEM;
+ }
+ unlock_page(obj_priv->page_list[i]);
+ }
+ return 0;
+}
+
+/**
+ * Finds free space in the GTT aperture and binds the object there.
+ */
+static int
+i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_memrange_node *free_space;
+ int page_count, ret;
+
+ if (alignment == 0)
+ alignment = PAGE_SIZE;
+ if (alignment & (PAGE_SIZE - 1)) {
+ DRM_ERROR("Invalid object alignment requested %u\n", alignment);
+ return -EINVAL;
+ }
+
+ search_free:
+ free_space = drm_memrange_search_free(&dev_priv->mm.gtt_space,
+ obj->size,
+ alignment, 0);
+ if (free_space != NULL) {
+ obj_priv->gtt_space =
+ drm_memrange_get_block(free_space, obj->size,
+ alignment);
+ if (obj_priv->gtt_space != NULL) {
+ obj_priv->gtt_space->private = obj;
+ obj_priv->gtt_offset = obj_priv->gtt_space->start;
+ }
+ }
+ if (obj_priv->gtt_space == NULL) {
+ /* If the gtt is empty and we're still having trouble
+ * fitting our object in, we're out of memory.
+ */
+#if WATCH_LRU
+ DRM_INFO("%s: GTT full, evicting something\n", __func__);
+#endif
+ if (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.active_list)) {
+ DRM_ERROR("GTT full, but LRU list empty\n");
+ return -ENOMEM;
+ }
+
+ ret = i915_gem_evict_something(dev);
+ if (ret != 0)
+ return ret;
+ goto search_free;
+ }
+
+#if WATCH_BUF
+ DRM_INFO("Binding object of size %d at 0x%08x\n",
+ obj->size, obj_priv->gtt_offset);
+#endif
+ ret = i915_gem_object_get_page_list(obj);
+ if (ret) {
+ drm_memrange_put_block(obj_priv->gtt_space);
+ obj_priv->gtt_space = NULL;
+ return ret;
+ }
+
+ page_count = obj->size / PAGE_SIZE;
+ /* Create an AGP memory structure pointing at our pages, and bind it
+ * into the GTT.
+ */
+ obj_priv->agp_mem = drm_agp_bind_pages(dev,
+ obj_priv->page_list,
+ page_count,
+ obj_priv->gtt_offset);
+ if (obj_priv->agp_mem == NULL) {
+ i915_gem_object_free_page_list(obj);
+ drm_memrange_put_block(obj_priv->gtt_space);
+ obj_priv->gtt_space = NULL;
+ return -ENOMEM;
+ }
+
+ /* Assert that the object is not currently in any GPU domain. As it
+ * wasn't in the GTT, there shouldn't be any way it could have been in
+ * a GPU cache
+ */
+ BUG_ON(obj->read_domains & ~DRM_GEM_DOMAIN_CPU);
+ BUG_ON(obj->write_domain & ~DRM_GEM_DOMAIN_CPU);
+
+ return 0;
+}
+
+static void
+i915_gem_clflush_object(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ /* If we don't have a page list set up, then we're not pinned
+ * to GPU, and we can ignore the cache flush because it'll happen
+ * again at bind time.
+ */
+ if (obj_priv->page_list == NULL)
+ return;
+
+ drm_ttm_cache_flush(obj_priv->page_list, obj->size / PAGE_SIZE);
+}
+
+/*
+ * Set the next domain for the specified object. This
+ * may not actually perform the necessary flushing/invalidating though,
+ * as that may want to be batched with other set_domain operations
+ *
+ * This is (we hope) the only really tricky part of gem. The goal
+ * is fairly simple -- track which caches hold bits of the object
+ * and make sure they remain coherent. A few concrete examples may
+ * help to explain how it works. For shorthand, we use the notation
+ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
+ * a pair of read and write domain masks.
+ *
+ * Case 1: the batch buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Mapped to GTT
+ * 4. Read by GPU
+ * 5. Unmapped from GTT
+ * 6. Freed
+ *
+ * Let's take these a step at a time
+ *
+ * 1. Allocated
+ * Pages allocated from the kernel may still have
+ * cache contents, so we set them to (CPU, CPU) always.
+ * 2. Written by CPU (using pwrite)
+ * The pwrite function calls set_domain (CPU, CPU) and
+ * this function does nothing (as nothing changes)
+ * 3. Mapped to GTT
+ * This function asserts that the object is not
+ * currently in any GPU-based read or write domains
+ * 4. Read by GPU
+ * i915_gem_execbuffer calls set_domain (COMMAND, 0).
+ * As write_domain is zero, this function adds in the
+ * current read domains (CPU+COMMAND, 0).
+ * flush_domains is set to CPU.
+ * invalidate_domains is set to COMMAND
+ * clflush is run to get data out of the CPU caches
+ * then i915_dev_set_domain calls i915_gem_flush to
+ * emit an MI_FLUSH and drm_agp_chipset_flush
+ * 5. Unmapped from GTT
+ * i915_gem_object_unbind calls set_domain (CPU, CPU)
+ * flush_domains and invalidate_domains end up both zero
+ * so no flushing/invalidating happens
+ * 6. Freed
+ * yay, done
+ *
+ * Case 2: The shared render buffer
+ *
+ * 1. Allocated
+ * 2. Mapped to GTT
+ * 3. Read/written by GPU
+ * 4. set_domain to (CPU,CPU)
+ * 5. Read/written by CPU
+ * 6. Read/written by GPU
+ *
+ * 1. Allocated
+ * Same as last example, (CPU, CPU)
+ * 2. Mapped to GTT
+ * Nothing changes (assertions find that it is not in the GPU)
+ * 3. Read/written by GPU
+ * execbuffer calls set_domain (RENDER, RENDER)
+ * flush_domains gets CPU
+ * invalidate_domains gets GPU
+ * clflush (obj)
+ * MI_FLUSH and drm_agp_chipset_flush
+ * 4. set_domain (CPU, CPU)
+ * flush_domains gets GPU
+ * invalidate_domains gets CPU
+ * wait_rendering (obj) to make sure all drawing is complete.
+ * This will include an MI_FLUSH to get the data from GPU
+ * to memory
+ * clflush (obj) to invalidate the CPU cache
+ * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
+ * 5. Read/written by CPU
+ * cache lines are loaded and dirtied
+ * 6. Read written by GPU
+ * Same as last GPU access
+ *
+ * Case 3: The constant buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Read by GPU
+ * 4. Updated (written) by CPU again
+ * 5. Read by GPU
+ *
+ * 1. Allocated
+ * (CPU, CPU)
+ * 2. Written by CPU
+ * (CPU, CPU)
+ * 3. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ * 4. Updated (written) by CPU again
+ * (CPU, CPU)
+ * flush_domains = 0 (no previous write domain)
+ * invalidate_domains = 0 (no new read domains)
+ * 5. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ */
+static int
+i915_gem_object_set_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain)
+{
+ struct drm_device *dev = obj->dev;
+ uint32_t invalidate_domains = 0;
+ uint32_t flush_domains = 0;
+ int ret;
+
+#if WATCH_BUF
+ DRM_INFO("%s: object %p read %08x write %08x\n",
+ __func__, obj, read_domains, write_domain);
+#endif
+ /*
+ * If the object isn't moving to a new write domain,
+ * let the object stay in multiple read domains
+ */
+ if (write_domain == 0)
+ read_domains |= obj->read_domains;
+
+ /*
+ * Flush the current write domain if
+ * the new read domains don't match. Invalidate
+ * any read domains which differ from the old
+ * write domain
+ */
+ if (obj->write_domain && obj->write_domain != read_domains) {
+ flush_domains |= obj->write_domain;
+ invalidate_domains |= read_domains & ~obj->write_domain;
+ }
+ /*
+ * Invalidate any read caches which may have
+ * stale data. That is, any new read domains.
+ */
+ invalidate_domains |= read_domains & ~obj->read_domains;
+ if ((flush_domains | invalidate_domains) & DRM_GEM_DOMAIN_CPU) {
+#if WATCH_BUF
+ DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
+ __func__, flush_domains, invalidate_domains);
+#endif
+ /*
+ * If we're invalidating the CPU cache and flushing a GPU cache,
+ * then pause for rendering so that the GPU caches will be
+ * flushed before the CPU cache is invalidated.
+ */
+ if ((invalidate_domains & DRM_GEM_DOMAIN_CPU) &&
+ (flush_domains & ~DRM_GEM_DOMAIN_CPU)) {
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret)
+ return ret;
+ }
+ i915_gem_clflush_object(obj);
+ }
+
+ if ((write_domain | flush_domains) != 0)
+ obj->write_domain = write_domain;
+ obj->read_domains = read_domains;
+ dev->invalidate_domains |= invalidate_domains;
+ dev->flush_domains |= flush_domains;
+ return 0;
+}
+
+/**
+ * Once all of the objects have been set in the proper domain,
+ * perform the necessary flush and invalidate operations.
+ *
+ * Returns the write domains flushed, for use in flush tracking.
+ */
+static uint32_t
+i915_gem_dev_set_domain(struct drm_device *dev)
+{
+ uint32_t flush_domains = dev->flush_domains;
+
+ /*
+ * Now that all the buffers are synced to the proper domains,
+ * flush and invalidate the collected domains
+ */
+ if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+ __func__,
+ dev->invalidate_domains,
+ dev->flush_domains);
+#endif
+ i915_gem_flush(dev,
+ dev->invalidate_domains,
+ dev->flush_domains);
+ dev->invalidate_domains = 0;
+ dev->flush_domains = 0;
+ }
+
+ return flush_domains;
+}
+
+#if WATCH_COHERENCY
+static void
+i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int page;
+ uint32_t *gtt_mapping;
+ uint32_t *backing_map = NULL;
+ int bad_count = 0;
+
+ DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
+ __FUNCTION__, obj, obj_priv->gtt_offset, handle,
+ obj->size / 1024);
+
+ gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
+ obj->size);
+ if (gtt_mapping == NULL) {
+ DRM_ERROR("failed to map GTT space\n");
+ return;
+ }
+
+ for (page = 0; page < obj->size / PAGE_SIZE; page++) {
+ int i;
+
+ backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
+
+ if (backing_map == NULL) {
+ DRM_ERROR("failed to map backing page\n");
+ goto out;
+ }
+
+ for (i = 0; i < PAGE_SIZE / 4; i++) {
+ uint32_t cpuval = backing_map[i];
+ uint32_t gttval = readl(gtt_mapping +
+ page * 1024 + i);
+
+ if (cpuval != gttval) {
+ DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
+ "0x%08x vs 0x%08x\n",
+ (int)(obj_priv->gtt_offset +
+ page * PAGE_SIZE + i * 4),
+ cpuval, gttval);
+ if (bad_count++ >= 8) {
+ DRM_INFO("...\n");
+ goto out;
+ }
+ }
+ }
+ kunmap_atomic(backing_map, KM_USER0);
+ backing_map = NULL;
+ }
+
+ out:
+ if (backing_map != NULL)
+ kunmap_atomic(backing_map, KM_USER0);
+ iounmap(gtt_mapping);
+
+ /* give syslog time to catch up */
+ msleep(1);
+
+ /* Directly flush the object, since we just loaded values with the CPU
+ * from the backing pages and we don't want to disturb the cache
+ * management that we're trying to observe.
+ */
+
+ i915_gem_clflush_object(obj);
+}
+#endif
+
+/**
+ * Bind an object to the GTT and evaluate the relocations landing in it.
+ */
+static int
+i915_gem_object_bind_and_relocate(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object *entry)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_relocation_entry reloc;
+ struct drm_i915_gem_relocation_entry __user *relocs;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int i;
+ uint32_t last_reloc_offset = -1;
+ void *reloc_page = NULL;
+
+ /* Choose the GTT offset for our buffer and put it there. */
+ if (obj_priv->gtt_space == NULL) {
+ i915_gem_object_bind_to_gtt(obj, (unsigned) entry->alignment);
+ if (obj_priv->gtt_space == NULL)
+ return -ENOMEM;
+ }
+
+ entry->offset = obj_priv->gtt_offset;
+
+ relocs = (struct drm_i915_gem_relocation_entry __user *)
+ (uintptr_t) entry->relocs_ptr;
+ /* Apply the relocations, using the GTT aperture to avoid cache
+ * flushing requirements.
+ */
+ for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_gem_object *target_obj;
+ struct drm_i915_gem_object *target_obj_priv;
+ uint32_t reloc_val, reloc_offset, *reloc_entry;
+ int ret;
+
+ ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
+ if (ret != 0)
+ return ret;
+
+ target_obj = drm_gem_object_lookup(obj->dev, file_priv,
+ reloc.target_handle);
+ if (target_obj == NULL)
+ return -EINVAL;
+ target_obj_priv = target_obj->driver_private;
+
+ /* The target buffer should have appeared before us in the
+ * exec_object list, so it should have a GTT space bound by now.
+ */
+ if (target_obj_priv->gtt_space == NULL) {
+ DRM_ERROR("No GTT space found for object %d\n",
+ reloc.target_handle);
+ drm_gem_object_unreference(target_obj);
+ return -EINVAL;
+ }
+
+ if (reloc.offset > obj->size - 4) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc.target_handle,
+ (int) reloc.offset, (int) obj->size);
+ drm_gem_object_unreference(target_obj);
+ return -EINVAL;
+ }
+ if (reloc.offset & 3) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc.target_handle,
+ (int) reloc.offset);
+ drm_gem_object_unreference(target_obj);
+ return -EINVAL;
+ }
+
+ if (reloc.write_domain && target_obj->pending_write_domain &&
+ reloc.write_domain != target_obj->pending_write_domain) {
+ DRM_ERROR("Write domain conflict: "
+ "obj %p target %d offset %d "
+ "new %08x old %08x\n",
+ obj, reloc.target_handle,
+ (int) reloc.offset,
+ reloc.write_domain,
+ target_obj->pending_write_domain);
+ drm_gem_object_unreference(target_obj);
+ return -EINVAL;
+ }
+
+#if WATCH_RELOC
+ DRM_INFO("%s: obj %p offset %08x target %d "
+ "read %08x write %08x gtt %08x "
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+ (int) reloc.offset,
+ (int) reloc.target_handle,
+ (int) reloc.read_domains,
+ (int) reloc.write_domain,
+ (int) target_obj_priv->gtt_offset,
+ (int) reloc.presumed_offset,
+ reloc.delta);
+#endif
+
+ target_obj->pending_read_domains |= reloc.read_domains;
+ target_obj->pending_write_domain |= reloc.write_domain;
+
+ /* If the relocation already has the right value in it, no
+ * more work needs to be done.
+ */
+ if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
+ drm_gem_object_unreference(target_obj);
+ continue;
+ }
+
+ /* Now that we're going to actually write some data in,
+ * make sure that any rendering using this buffer's contents
+ * is completed.
+ */
+ i915_gem_object_wait_rendering(obj);
+
+ /* As we're writing through the gtt, flush
+ * any CPU writes before we write the relocations
+ */
+ if (obj->write_domain & DRM_GEM_DOMAIN_CPU) {
+ i915_gem_clflush_object(obj);
+ drm_agp_chipset_flush(dev);
+ obj->write_domain = 0;
+ }
+
+ /* Map the page containing the relocation we're going to
+ * perform.
+ */
+ reloc_offset = obj_priv->gtt_offset + reloc.offset;
+ if (reloc_page == NULL ||
+ (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
+ (reloc_offset & ~(PAGE_SIZE - 1))) {
+ if (reloc_page != NULL)
+ iounmap(reloc_page);
+
+ reloc_page = ioremap(dev->agp->base +
+ (reloc_offset & ~(PAGE_SIZE - 1)),
+ PAGE_SIZE);
+ last_reloc_offset = reloc_offset;
+ if (reloc_page == NULL) {
+ drm_gem_object_unreference(target_obj);
+ return -ENOMEM;
+ }
+ }
+
+ reloc_entry = (uint32_t *)((char *)reloc_page +
+ (reloc_offset & (PAGE_SIZE - 1)));
+ reloc_val = target_obj_priv->gtt_offset + reloc.delta;
+
+#if WATCH_BUF
+ DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
+ obj, (unsigned int) reloc.offset,
+ readl(reloc_entry), reloc_val);
+#endif
+ writel(reloc_val, reloc_entry);
+
+ /* Write the updated presumed offset for this entry back out
+ * to the user.
+ */
+ reloc.presumed_offset = target_obj_priv->gtt_offset;
+ ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
+ if (ret != 0) {
+ drm_gem_object_unreference(target_obj);
+ return ret;
+ }
+
+ drm_gem_object_unreference(target_obj);
+ }
+
+ if (reloc_page != NULL)
+ iounmap(reloc_page);
+
+#if WATCH_BUF
+ if (0)
+ i915_gem_dump_object(obj, 128, __func__, ~0);
+#endif
+ return 0;
+}
+
+/** Dispatch a batchbuffer to the ring
+ */
+static int
+i915_dispatch_gem_execbuffer(struct drm_device *dev,
+ struct drm_i915_gem_execbuffer *exec,
+ uint64_t exec_offset)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
+ (uintptr_t) exec->cliprects_ptr;
+ int nbox = exec->num_cliprects;
+ int i = 0, count;
+ uint32_t exec_start, exec_len;
+ RING_LOCALS;
+
+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+ exec_len = (uint32_t) exec->batch_len;
+
+ if ((exec_start | exec_len) & 0x7) {
+ DRM_ERROR("alignment\n");
+ return -EINVAL;
+ }
+
+ if (!exec_start)
+ return -EINVAL;
+
+ count = nbox ? nbox : 1;
+
+ for (i = 0; i < count; i++) {
+ if (i < nbox) {
+ int ret = i915_emit_box(dev, boxes, i,
+ exec->DR1, exec->DR4);
+ if (ret)
+ return ret;
+ }
+
+ if (IS_I830(dev) || IS_845G(dev)) {
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_BATCH_BUFFER);
+ OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+ OUT_RING(exec_start + exec_len - 4);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else {
+ BEGIN_LP_RING(2);
+ if (IS_I965G(dev)) {
+ OUT_RING(MI_BATCH_BUFFER_START |
+ (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ OUT_RING(exec_start);
+ } else {
+ OUT_RING(MI_BATCH_BUFFER_START |
+ (2 << 6));
+ OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+ }
+ ADVANCE_LP_RING();
+ }
+ }
+
+ /* XXX breadcrumb */
+ return 0;
+}
+
+/* Throttle our rendering by waiting until the ring has completed our requests
+ * emitted over 20 msec ago.
+ *
+ * This should get us reasonable parallelism between CPU and GPU but also
+ * relatively low latency when blocking on a particular request to finish.
+ */
+static int
+i915_gem_ring_throttle(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+ while (!list_empty(&dev_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&dev_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ /* Break out once we reach requests emitted less than 20 ms ago. */
+ if ((long) (jiffies - request->emitted_jiffies) <= (20 * HZ) / 1000) {
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+ }
+
+ /* Wait on the last request if not. */
+ ret = i915_wait_request(dev, request->seqno);
+ if (ret != 0) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ }
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
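+/*
+ * Main execbuffer path: copy the exec list from userspace, bind and
+ * relocate every buffer, resolve the accumulated domain transitions with
+ * a single flush, dispatch the batch, and finally tag each buffer with
+ * the new request's sequence number so it can be retired later.
+ */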
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_execbuffer *args = data;
+ struct drm_i915_gem_exec_object *exec_list = NULL;
+ struct drm_gem_object **object_list = NULL;
+ struct drm_gem_object *batch_obj;
+ int ret, i;
+ uint64_t exec_offset;
+ uint32_t seqno, flush_domains;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+ i915_kernel_lost_context(dev);
+
+ /* Copy in the exec list from userland */
+ exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
+ DRM_MEM_DRIVER);
+ object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
+ DRM_MEM_DRIVER);
+ if (exec_list == NULL || object_list == NULL) {
+ DRM_ERROR("Failed to allocate exec or object list "
+ "for %d buffers\n",
+ args->buffer_count);
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+ ret = copy_from_user(exec_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ goto pre_mutex_err;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* Zero the global flush/invalidate flags. These
+ * will be modified as each object is bound to the
+ * GTT.
+ */
+ dev->invalidate_domains = 0;
+ dev->flush_domains = 0;
+
+ /* Look up object handles and perform the relocations */
+ for (i = 0; i < args->buffer_count; i++) {
+ object_list[i] = drm_gem_object_lookup(dev, file_priv,
+ exec_list[i].handle);
+ if (object_list[i] == NULL) {
+ DRM_ERROR("Invalid object handle %d at index %d\n",
+ exec_list[i].handle, i);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ object_list[i]->pending_read_domains = 0;
+ object_list[i]->pending_write_domain = 0;
+ ret = i915_gem_object_bind_and_relocate(object_list[i],
+ file_priv,
+ &exec_list[i]);
+ if (ret) {
+ DRM_ERROR("object bind and relocate failed %d\n", ret);
+ goto err;
+ }
+ }
+
+ /* Set the pending read domains for the batch buffer to COMMAND */
+ batch_obj = object_list[args->buffer_count-1];
+ batch_obj->pending_read_domains = DRM_GEM_DOMAIN_I915_COMMAND;
+ batch_obj->pending_write_domain = 0;
+
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_gem_object *obj = object_list[i];
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ if (obj_priv->gtt_space == NULL) {
+ /* We evicted the buffer in the process of validating
+ * our set of buffers. We could try to recover by
+ * kicking everything out and trying again from
+ * the start.
+ */
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* make sure all previous memory operations have passed */
+ ret = i915_gem_object_set_domain(obj,
+ obj->pending_read_domains,
+ obj->pending_write_domain);
+ if (ret)
+ goto err;
+ }
+
+ /* Flush/invalidate caches and chipset buffer */
+ flush_domains = i915_gem_dev_set_domain(dev);
+
+#if WATCH_COHERENCY
+ for (i = 0; i < args->buffer_count; i++) {
+ i915_gem_object_check_coherency(object_list[i],
+ exec_list[i].handle);
+ }
+#endif
+
+ exec_offset = exec_list[args->buffer_count - 1].offset;
+
+#if WATCH_EXEC
+ i915_gem_dump_object(object_list[args->buffer_count - 1],
+ args->batch_len,
+ __func__,
+ ~0);
+#endif
+
+ /* Exec the batchbuffer */
+ ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
+ if (ret) {
+ DRM_ERROR("dispatch failed %d\n", ret);
+ goto err;
+ }
+
+ /*
+ * Ensure that the commands in the batch buffer are
+ * finished before the interrupt fires
+ */
+ flush_domains |= i915_retire_commands(dev);
+
+ /*
+ * Get a seqno representing the execution of the current buffer,
+ * which we can wait on. We would like to mitigate these interrupts,
+ * likely by only creating seqnos occasionally (so that we have
+ * *some* interrupts representing completion of buffers that we can
+ * wait on when trying to clear up gtt space).
+ */
+ seqno = i915_add_request(dev, flush_domains);
+ BUG_ON(seqno == 0);
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_gem_object *obj = object_list[i];
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ i915_gem_object_move_to_active(obj);
+ obj_priv->last_rendering_seqno = seqno;
+#if WATCH_LRU
+ DRM_INFO("%s: move to exec list %p\n", __func__, obj);
+#endif
+ }
+#if WATCH_LRU
+ i915_dump_lru(dev, __func__);
+#endif
+
+ /* Copy the new buffer offsets back to the user's exec list. */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec_list,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret)
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+err:
+ if (object_list != NULL) {
+ for (i = 0; i < args->buffer_count; i++)
+ drm_gem_object_unreference(object_list[i]);
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+pre_mutex_err:
+ drm_free(object_list, sizeof(*object_list) * args->buffer_count,
+ DRM_MEM_DRIVER);
+ drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
+ DRM_MEM_DRIVER);
+
+ return ret;
+}
+
+int
+i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_pin *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+
+ i915_kernel_lost_context(dev);
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
+ args->handle);
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ obj_priv = obj->driver_private;
+ if (obj_priv->gtt_space == NULL) {
+ ret = i915_gem_object_bind_to_gtt(obj,
+ (unsigned) args->alignment);
+ if (ret != 0) {
+ DRM_ERROR("Failure to bind in "
+ "i915_gem_pin_ioctl(): %d\n",
+ ret);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ }
+
+ obj_priv->pin_count++;
+ args->offset = obj_priv->gtt_offset;
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+int
+i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_pin *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ mutex_lock(&dev->struct_mutex);
+
+ i915_kernel_lost_context(dev);
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
+ args->handle);
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ obj_priv = obj->driver_private;
+ obj_priv->pin_count--;
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+int
+i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_busy *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
+ args->handle);
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ obj_priv = obj->driver_private;
+ args->busy = obj_priv->active;
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+int
+i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return i915_gem_ring_throttle(dev);
+}
+
+int i915_gem_init_object(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
+ if (obj_priv == NULL)
+ return -ENOMEM;
+
+ obj->driver_private = obj_priv;
+ obj_priv->obj = obj;
+ INIT_LIST_HEAD(&obj_priv->list);
+ return 0;
+}
+
+void i915_gem_free_object(struct drm_gem_object *obj)
+{
+ i915_kernel_lost_context(obj->dev);
+ i915_gem_object_unbind(obj);
+
+ drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
+}
+
+int
+i915_gem_set_domain(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ uint32_t read_domains,
+ uint32_t write_domain)
+{
+ struct drm_device *dev = obj->dev;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ drm_client_lock_take(dev, file_priv);
+ i915_kernel_lost_context(dev);
+ ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
+ if (ret) {
+ drm_client_lock_release(dev, file_priv);
+ return ret;
+ }
+ i915_gem_dev_set_domain(obj->dev);
+ drm_client_lock_release(dev, file_priv);
+ return 0;
+}
+
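+/*
+ * Driver hook for flushing after pwrite.  The fast path below (clflushing
+ * only the written pages when the write covers less than half of an
+ * already-populated object) is currently compiled out with #if 0, so this
+ * hook is a no-op for now.
+ */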
+int
+i915_gem_flush_pwrite(struct drm_gem_object *obj,
+ uint64_t offset, uint64_t size)
+{
+#if 0
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ /*
+ * For writes much less than the size of the object and
+ * which are already pinned in memory, do the flush right now
+ */
+
+ if ((size < obj->size >> 1) && obj_priv->page_list != NULL) {
+ unsigned long first_page = offset / PAGE_SIZE;
+ unsigned long beyond_page = roundup(offset + size, PAGE_SIZE) / PAGE_SIZE;
+
+ drm_ttm_cache_flush(obj_priv->page_list + first_page,
+ beyond_page - first_page);
+ drm_agp_chipset_flush(dev);
+ obj->write_domain = 0;
+ }
+#endif
+ return 0;
+}
+
+void
+i915_gem_lastclose(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* Assume that the chip has been idled at this point. Just pull them
+ * off the execution list and unref them. Since this is the last
+ * close, this is also the last ref and they'll go away.
+ */
+
+ while (!list_empty(&dev_priv->mm.active_list)) {
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = list_first_entry(&dev_priv->mm.active_list,
+ struct drm_i915_gem_object,
+ list);
+
+ list_del_init(&obj_priv->list);
+ obj_priv->active = 0;
+ obj_priv->obj->write_domain = 0;
+ drm_gem_object_unreference(obj_priv->obj);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+}
diff --git a/linux-core/nouveau_bo.c b/linux-core/nouveau_bo.c
index ab3b23a4..86347e03 100644
--- a/linux-core/nouveau_bo.c
+++ b/linux-core/nouveau_bo.c
@@ -229,7 +229,7 @@ out_cleanup:
if (tmp_mem.mm_node) {
mutex_lock(&dev->struct_mutex);
if (tmp_mem.mm_node != bo->pinned_node)
- drm_mm_put_block(tmp_mem.mm_node);
+ drm_memrange_put_block(tmp_mem.mm_node);
tmp_mem.mm_node = NULL;
mutex_unlock(&dev->struct_mutex);
}
diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c
index cc4d5a92..81704ea1 100644
--- a/linux-core/nouveau_sgdma.c
+++ b/linux-core/nouveau_sgdma.c
@@ -280,7 +280,7 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_ttm_backend *be;
struct drm_scatter_gather sgreq;
- struct drm_mm_node mm_node;
+ struct drm_memrange_node mm_node;
struct drm_bo_mem_reg mem;
int ret;
diff --git a/shared-core/drm.h b/shared-core/drm.h
index 40653b4b..59cbbdd9 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -993,6 +993,93 @@ struct drm_mm_info_arg {
uint64_t p_size;
};
+struct drm_gem_create {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ */
+ uint64_t size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_gem_close {
+ /** Handle of the object to be closed. */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_gem_pread {
+ /** Handle for the object being read. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to read from */
+ uint64_t offset;
+ /** Length of data to read */
+ uint64_t size;
+ /** Pointer to write the data into. */
+ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_gem_pwrite {
+ /** Handle for the object being written to. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to write to */
+ uint64_t offset;
+ /** Length of data to write */
+ uint64_t size;
+ /** Pointer to read the data from. */
+ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_gem_mmap {
+ /** Handle for the object being mapped. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset in the object to map. */
+ uint64_t offset;
+ /**
+ * Length of data to map.
+ *
+ * The value will be page-aligned.
+ */
+ uint64_t size;
+ /** Returned pointer the data was mapped at */
+ uint64_t addr_ptr; /* void *, but pointers are not 32/64 compatible */
+};
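+
+/*
+ * Illustrative userspace flow for the create/mmap ioctls (a rough sketch:
+ * error handling omitted, and "fd" is assumed to be an already open DRM
+ * device file descriptor):
+ *
+ *	struct drm_gem_create create = { .size = 16384 };
+ *	struct drm_gem_mmap map;
+ *
+ *	ioctl(fd, DRM_IOCTL_GEM_CREATE, &create);
+ *	map.handle = create.handle;
+ *	map.offset = 0;
+ *	map.size = create.size;
+ *	ioctl(fd, DRM_IOCTL_GEM_MMAP, &map);
+ *	memset((void *)(uintptr_t) map.addr_ptr, 0, map.size);
+ */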
+
+struct drm_gem_flink {
+ /** Handle for the object being named */
+ uint32_t handle;
+ /** Returned global name */
+ uint32_t name;
+};
+
+struct drm_gem_open {
+ /** Name of object being opened */
+ uint32_t name;
+ /** Returned handle for the object */
+ uint32_t handle;
+ /** Returned size of the object */
+ uint64_t size;
+};
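+
+/*
+ * Illustrative sketch of sharing a buffer between two processes using
+ * flink/open (hypothetical handles and file descriptors, error handling
+ * omitted):
+ *
+ *	struct drm_gem_flink flink = { .handle = local_handle };
+ *	ioctl(fd1, DRM_IOCTL_GEM_FLINK, &flink);
+ *	... hand flink.name to the second process ...
+ *	struct drm_gem_open op = { .name = received_name };
+ *	ioctl(fd2, DRM_IOCTL_GEM_OPEN, &op);
+ *	... op.handle now refers to the same object in the second process ...
+ */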
+
+struct drm_gem_set_domain {
+ /** Handle for the object */
+ uint32_t handle;
+ /** New read domains */
+ uint32_t read_domains;
+ /** New write domain */
+ uint32_t write_domain;
+};
+#define DRM_GEM_DOMAIN_CPU 0x00000001
/*
* Drm mode setting
@@ -1209,7 +1296,7 @@ struct drm_mode_crtc_lut {
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
-#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
+#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
@@ -1261,6 +1348,15 @@ struct drm_mode_crtc_lut {
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
+#define DRM_IOCTL_GEM_CREATE DRM_IOWR(0x09, struct drm_gem_create)
+#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x0a, struct drm_gem_close)
+#define DRM_IOCTL_GEM_PREAD DRM_IOW (0x0b, struct drm_gem_pread)
+#define DRM_IOCTL_GEM_PWRITE DRM_IOW (0x0c, struct drm_gem_pwrite)
+#define DRM_IOCTL_GEM_MMAP DRM_IOWR(0x0d, struct drm_gem_mmap)
+#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0e, struct drm_gem_flink)
+#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0f, struct drm_gem_open)
+#define DRM_IOCTL_GEM_SET_DOMAIN DRM_IOW (0xb7, struct drm_gem_set_domain)
+
#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index db857fbd..86881ab8 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -41,10 +41,14 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_ring_buffer *ring = &(dev_priv->ring);
u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
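+ /* Also sample the ACTHD (active head) register; as long as either
+ * the ring head or ACTHD keeps moving, the GPU is assumed to be
+ * making progress and the timeout counter below is reset.
+ */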
+ u32 acthd_reg = IS_I965G(dev) ? I965REG_ACTHD : I915REG_ACTHD;
+ u32 last_acthd = I915_READ(acthd_reg);
+ u32 acthd;
int i;
for (i = 0; i < 10000; i++) {
ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ acthd = I915_READ(acthd_reg);
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
@@ -54,13 +58,41 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
if (ring->head != last_head)
i = 0;
+ if (acthd != last_acthd)
+ i = 0;
+
last_head = ring->head;
- DRM_UDELAY(1);
+ last_acthd = acthd;
+ msleep_interruptible (10);
}
return -EBUSY;
}
+#if I915_RING_VALIDATE
+/**
+ * Validate the cached ring tail value
+ *
+ * If the X server writes to the ring and DRM doesn't
+ * reload the head and tail pointers, it will end up writing
+ * data to the wrong place in the ring, causing havoc.
+ */
+void i915_ring_validate(struct drm_device *dev, const char *func, int line)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+ u32 tail = I915_READ(LP_RING+RING_TAIL) & HEAD_ADDR;
+ u32 head = I915_READ(LP_RING+RING_HEAD) & HEAD_ADDR;
+
+ if (tail != ring->tail) {
+ DRM_ERROR("%s:%d head sw %x, hw %x. tail sw %x hw %x\n",
+ func, line,
+ ring->head, head, ring->tail, tail);
+ BUG_ON(1);
+ }
+}
+#endif
+
void i915_kernel_lost_context(struct drm_device * dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -113,7 +145,6 @@ int i915_dma_cleanup(struct drm_device * dev)
I915_WRITE(0x02080, 0x1ffff000);
}
-
return 0;
}
@@ -195,7 +226,6 @@ static int i915_initialize(struct drm_device * dev,
}
}
-
#ifdef I915_HAVE_BUFFER
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
@@ -264,8 +294,7 @@ static int i915_initialize(struct drm_device * dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_init(&dev_priv->cmdbuf_mutex);
}
-#endif
-#if defined(I915_HAVE_BUFFER)
+
if (init->func == I915_INIT_DMA2) {
int ret = setup_dri2_sarea(dev, file_priv, init);
if (ret) {
@@ -288,11 +317,6 @@ static int i915_dma_resume(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- if (!dev_priv->mmio_map) {
- DRM_ERROR("can not find mmio map!\n");
- return -EINVAL;
- }
-
if (dev_priv->ring.map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@@ -456,9 +480,9 @@ static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
return 0;
}
-static int i915_emit_box(struct drm_device * dev,
- struct drm_clip_rect __user * boxes,
- int i, int DR1, int DR4)
+int i915_emit_box(struct drm_device * dev,
+ struct drm_clip_rect __user * boxes,
+ int i, int DR1, int DR4)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_clip_rect box;
@@ -514,7 +538,7 @@ void i915_emit_breadcrumb(struct drm_device *dev)
BEGIN_LP_RING(4);
OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(20);
+ OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
@@ -713,9 +737,19 @@ void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
int i915_quiescent(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
i915_kernel_lost_context(dev);
- return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
+ ret = i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
+ if (ret)
+ {
+ i915_kernel_lost_context (dev);
+ DRM_ERROR ("not quiescent head %08x tail %08x space %08x\n",
+ dev_priv->ring.head,
+ dev_priv->ring.tail,
+ dev_priv->ring.space);
+ }
+ return ret;
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -1051,6 +1085,12 @@ struct drm_ioctl_desc i915_ioctls[] = {
#ifdef I915_HAVE_BUFFER
DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
#endif
+ DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
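The breadcrumb change above, OUT_RING(20) becoming OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT), encodes the same value: MI_STORE_DWORD_INDEX takes a byte offset into the hardware status page, the breadcrumb lives in dword 5 (see READ_BREADCRUMB in i915_drv.h later in this patch), and 5 << 2 is 20. A short illustration of the arithmetic, with BREADCRUMB_DWORD as a made-up name:

/* Illustration only: the breadcrumb dword index expressed as a byte offset. */
#define MI_STORE_DWORD_INDEX_SHIFT	2
#define BREADCRUMB_DWORD		5	/* hypothetical name; dword read by READ_BREADCRUMB() */
#define BREADCRUMB_BYTE_OFFSET		(BREADCRUMB_DWORD << MI_STORE_DWORD_INDEX_SHIFT)	/* == 20 */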
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index bdcac9aa..bb551d84 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -176,6 +176,12 @@ typedef struct drm_i915_sarea {
#define DRM_I915_MMIO 0x10
#define DRM_I915_HWS_ADDR 0x11
#define DRM_I915_EXECBUFFER 0x12
+#define DRM_I915_GEM_INIT 0x13
+#define DRM_I915_GEM_EXECBUFFER 0x14
+#define DRM_I915_GEM_PIN 0x15
+#define DRM_I915_GEM_UNPIN 0x16
+#define DRM_I915_GEM_BUSY 0x17
+#define DRM_I915_GEM_THROTTLE 0x18
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -195,6 +201,12 @@ typedef struct drm_i915_sarea {
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_MMIO DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio)
#define DRM_IOCTL_I915_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
+#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
+#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
+#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
/* Asynchronous page flipping:
*/
@@ -399,4 +411,146 @@ struct drm_i915_execbuffer {
struct drm_fence_arg fence_arg;
};
+struct drm_i915_gem_init {
+ /**
+ * Beginning offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ uint64_t gtt_start;
+ /**
+ * Ending offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ uint64_t gtt_end;
+};
+
+struct drm_i915_gem_relocation_entry {
+ /**
+ * Handle of the buffer being pointed to by this relocation entry.
+ *
+ * It's appealing to make this be an index into the mm_validate_entry
+ * list to refer to the buffer, but this allows the driver to create
+ * a relocation list for state buffers and not re-write it per
+ * exec using the buffer.
+ */
+ uint32_t target_handle;
+
+ /**
+ * Value to be added to the offset of the target buffer to make up
+ * the relocation entry.
+ */
+ uint32_t delta;
+
+ /** Offset in the buffer the relocation entry will be written into */
+ uint64_t offset;
+
+ /**
+ * Offset value of the target buffer that the relocation entry was last
+ * written as.
+ *
+ * If the buffer has the same offset as last time, we can skip syncing
+ * and writing the relocation. This value is written back out by
+ * the execbuffer ioctl when the relocation is written.
+ */
+ uint64_t presumed_offset;
+
+ /**
+ * Target memory domains read by this operation.
+ */
+ uint32_t read_domains;
+
+ /**
+ * Target memory domains written by this operation.
+ *
+ * Note that only one domain may be written by the whole
+ * execbuffer operation, so that where there are conflicts,
+ * the application will get -EINVAL back.
+ */
+ uint32_t write_domain;
+};
+
+/**
+ * Intel memory domains
+ *
+ * Most of these just align with the various caches in
+ * the system and are used to flush and invalidate as
+ * objects end up cached in different domains.
+ */
+
+/* 0x00000001 is DRM_GEM_DOMAIN_CPU */
+#define DRM_GEM_DOMAIN_I915_RENDER 0x00000002 /* Render cache, used by 2D and 3D drawing */
+#define DRM_GEM_DOMAIN_I915_SAMPLER 0x00000004 /* Sampler cache, used by texture engine */
+#define DRM_GEM_DOMAIN_I915_COMMAND 0x00000008 /* Command queue, used to load batch buffers */
+#define DRM_GEM_DOMAIN_I915_INSTRUCTION 0x00000010 /* Instruction cache, used by shader programs */
+#define DRM_GEM_DOMAIN_I915_VERTEX 0x00000020 /* Vertex address cache */
+
+struct drm_i915_gem_exec_object {
+ /**
+ * User's handle for a buffer to be bound into the GTT for this
+ * operation.
+ */
+ uint32_t handle;
+
+ /** List of relocations to be performed on this buffer */
+ uint32_t relocation_count;
+ uint64_t relocs_ptr; /* struct drm_i915_gem_relocation_entry *relocs */
+
+ /** Required alignment in graphics aperture */
+ uint64_t alignment;
+
+ /**
+ * Returned value of the updated offset of the object, for future
+ * presumed_offset writes.
+ */
+ uint64_t offset;
+};
+
+struct drm_i915_gem_execbuffer {
+ /**
+ * List of buffers to be validated with their relocations to be
+ * performed on them.
+ *
+ * These buffers must be listed in an order such that all relocations
+ * a buffer is performing refer to buffers that have already appeared
+ * in the validate list.
+ */
+ uint64_t buffers_ptr; /* struct drm_i915_gem_validate_entry *buffers */
+ uint32_t buffer_count;
+
+ /** Offset in the batchbuffer to start execution from. */
+ uint32_t batch_start_offset;
+ /** Bytes used in batchbuffer from batch_start_offset */
+ uint32_t batch_len;
+ uint32_t DR1;
+ uint32_t DR4;
+ uint32_t num_cliprects;
+ uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */
+};
+
+struct drm_i915_gem_pin {
+ /** Handle of the buffer to be pinned. */
+ uint32_t handle;
+ uint32_t pad;
+
+ /** alignment required within the aperture */
+ uint64_t alignment;
+
+ /** Returned GTT offset of the buffer. */
+ uint64_t offset;
+};
+
+struct drm_i915_gem_unpin {
+ /** Handle of the buffer to be unpinned. */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_i915_gem_busy {
+ /** Handle of the buffer to check for busy */
+ uint32_t handle;
+
+ /** Return busy status (1 if busy, 0 if idle) */
+ uint32_t busy;
+};
+
#endif /* _I915_DRM_H_ */
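To make the relocation and ordering comments above concrete, here is a hedged userspace sketch (not part of the patch) that submits one batch buffer containing a single pointer to a target buffer. The function name submit_one_batch, the handles, and the pointer location at offset 4 are assumptions for illustration; the target is listed before the batch, as the execbuffer comment requires.

/* Sketch only: one batch, one relocation against a render target. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"
#include "i915_drm.h"

static int submit_one_batch(int fd, uint32_t target_handle,
			    uint32_t batch_handle, uint32_t batch_len)
{
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_exec_object objs[2];
	struct drm_i915_gem_execbuffer execbuf;

	/* One pointer in the batch, 4 bytes in, referring to the target. */
	memset(&reloc, 0, sizeof(reloc));
	reloc.target_handle = target_handle;
	reloc.offset = 4;
	reloc.delta = 0;
	reloc.read_domains = DRM_GEM_DOMAIN_I915_RENDER;
	reloc.write_domain = 0;

	/* The target must appear before the batch that relocates against it. */
	memset(objs, 0, sizeof(objs));
	objs[0].handle = target_handle;
	objs[1].handle = batch_handle;
	objs[1].relocation_count = 1;
	objs[1].relocs_ptr = (uintptr_t)&reloc;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)objs;
	execbuf.buffer_count = 2;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = batch_len;

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
}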
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index 6d72c051..eab51e39 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -114,6 +114,8 @@ struct drm_i915_master_private {
};
struct drm_i915_private {
+ struct drm_device *dev;
+
struct drm_buffer_object *ring_buffer;
drm_local_map_t *mmio_map;
@@ -145,7 +147,7 @@ struct drm_i915_private {
DRM_SPINTYPE user_irq_lock;
int user_irq_refcount;
int fence_irq_on;
- uint32_t irq_enable_reg;
+ uint32_t irq_mask_reg;
int irq_enabled;
struct workqueue_struct *wq;
@@ -187,6 +189,57 @@ struct drm_i915_private {
int int_crt_support:1;
#endif
+ struct {
+ struct drm_memrange gtt_space;
+
+ /**
+ * List of objects currently involved in rendering from the
+ * ringbuffer.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * List of objects which are not in the ringbuffer but which
+ * still have a write_domain which needs to be flushed before
+ * unbinding.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head flushing_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head request_list;
+
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct timer_list retire_timer;
+ struct work_struct retire_task;
+
+ uint32_t next_gem_seqno;
+ } mm;
+
+ struct work_struct user_interrupt_task;
+
/* Register state */
u8 saveLBB;
u32 saveDSPACNTR;
@@ -284,6 +337,68 @@ enum intel_chip_family {
CHIP_I965 = 0x08,
};
+/** driver private structure attached to each drm_gem_object */
+struct drm_i915_gem_object {
+ struct drm_gem_object *obj;
+
+ /** Current space allocated to this object in the GTT, if any. */
+ struct drm_memrange_node *gtt_space;
+
+ /** This object's place on the active/flushing/inactive lists */
+ struct list_head list;
+
+ /**
+ * This is set if the object is on the active or flushing lists
+ * (has pending rendering), and is not set if it's on inactive (ready
+ * to be unbound).
+ */
+ int active;
+
+ /** AGP memory structure for our GTT binding. */
+ DRM_AGP_MEM *agp_mem;
+
+ struct page **page_list;
+
+ /**
+ * Current offset of the object in GTT space.
+ *
+ * This is the same as gtt_space->start
+ */
+ uint32_t gtt_offset;
+
+ /** Boolean whether this object has a valid gtt offset. */
+ int gtt_bound;
+
+ /** How many users have pinned this object in GTT space */
+ int pin_count;
+
+ /** Breadcrumb of last rendering to the buffer. */
+ uint32_t last_rendering_seqno;
+};
+
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable
+ * sequence-number comparisons on buffer last_rendering_seqnos, and associate
+ * an emission time with seqnos for tracking how far ahead of the GPU we are.
+ */
+struct drm_i915_gem_request {
+ /** GEM sequence number associated with this request. */
+ uint32_t seqno;
+
+ /** Time at which this request was emitted, in jiffies. */
+ unsigned long emitted_jiffies;
+
+ /** Cache domains that were flushed at the start of the request. */
+ uint32_t flush_domains;
+
+ struct list_head list;
+};
+
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
@@ -309,6 +424,10 @@ extern int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch);
extern int i915_quiescent(struct drm_device *dev);
+int i915_emit_box(struct drm_device * dev,
+ struct drm_clip_rect __user * boxes,
+ int i, int DR1, int DR4);
+
/* i915_irq.c */
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -325,6 +444,7 @@ extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_emit_irq(struct drm_device * dev);
extern void i915_enable_interrupt (struct drm_device *dev);
+extern int i915_wait_irq(struct drm_device * dev, int irq_nr);
extern int i915_enable_vblank(struct drm_device *dev, int crtc);
extern void i915_disable_vblank(struct drm_device *dev, int crtc);
extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
@@ -332,6 +452,7 @@ extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void i915_user_irq_on(struct drm_device *dev);
extern void i915_user_irq_off(struct drm_device *dev);
+extern void i915_user_interrupt_handler(struct work_struct *work);
/* i915_mem.c */
extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -368,7 +489,31 @@ void i915_flush_ttm(struct drm_ttm *ttm);
/* i915_execbuf.c */
int i915_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-
+/* i915_gem.c */
+int i915_gem_init_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_init_object(struct drm_gem_object *obj);
+void i915_gem_free_object(struct drm_gem_object *obj);
+int i915_gem_set_domain(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ uint32_t read_domains,
+ uint32_t write_domain);
+int i915_gem_flush_pwrite(struct drm_gem_object *obj,
+ uint64_t offset, uint64_t size);
+void i915_gem_lastclose(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_timeout(unsigned long data);
+void i915_gem_retire_handler(struct work_struct *work);
#endif
extern unsigned int i915_fbpercrtc;
@@ -392,16 +537,25 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
#define I915_VERBOSE 0
+#define I915_RING_VALIDATE 0
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#define RING_LOCALS unsigned int outring, ringmask, outcount; \
volatile char *virt;
+#if I915_RING_VALIDATE
+void i915_ring_validate(struct drm_device *dev, const char *func, int line);
+#define I915_RING_DO_VALIDATE(dev) i915_ring_validate(dev, __FUNCTION__, __LINE__)
+#else
+#define I915_RING_DO_VALIDATE(dev)
+#endif
+
#define BEGIN_LP_RING(n) do { \
if (I915_VERBOSE) \
DRM_DEBUG("BEGIN_LP_RING(%d)\n", \
(n)); \
+ I915_RING_DO_VALIDATE(dev); \
if (dev_priv->ring.space < (n)*4) \
i915_wait_ring(dev, (n)*4, __FUNCTION__); \
outcount = 0; \
@@ -420,6 +574,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
#define ADVANCE_LP_RING() do { \
if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \
+ I915_RING_DO_VALIDATE(dev); \
dev_priv->ring.tail = outring; \
dev_priv->ring.space -= outcount * 4; \
I915_WRITE(PRB0_TAIL, outring); \
@@ -532,17 +687,40 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) /* used to have 1<<22? */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
+#define MI_STORE_DWORD_INDEX_SHIFT 2
#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+#define BREADCRUMB_BITS 31
+#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
+
+#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
+
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 4: ring 0 head pointer
+ * 5: ring 1 head pointer (915-class)
+ * 6: ring 2 head pointer (915-class)
+ *
+ * The area from dword 0x10 to 0x3ff is available for driver usage.
+ */
+#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
+#define I915_GEM_HWS_INDEX 0x10
+
/*
* 3D instructions used by the kernel
*/
#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
+#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
@@ -603,6 +781,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define PRB1_HEAD 0x02044 /* 915+ only */
#define PRB1_START 0x02048 /* 915+ only */
#define PRB1_CTL 0x0204c /* 915+ only */
+#define I965REG_ACTHD 0x02074
#define HWS_PGA 0x02080
#define IPEIR 0x02088
#define NOPID 0x02094
@@ -632,6 +811,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define EMR 0x020b4
#define ESR 0x020b8
#define INSTPM 0x020c0
+#define I915REG_ACTHD 0x020C8
#define FW_BLC 0x020d8
#define FW_BLC_SELF 0x020e0 /* 915+ only */
#define MI_ARB_STATE 0x020e4 /* 915+ only */
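The status-page comment above is what completion checks are built on: the ring stores a sequence number into the status page with MI_STORE_DWORD_INDEX and the CPU reads it back through READ_HWSP. A hedged kernel-side sketch (not part of the patch, helper name made up) of deciding whether a given seqno has been reached, assuming the definitions in i915_drv.h:

/* Illustration only; i915_seqno_passed is a made-up helper name. */
static inline int i915_seqno_passed(struct drm_i915_private *dev_priv, u32 seqno)
{
	u32 hw_seqno = READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);

	/* Signed difference so the comparison survives 32-bit wraparound. */
	return (s32)(hw_seqno - seqno) >= 0;
}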
diff --git a/shared-core/i915_init.c b/shared-core/i915_init.c
index f2c07fc6..e13d12fc 100644
--- a/shared-core/i915_init.c
+++ b/shared-core/i915_init.c
@@ -228,9 +228,6 @@ int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_init(dev);
drm_helper_initial_config(dev, false);
- drm_mm_print(&dev->bm.man[DRM_BO_MEM_VRAM].manager, "VRAM");
- drm_mm_print(&dev->bm.man[DRM_BO_MEM_TT].manager, "TT");
-
dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL);
if (!dev->devname) {
ret = -ENOMEM;
@@ -293,7 +290,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
memset(dev_priv, 0, sizeof(struct drm_i915_private));
dev->dev_private = (void *)dev_priv;
-// dev_priv->flags = flags;
+ dev_priv->dev = dev;
/* i915 has 4 more counters */
dev->counters += 4;
@@ -341,6 +338,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
+ INIT_LIST_HEAD(&dev_priv->mm.active_list);
+ INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+ INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.request_list);
+ dev_priv->mm.retire_timer.function = i915_gem_retire_timeout;
+ dev_priv->mm.retire_timer.data = (unsigned long) dev;
+ init_timer_deferrable (&dev_priv->mm.retire_timer);
+ INIT_WORK(&dev_priv->mm.retire_task,
+ i915_gem_retire_handler);
+ INIT_WORK(&dev_priv->user_interrupt_task,
+ i915_user_interrupt_handler);
+ dev_priv->mm.next_gem_seqno = 1;
+
#ifdef __linux__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
intel_init_chipset_flush_compat(dev);
@@ -500,7 +510,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
@@ -511,8 +521,33 @@ void i915_driver_lastclose(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
+#ifdef I915_HAVE_BUFFER
+ if (dev_priv->val_bufs) {
+ vfree(dev_priv->val_bufs);
+ dev_priv->val_bufs = NULL;
+ }
+#endif
+
+ i915_gem_lastclose(dev);
+
if (dev_priv->agp_heap)
i915_mem_takedown(&(dev_priv->agp_heap));
+
+#if defined(I915_HAVE_BUFFER)
+ if (dev_priv->sarea_kmap.virtual) {
+ drm_bo_kunmap(&dev_priv->sarea_kmap);
+ dev_priv->sarea_kmap.virtual = NULL;
+ dev->control->master->lock.hw_lock = NULL;
+ dev->sigdata.lock = NULL;
+ }
+
+ if (dev_priv->sarea_bo) {
+ mutex_lock(&dev->struct_mutex);
+ drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
+ mutex_unlock(&dev->struct_mutex);
+ dev_priv->sarea_bo = NULL;
+ }
+#endif
i915_dma_cleanup(dev);
}
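The retire_timer/retire_task wiring above follows the usual kernel pattern of a timer handler that defers the real work to process context and rearms itself while requests remain outstanding. The sketch below shows only that generic pattern, with a made-up function name; it is not the i915_gem.c implementation carried by this patch.

/* Generic pattern only, not the patch's i915_gem.c. */
static void example_retire_timeout(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *) data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Punt retirement to process context via the work item set up above. */
	schedule_work(&dev_priv->mm.retire_task);
	if (!list_empty(&dev_priv->mm.request_list))
		mod_timer(&dev_priv->mm.retire_timer, jiffies + HZ);
}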
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index 2d355688..d36a3691 100644
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@ -443,9 +443,12 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
static struct drm_device *hotplug_dev;
-/*
- * This code is called in a more safe envirmoent to handle the hotplugs.
- * Add code here for hotplug love to userspace.
+/**
+ * Handler for user interrupts in process context (able to sleep, do VFS
+ * operations, etc.)
+ *
+ * If another IRQ comes in while we're in this handler, it will still get put
+ * on the queue again to be rerun when we finish.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void i915_hotplug_work_func(void *work)
@@ -485,12 +488,26 @@ static int i915_run_hotplug_tasklet(struct drm_device *dev, uint32_t stat)
if (stat & SDVOC_HOTPLUG_INT_STATUS) {
DRM_DEBUG("sDVOC event\n");
}
-
queue_work(dev_priv->wq, &hotplug);
return 0;
}
+void
+i915_user_interrupt_handler(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv;
+ struct drm_device *dev;
+
+ dev_priv = container_of(work, struct drm_i915_private,
+ user_interrupt_task);
+ dev = dev_priv->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_retire_requests(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
+
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -507,7 +524,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
else
iir = I915_READ16(IIR);
- iir &= (dev_priv->irq_enable_reg | I915_USER_INTERRUPT);
+ iir &= (dev_priv->irq_mask_reg | I915_USER_INTERRUPT);
#if 0
DRM_DEBUG("flag=%08x\n", iir);
@@ -581,6 +598,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
DRM_WAKEUP(&dev_priv->irq_queue);
#ifdef I915_HAVE_FENCE
i915_fence_handler(dev);
+ schedule_work(&dev_priv->user_interrupt_task);
#endif
}
@@ -636,11 +654,12 @@ void i915_user_irq_on(struct drm_device *dev)
DRM_SPINLOCK(&dev_priv->user_irq_lock);
if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
- dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
+ dev_priv->irq_mask_reg &= ~I915_USER_INTERRUPT;
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- I915_WRITE(IER, dev_priv->irq_enable_reg);
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
else
- I915_WRITE16(IER, dev_priv->irq_enable_reg);
+ I915_WRITE16(IMR, dev_priv->irq_mask_reg);
+ I915_READ16(IMR);
}
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
@@ -651,18 +670,20 @@ void i915_user_irq_off(struct drm_device *dev)
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
DRM_SPINLOCK(&dev_priv->user_irq_lock);
+ BUG_ON(dev_priv->user_irq_refcount <= 0);
if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
- // dev_priv->irq_enable_reg &= ~I915_USER_INTERRUPT;
- // if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- // I915_WRITE(IER, dev_priv->irq_enable_reg);
- // else
- // I915_WRITE16(IER, dev_priv->irq_enable_reg);
+ dev_priv->irq_mask_reg |= I915_USER_INTERRUPT;
+ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ else
+ I915_WRITE16(IMR, dev_priv->irq_mask_reg);
+ I915_READ16(IMR);
}
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
}
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
struct drm_i915_master_private *master_priv;
@@ -739,16 +760,17 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
int pipe = i915_get_pipe(dev, plane);
u32 pipestat_reg = 0;
+ u32 mask_reg = 0;
u32 pipestat;
switch (pipe) {
case 0:
pipestat_reg = PIPEASTAT;
- dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case 1:
pipestat_reg = PIPEBSTAT;
- dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
default:
DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
@@ -775,11 +797,13 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
I915_WRITE(pipestat_reg, pipestat);
}
+ DRM_SPINLOCK(&dev_priv->user_irq_lock);
+ dev_priv->irq_mask_reg &= ~mask_reg;
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- I915_WRITE(IER, dev_priv->irq_enable_reg);
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
else
- I915_WRITE16(IER, dev_priv->irq_enable_reg);
-
+ I915_WRITE16(IMR, dev_priv->irq_mask_reg);
+ DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
return 0;
}
@@ -789,16 +813,17 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
int pipe = i915_get_pipe(dev, plane);
u32 pipestat_reg = 0;
+ u32 mask_reg = 0;
u32 pipestat;
switch (pipe) {
case 0:
pipestat_reg = PIPEASTAT;
- dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case 1:
pipestat_reg = PIPEBSTAT;
- dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
default:
DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
@@ -806,13 +831,15 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
break;
}
+ DRM_SPINLOCK(&dev_priv->user_irq_lock);
+ dev_priv->irq_mask_reg |= mask_reg;
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- I915_WRITE(IER, dev_priv->irq_enable_reg);
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
else
- I915_WRITE16(IER, dev_priv->irq_enable_reg);
+ I915_WRITE16(IMR, dev_priv->irq_mask_reg);
+ DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
- if (pipestat_reg)
- {
+ if (pipestat_reg) {
pipestat = I915_READ (pipestat_reg);
pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
PIPE_VBLANK_INTERRUPT_ENABLE);
@@ -822,6 +849,7 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
PIPE_VBLANK_INTERRUPT_STATUS);
I915_WRITE(pipestat_reg, pipestat);
+ (void) I915_READ(pipestat_reg);
}
}
@@ -830,14 +858,14 @@ void i915_enable_interrupt (struct drm_device *dev)
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
struct drm_connector *o;
- dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
+ dev_priv->irq_mask_reg &= ~I915_USER_INTERRUPT;
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
if (dev->mode_config.num_connector)
- dev_priv->irq_enable_reg |= I915_DISPLAY_PORT_INTERRUPT;
+ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
} else {
if (dev->mode_config.num_connector)
- dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
/* Enable global interrupts for hotplug - not a pipeA event */
I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) |
@@ -847,7 +875,8 @@ void i915_enable_interrupt (struct drm_device *dev)
PIPE_HOTPLUG_INTERRUPT_STATUS);
}
- if (dev_priv->irq_enable_reg & (I915_DISPLAY_PORT_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)) {
+ if (!(dev_priv->irq_mask_reg & I915_DISPLAY_PORT_INTERRUPT) ||
+ !(dev_priv->irq_mask_reg & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)) {
u32 temp = 0;
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
@@ -891,10 +920,13 @@ void i915_enable_interrupt (struct drm_device *dev)
}
}
- if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- I915_WRITE(IER, dev_priv->irq_enable_reg);
- else
- I915_WRITE16(IER, dev_priv->irq_enable_reg);
+ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ I915_WRITE(IER, ~dev_priv->irq_mask_reg);
+ } else {
+ I915_WRITE16(IMR, dev_priv->irq_mask_reg);
+ I915_WRITE16(IER, ~(u16)dev_priv->irq_mask_reg);
+ }
dev_priv->irq_enabled = 1;
}
@@ -1134,7 +1166,6 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
tmp = I915_READ16(IIR);
I915_WRITE16(IIR, tmp);
}
-
}
int i915_driver_irq_postinstall(struct drm_device * dev)
@@ -1148,7 +1179,7 @@ int i915_driver_irq_postinstall(struct drm_device * dev)
DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
dev_priv->user_irq_refcount = 0;
- dev_priv->irq_enable_reg = 0;
+ dev_priv->irq_mask_reg = ~0;
ret = drm_vblank_init(dev, num_pipes);
if (ret)
@@ -1179,7 +1210,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
if (!dev_priv)
return;
- dev_priv->irq_enabled = 0;
+ dev_priv->irq_enabled = 1;
temp = I915_READ(PIPEASTAT);
I915_WRITE(PIPEASTAT, temp);
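The IER-to-IMR conversion above inverts the sense of the bookkeeping: IMR is a mask register, so a set bit blocks an interrupt and enabling one means clearing its bit, with a read-back to flush the posted write. A hedged sketch with made-up helper names, assuming the I915_WRITE/I915_READ macros from i915_drv.h:

/* Sketch only; helper names are made up. */
static void example_unmask_irq(struct drm_i915_private *dev_priv, u32 bits)
{
	dev_priv->irq_mask_reg &= ~bits;		/* unmask == enable */
	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	(void) I915_READ(IMR);				/* posting read */
}

static void example_mask_irq(struct drm_i915_private *dev_priv, u32 bits)
{
	dev_priv->irq_mask_reg |= bits;			/* mask == disable */
	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	(void) I915_READ(IMR);
}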
diff --git a/shared-core/radeon_cp.c b/shared-core/radeon_cp.c
index 2e680306..819a61ae 100644
--- a/shared-core/radeon_cp.c
+++ b/shared-core/radeon_cp.c
@@ -112,6 +112,27 @@ static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_lo
RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
}
+static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
+{
+ u32 agp_base_hi = upper_32_bits(agp_base);
+ u32 agp_base_lo = agp_base & 0xffffffff;
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
+ R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
+ R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
+ } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
+ RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
+ RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
+ } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
+ R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
+ R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
+ } else {
+ RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
+ RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
+ }
+}
+
static int RADEON_READ_PLL(struct drm_device * dev, int addr)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -542,9 +563,8 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
- RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
- if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
- RADEON_WRITE(RADEON_AGP_BASE_2, 0);
+ radeon_write_agp_base(dev_priv, dev->agp->base);
+
radeon_write_agp_location(dev_priv,
(((dev_priv->gart_vm_start - 1 +
dev_priv->gart_size) & 0xffff0000) |
diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h
index 1b32b2f4..e263c610 100644
--- a/shared-core/radeon_drv.h
+++ b/shared-core/radeon_drv.h
@@ -524,9 +524,13 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,
#define RV515_MC_FB_LOCATION 0x01
#define RV515_MC_AGP_LOCATION 0x02
+#define RV515_MC_AGP_BASE 0x03
+#define RV515_MC_AGP_BASE_2 0x04
#define R520_MC_FB_LOCATION 0x04
#define R520_MC_AGP_LOCATION 0x05
+#define R520_MC_AGP_BASE 0x06
+#define R520_MC_AGP_BASE_2 0x07
#define RADEON_MPP_TB_CONFIG 0x01c0
#define RADEON_MEM_CNTL 0x0140
diff --git a/tests/Makefile.am b/tests/Makefile.am
index dce1754e..718cc436 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -22,7 +22,10 @@ TESTS = auth \
getstats \
lock \
setversion \
- updatedraw
+ updatedraw \
+ gem_basic \
+ gem_readwrite \
+ gem_mmap
EXTRA_PROGRAMS = $(TESTS)
CLEANFILES = $(EXTRA_PROGRAMS) $(EXTRA_LTLIBRARIES)
diff --git a/tests/drmtest.c b/tests/drmtest.c
index cae99a0c..5453b105 100644
--- a/tests/drmtest.c
+++ b/tests/drmtest.c
@@ -26,6 +26,7 @@
*/
#include <fcntl.h>
+#include <sys/stat.h>
#include "drmtest.h"
/** Open the first DRM device we can find, searching up to 16 device nodes */
@@ -80,4 +81,3 @@ int drm_open_any_master(void)
fprintf(stderr, "Couldn't find an un-controlled DRM device\n");
abort();
}
-
diff --git a/tests/gem_basic.c b/tests/gem_basic.c
new file mode 100644
index 00000000..8b8b63d0
--- /dev/null
+++ b/tests/gem_basic.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include "drm.h"
+
+static void
+test_bad_close(int fd)
+{
+ struct drm_gem_close close;
+ int ret;
+
+ printf("Testing error return on bad close ioctl.\n");
+
+ close.handle = 0x10101010;
+ ret = ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
+
+ assert(ret == -1 && errno == EINVAL);
+}
+
+static void
+test_create_close(int fd)
+{
+ struct drm_gem_create create;
+ struct drm_gem_close close;
+ int ret;
+
+ printf("Testing creating and closing an object.\n");
+
+ memset(&create, 0, sizeof(create));
+ create.size = 16 * 1024;
+ ret = ioctl(fd, DRM_IOCTL_GEM_CREATE, &create);
+ assert(ret == 0);
+
+ close.handle = create.handle;
+ ret = ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
+}
+
+static void
+test_create_fd_close(int fd)
+{
+ struct drm_gem_create create;
+ int ret;
+
+ printf("Testing closing with an object allocated.\n");
+
+ memset(&create, 0, sizeof(create));
+ create.size = 16 * 1024;
+ ret = ioctl(fd, DRM_IOCTL_GEM_CREATE, &create);
+ assert(ret == 0);
+
+ close(fd);
+}
+
+int main(int argc, char **argv)
+{
+ int fd;
+
+ fd = drm_open_any();
+
+ test_bad_close(fd);
+ test_create_close(fd);
+ test_create_fd_close(fd);
+
+ return 0;
+}
diff --git a/tests/gem_mmap.c b/tests/gem_mmap.c
new file mode 100644
index 00000000..3f8e27a0
--- /dev/null
+++ b/tests/gem_mmap.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include "drm.h"
+
+#define OBJECT_SIZE 16384
+
+int do_read(int fd, int handle, void *buf, int offset, int size)
+{
+ struct drm_gem_pread read;
+
+ /* Ensure that we don't have any convenient data in buf in case
+ * we fail.
+ */
+ memset(buf, 0xd0, size);
+
+ memset(&read, 0, sizeof(read));
+ read.handle = handle;
+ read.data_ptr = (uintptr_t)buf;
+ read.size = size;
+ read.offset = offset;
+
+ return ioctl(fd, DRM_IOCTL_GEM_PREAD, &read);
+}
+
+int do_write(int fd, int handle, void *buf, int offset, int size)
+{
+ struct drm_gem_pwrite write;
+
+ memset(&write, 0, sizeof(write));
+ write.handle = handle;
+ write.data_ptr = (uintptr_t)buf;
+ write.size = size;
+ write.offset = offset;
+
+ return ioctl(fd, DRM_IOCTL_GEM_PWRITE, &write);
+}
+
+int main(int argc, char **argv)
+{
+ int fd;
+ struct drm_gem_create create;
+ struct drm_gem_mmap mmap;
+ struct drm_gem_close unref;
+ uint8_t expected[OBJECT_SIZE];
+ uint8_t buf[OBJECT_SIZE];
+ uint8_t *addr;
+ int ret;
+ int handle;
+
+ fd = drm_open_any();
+
+ memset(&mmap, 0, sizeof(mmap));
+ mmap.handle = 0x10101010;
+ mmap.offset = 0;
+ mmap.size = 4096;
+ printf("Testing mmaping of bad object.\n");
+ ret = ioctl(fd, DRM_IOCTL_GEM_MMAP, &mmap);
+ assert(ret == -1 && errno == EINVAL);
+
+ memset(&create, 0, sizeof(create));
+ create.size = OBJECT_SIZE;
+ ret = ioctl(fd, DRM_IOCTL_GEM_CREATE, &create);
+ assert(ret == 0);
+ handle = create.handle;
+
+ printf("Testing mmaping of newly created object.\n");
+ mmap.handle = handle;
+ mmap.offset = 0;
+ mmap.size = OBJECT_SIZE;
+ ret = ioctl(fd, DRM_IOCTL_GEM_MMAP, &mmap);
+ assert(ret == 0);
+ addr = (uint8_t *)(uintptr_t)mmap.addr_ptr;
+
+ printf("Testing contents of newly created object.\n");
+ memset(expected, 0, sizeof(expected));
+ assert(memcmp(addr, expected, sizeof(expected)) == 0);
+
+ printf("Testing coherency of writes and mmap reads.\n");
+ memset(buf, 0, sizeof(buf));
+ memset(buf + 1024, 0x01, 1024);
+ memset(expected + 1024, 0x01, 1024);
+ ret = do_write(fd, handle, buf, 0, OBJECT_SIZE);
+ assert(ret == 0);
+ assert(memcmp(buf, addr, sizeof(buf)) == 0);
+
+ printf("Testing that mapping stays after close\n");
+ unref.handle = handle;
+ ret = ioctl(fd, DRM_IOCTL_GEM_CLOSE, &unref);
+ assert(ret == 0);
+ assert(memcmp(buf, addr, sizeof(buf)) == 0);
+
+ printf("Testing unmapping\n");
+ munmap(addr, OBJECT_SIZE);
+
+ close(fd);
+
+ return 0;
+}
diff --git a/tests/gem_readwrite.c b/tests/gem_readwrite.c
new file mode 100644
index 00000000..a48f9847
--- /dev/null
+++ b/tests/gem_readwrite.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include "drm.h"
+
+#define OBJECT_SIZE 16384
+
+int do_read(int fd, int handle, void *buf, int offset, int size)
+{
+ struct drm_gem_pread read;
+
+ /* Ensure that we don't have any convenient data in buf in case
+ * we fail.
+ */
+ memset(buf, 0xd0, size);
+
+ memset(&read, 0, sizeof(read));
+ read.handle = handle;
+ read.data_ptr = (uintptr_t)buf;
+ read.size = size;
+ read.offset = offset;
+
+ return ioctl(fd, DRM_IOCTL_GEM_PREAD, &read);
+}
+
+int do_write(int fd, int handle, void *buf, int offset, int size)
+{
+ struct drm_gem_pwrite write;
+
+ memset(&write, 0, sizeof(write));
+ write.handle = handle;
+ write.data_ptr = (uintptr_t)buf;
+ write.size = size;
+ write.offset = offset;
+
+ return ioctl(fd, DRM_IOCTL_GEM_PWRITE, &write);
+}
+
+int main(int argc, char **argv)
+{
+ int fd;
+ struct drm_gem_create create;
+ uint8_t expected[OBJECT_SIZE];
+ uint8_t buf[OBJECT_SIZE];
+ int ret;
+ int handle;
+
+ fd = drm_open_any();
+
+ memset(&create, 0, sizeof(create));
+ create.size = OBJECT_SIZE;
+ ret = ioctl(fd, DRM_IOCTL_GEM_CREATE, &create);
+ assert(ret == 0);
+ handle = create.handle;
+
+ printf("Testing contents of newly created object.\n");
+ ret = do_read(fd, handle, buf, 0, OBJECT_SIZE);
+ assert(ret == 0);
+ memset(&expected, 0, sizeof(expected));
+ assert(memcmp(expected, buf, sizeof(expected)) == 0);
+
+ printf("Testing read beyond end of buffer.\n");
+ ret = do_read(fd, handle, buf, OBJECT_SIZE / 2, OBJECT_SIZE);
+ assert(ret == -1 && errno == EINVAL);
+
+ printf("Testing full write of buffer\n");
+ memset(buf, 0, sizeof(buf));
+ memset(buf + 1024, 0x01, 1024);
+ memset(expected + 1024, 0x01, 1024);
+ ret = do_write(fd, handle, buf, 0, OBJECT_SIZE);
+ assert(ret == 0);
+ ret = do_read(fd, handle, buf, 0, OBJECT_SIZE);
+ assert(ret == 0);
+ assert(memcmp(buf, expected, sizeof(buf)) == 0);
+
+ printf("Testing partial write of buffer\n");
+ memset(buf + 4096, 0x02, 1024);
+ memset(expected + 4096, 0x02, 1024);
+ ret = do_write(fd, handle, buf + 4096, 4096, 1024);
+ assert(ret == 0);
+ ret = do_read(fd, handle, buf, 0, OBJECT_SIZE);
+ assert(ret == 0);
+ assert(memcmp(buf, expected, sizeof(buf)) == 0);
+
+ printf("Testing partial read of buffer\n");
+ ret = do_read(fd, handle, buf, 512, 1024);
+ assert(ret == 0);
+ assert(memcmp(buf, expected + 512, 1024) == 0);
+
+ close(fd);
+
+ return 0;
+}