author     Kristian Høgsberg <krh@bitplanet.net>    2009-11-17 11:14:54 -0500
committer  Kristian Høgsberg <krh@bitplanet.net>    2009-11-17 11:15:06 -0500
commit     4f57abfe66091281c9f59c14e6ea27b524b55d5b (patch)
tree       f00d1022cf208b381b93aa5c96858164e6a3f602 /intel
parent     9dd3613073aa2491cef440725fdfa0cf1e8f1a42 (diff)
Move libdrm/ up one level
Diffstat (limited to 'intel')
-rw-r--r--  intel/Makefile.am         |   50
-rw-r--r--  intel/intel_atomic.h      |   78
-rw-r--r--  intel/intel_bufmgr.c      |  237
-rw-r--r--  intel/intel_bufmgr.h      |  212
-rw-r--r--  intel/intel_bufmgr_fake.c | 1610
-rw-r--r--  intel/intel_bufmgr_gem.c  | 1722
-rw-r--r--  intel/intel_bufmgr_priv.h |  254
-rw-r--r--  intel/intel_chipset.h     |   77
-rw-r--r--  intel/libdrm_intel.pc.in  |   10
-rw-r--r--  intel/mm.c                |  271
-rw-r--r--  intel/mm.h                |   94
11 files changed, 4615 insertions, 0 deletions
diff --git a/intel/Makefile.am b/intel/Makefile.am
new file mode 100644
index 00000000..8bb2c6e3
--- /dev/null
+++ b/intel/Makefile.am
@@ -0,0 +1,50 @@
+# Copyright © 2008 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+# Authors:
+# Eric Anholt <eric@anholt.net>
+
+AM_CFLAGS = \
+ $(WARN_CFLAGS) \
+ -I$(top_srcdir) \
+ -I$(top_srcdir)/intel \
+ $(PTHREADSTUBS_CFLAGS) \
+ -I$(top_srcdir)/include/drm
+
+libdrm_intel_la_LTLIBRARIES = libdrm_intel.la
+libdrm_intel_ladir = $(libdir)
+libdrm_intel_la_LDFLAGS = -version-number 1:0:0 -no-undefined
+libdrm_intel_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@ @CLOCK_LIB@
+
+libdrm_intel_la_SOURCES = \
+ intel_atomic.h \
+ intel_bufmgr.c \
+ intel_bufmgr_priv.h \
+ intel_bufmgr_fake.c \
+ intel_bufmgr_gem.c \
+ intel_chipset.h \
+ mm.c \
+ mm.h
+
+libdrm_intelincludedir = ${includedir}
+libdrm_intelinclude_HEADERS = intel_bufmgr.h
+
+pkgconfig_DATA = libdrm_intel.pc
diff --git a/intel/intel_atomic.h b/intel/intel_atomic.h
new file mode 100644
index 00000000..e725c4a4
--- /dev/null
+++ b/intel/intel_atomic.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+/**
+ * @file intel_atomic.h
+ *
+ * Private definitions for atomic operations
+ */
+
+#ifndef INTEL_ATOMICS_H
+#define INTEL_ATOMICS_H
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#if HAVE_INTEL_ATOMIC_PRIMITIVES
+
+#define HAS_ATOMIC_OPS 1
+
+typedef struct {
+ int atomic;
+} atomic_t;
+
+# define atomic_read(x) ((x)->atomic)
+# define atomic_set(x, val) ((x)->atomic = (val))
+# define atomic_inc(x) ((void) __sync_fetch_and_add (&(x)->atomic, 1))
+# define atomic_dec_and_test(x) (__sync_fetch_and_add (&(x)->atomic, -1) == 1)
+# define atomic_cmpxchg(x, oldv, newv) __sync_val_compare_and_swap (&(x)->atomic, oldv, newv)
+
+#endif
+
+#if HAVE_LIB_ATOMIC_OPS
+#include <atomic_ops.h>
+
+#define HAS_ATOMIC_OPS 1
+
+typedef struct {
+ AO_t atomic;
+} atomic_t;
+
+# define atomic_read(x) AO_load_full(&(x)->atomic)
+# define atomic_set(x, val) AO_store_full(&(x)->atomic, (val))
+# define atomic_inc(x) ((void) AO_fetch_and_add1_full(&(x)->atomic))
+# define atomic_dec_and_test(x) (AO_fetch_and_sub1_full(&(x)->atomic) == 1)
+# define atomic_cmpxchg(x, oldv, newv) AO_compare_and_swap_full(&(x)->atomic, oldv, newv)
+
+#endif
+
+#if ! HAS_ATOMIC_OPS
+#error libdrm-intel requires atomic operations, please define them for your CPU/compiler.
+#endif
+
+#endif
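
The atomic_t wrappers above exist so the buffer manager can do lock-free reference counting. A minimal sketch of that usage pattern follows; the example_obj type and helpers are hypothetical, for illustration only, and assume one of the two back-ends above was detected at configure time.

#include <stdlib.h>
#include "intel_atomic.h"

/* example_obj is a hypothetical refcounted object, not part of this patch. */
struct example_obj {
	atomic_t refcount;
};

static struct example_obj *example_obj_create(void)
{
	struct example_obj *obj = calloc(1, sizeof(*obj));
	if (obj)
		atomic_set(&obj->refcount, 1);	/* creator holds one reference */
	return obj;
}

static void example_obj_reference(struct example_obj *obj)
{
	atomic_inc(&obj->refcount);		/* atomic increment, no lock taken */
}

static void example_obj_unreference(struct example_obj *obj)
{
	/* atomic_dec_and_test() returns true when the count drops to zero */
	if (atomic_dec_and_test(&obj->refcount))
		free(obj);
}
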
diff --git a/intel/intel_bufmgr.c b/intel/intel_bufmgr.c
new file mode 100644
index 00000000..2469cd84
--- /dev/null
+++ b/intel/intel_bufmgr.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright © 2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <assert.h>
+#include <errno.h>
+#include <drm.h>
+#include <i915_drm.h>
+#include "intel_bufmgr.h"
+#include "intel_bufmgr_priv.h"
+
+/** @file intel_bufmgr.c
+ *
+ * Convenience functions for buffer management methods.
+ */
+
+drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment)
+{
+ return bufmgr->bo_alloc(bufmgr, name, size, alignment);
+}
+
+drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned int alignment)
+{
+ return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
+}
+
+drm_intel_bo *
+drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
+ int x, int y, int cpp, uint32_t *tiling_mode,
+ unsigned long *pitch, unsigned long flags)
+{
+ return bufmgr->bo_alloc_tiled(bufmgr, name, x, y, cpp,
+ tiling_mode, pitch, flags);
+}
+
+void drm_intel_bo_reference(drm_intel_bo *bo)
+{
+ bo->bufmgr->bo_reference(bo);
+}
+
+void drm_intel_bo_unreference(drm_intel_bo *bo)
+{
+ if (bo == NULL)
+ return;
+
+ bo->bufmgr->bo_unreference(bo);
+}
+
+int drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
+{
+ return buf->bufmgr->bo_map(buf, write_enable);
+}
+
+int drm_intel_bo_unmap(drm_intel_bo *buf)
+{
+ return buf->bufmgr->bo_unmap(buf);
+}
+
+int
+drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data)
+{
+ int ret;
+
+ if (bo->bufmgr->bo_subdata)
+ return bo->bufmgr->bo_subdata(bo, offset, size, data);
+ if (size == 0 || data == NULL)
+ return 0;
+
+ ret = drm_intel_bo_map(bo, 1);
+ if (ret)
+ return ret;
+ memcpy((unsigned char *)bo->virtual + offset, data, size);
+ drm_intel_bo_unmap(bo);
+ return 0;
+}
+
+int
+drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data)
+{
+ int ret;
+ if (bo->bufmgr->bo_get_subdata)
+ return bo->bufmgr->bo_get_subdata(bo, offset, size, data);
+
+ if (size == 0 || data == NULL)
+ return 0;
+
+ ret = drm_intel_bo_map(bo, 0);
+ if (ret)
+ return ret;
+ memcpy(data, (unsigned char *)bo->virtual + offset, size);
+ drm_intel_bo_unmap(bo);
+ return 0;
+}
+
+void drm_intel_bo_wait_rendering(drm_intel_bo *bo)
+{
+ bo->bufmgr->bo_wait_rendering(bo);
+}
+
+void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
+{
+ bufmgr->destroy(bufmgr);
+}
+
+int
+drm_intel_bo_exec(drm_intel_bo *bo, int used,
+ drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
+{
+ return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4);
+}
+
+void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
+{
+ bufmgr->debug = enable_debug;
+}
+
+int drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count)
+{
+ return bo_array[0]->bufmgr->check_aperture_space(bo_array, count);
+}
+
+int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name)
+{
+ if (bo->bufmgr->bo_flink)
+ return bo->bufmgr->bo_flink(bo, name);
+
+ return -ENODEV;
+}
+
+int
+drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ return bo->bufmgr->bo_emit_reloc(bo, offset,
+ target_bo, target_offset,
+ read_domains, write_domain);
+}
+
+int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
+{
+ if (bo->bufmgr->bo_pin)
+ return bo->bufmgr->bo_pin(bo, alignment);
+
+ return -ENODEV;
+}
+
+int drm_intel_bo_unpin(drm_intel_bo *bo)
+{
+ if (bo->bufmgr->bo_unpin)
+ return bo->bufmgr->bo_unpin(bo);
+
+ return -ENODEV;
+}
+
+int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t stride)
+{
+ if (bo->bufmgr->bo_set_tiling)
+ return bo->bufmgr->bo_set_tiling(bo, tiling_mode, stride);
+
+ *tiling_mode = I915_TILING_NONE;
+ return 0;
+}
+
+int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t * swizzle_mode)
+{
+ if (bo->bufmgr->bo_get_tiling)
+ return bo->bufmgr->bo_get_tiling(bo, tiling_mode, swizzle_mode);
+
+ *tiling_mode = I915_TILING_NONE;
+ *swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ return 0;
+}
+
+int drm_intel_bo_disable_reuse(drm_intel_bo *bo)
+{
+ if (bo->bufmgr->bo_disable_reuse)
+ return bo->bufmgr->bo_disable_reuse(bo);
+ return 0;
+}
+
+int drm_intel_bo_busy(drm_intel_bo *bo)
+{
+ if (bo->bufmgr->bo_busy)
+ return bo->bufmgr->bo_busy(bo);
+ return 0;
+}
+
+int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
+{
+ return bo->bufmgr->bo_references(bo, target_bo);
+}
+
+int drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
+{
+ if (bufmgr->get_pipe_from_crtc_id)
+ return bufmgr->get_pipe_from_crtc_id(bufmgr, crtc_id);
+ return -1;
+}
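
Every wrapper above simply dispatches through the function table that a back-end installs in drm_intel_bufmgr, falling back to a sensible default when the back-end leaves a hook unset. A minimal sketch of how a client drives this convenience API; example_submit and its placeholder command data are hypothetical, not part of this patch, and error handling is trimmed.

#include <drm.h>
#include "intel_bufmgr.h"

/* example_submit: hypothetical helper that uploads and runs a batch. */
static int example_submit(drm_intel_bufmgr *bufmgr, const void *cmds, int used)
{
	/* Allocate a page-aligned batch buffer and copy the commands in. */
	drm_intel_bo *batch = drm_intel_bo_alloc(bufmgr, "batch", 4096, 4096);
	int ret;

	drm_intel_bo_subdata(batch, 0, used, cmds);

	/* Submit with no cliprects, then drop the CPU-side reference. */
	ret = drm_intel_bo_exec(batch, used, NULL, 0, 0);
	drm_intel_bo_unreference(batch);

	return ret;
}
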
diff --git a/intel/intel_bufmgr.h b/intel/intel_bufmgr.h
new file mode 100644
index 00000000..3801ff31
--- /dev/null
+++ b/intel/intel_bufmgr.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/**
+ * @file intel_bufmgr.h
+ *
+ * Public definitions of Intel-specific bufmgr functions.
+ */
+
+#ifndef INTEL_BUFMGR_H
+#define INTEL_BUFMGR_H
+
+#include <stdint.h>
+
+typedef struct _drm_intel_bufmgr drm_intel_bufmgr;
+typedef struct _drm_intel_bo drm_intel_bo;
+
+struct _drm_intel_bo {
+ /**
+ * Size in bytes of the buffer object.
+ *
+ * The size may be larger than the size originally requested for the
+ * allocation, such as being aligned to page size.
+ */
+ unsigned long size;
+
+ /**
+ * Alignment requirement for object
+ *
+ * Used for GTT mapping & pinning the object.
+ */
+ unsigned long align;
+
+ /**
+ * Last seen card virtual address (offset from the beginning of the
+ * aperture) for the object. This should be used to fill relocation
+ * entries when calling drm_intel_bo_emit_reloc()
+ */
+ unsigned long offset;
+
+ /**
+ * Virtual address for accessing the buffer data. Only valid while
+ * mapped.
+ */
+ void *virtual;
+
+ /** Buffer manager context associated with this buffer object */
+ drm_intel_bufmgr *bufmgr;
+
+ /**
+ * MM-specific handle for accessing object
+ */
+ int handle;
+};
+
+#define BO_ALLOC_FOR_RENDER (1<<0)
+
+drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment);
+drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned int alignment);
+drm_intel_bo *drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ int x, int y, int cpp,
+ uint32_t *tiling_mode,
+ unsigned long *pitch,
+ unsigned long flags);
+void drm_intel_bo_reference(drm_intel_bo *bo);
+void drm_intel_bo_unreference(drm_intel_bo *bo);
+int drm_intel_bo_map(drm_intel_bo *bo, int write_enable);
+int drm_intel_bo_unmap(drm_intel_bo *bo);
+
+int drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data);
+int drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data);
+void drm_intel_bo_wait_rendering(drm_intel_bo *bo);
+
+void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug);
+void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr);
+int drm_intel_bo_exec(drm_intel_bo *bo, int used,
+ drm_clip_rect_t * cliprects, int num_cliprects, int DR4);
+int drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count);
+
+int drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain);
+int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment);
+int drm_intel_bo_unpin(drm_intel_bo *bo);
+int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t stride);
+int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t * swizzle_mode);
+int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name);
+int drm_intel_bo_busy(drm_intel_bo *bo);
+
+int drm_intel_bo_disable_reuse(drm_intel_bo *bo);
+int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo);
+
+/* drm_intel_bufmgr_gem.c */
+drm_intel_bufmgr *drm_intel_bufmgr_gem_init(int fd, int batch_size);
+drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned int handle);
+void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr);
+int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
+int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo);
+void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable);
+
+int drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id);
+
+/* drm_intel_bufmgr_fake.c */
+drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
+ unsigned long low_offset,
+ void *low_virtual,
+ unsigned long size,
+ volatile unsigned int
+ *last_dispatch);
+void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
+ volatile unsigned int
+ *last_dispatch);
+void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
+ int (*exec) (drm_intel_bo *bo,
+ unsigned int used,
+ void *priv),
+ void *priv);
+void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
+ unsigned int (*emit) (void *priv),
+ void (*wait) (unsigned int fence,
+ void *priv),
+ void *priv);
+drm_intel_bo *drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long offset,
+ unsigned long size, void *virtual);
+void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
+ void (*invalidate_cb) (drm_intel_bo
+ * bo,
+ void *ptr),
+ void *ptr);
+
+void drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr);
+void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr);
+
+/** @{ Compatibility defines to keep old code building despite the symbol rename
+ * from dri_* to drm_intel_*
+ */
+#define dri_bo drm_intel_bo
+#define dri_bufmgr drm_intel_bufmgr
+#define dri_bo_alloc drm_intel_bo_alloc
+#define dri_bo_reference drm_intel_bo_reference
+#define dri_bo_unreference drm_intel_bo_unreference
+#define dri_bo_map drm_intel_bo_map
+#define dri_bo_unmap drm_intel_bo_unmap
+#define dri_bo_subdata drm_intel_bo_subdata
+#define dri_bo_get_subdata drm_intel_bo_get_subdata
+#define dri_bo_wait_rendering drm_intel_bo_wait_rendering
+#define dri_bufmgr_set_debug drm_intel_bufmgr_set_debug
+#define dri_bufmgr_destroy drm_intel_bufmgr_destroy
+#define dri_bo_exec drm_intel_bo_exec
+#define dri_bufmgr_check_aperture_space drm_intel_bufmgr_check_aperture_space
+#define dri_bo_emit_reloc(reloc_bo, read, write, target_offset, \
+ reloc_offset, target_bo) \
+ drm_intel_bo_emit_reloc(reloc_bo, reloc_offset, \
+ target_bo, target_offset, \
+ read, write);
+#define dri_bo_pin drm_intel_bo_pin
+#define dri_bo_unpin drm_intel_bo_unpin
+#define dri_bo_get_tiling drm_intel_bo_get_tiling
+#define dri_bo_set_tiling(bo, mode) drm_intel_bo_set_tiling(bo, mode, 0)
+#define dri_bo_flink drm_intel_bo_flink
+#define intel_bufmgr_gem_init drm_intel_bufmgr_gem_init
+#define intel_bo_gem_create_from_name drm_intel_bo_gem_create_from_name
+#define intel_bufmgr_gem_enable_reuse drm_intel_bufmgr_gem_enable_reuse
+#define intel_bufmgr_fake_init drm_intel_bufmgr_fake_init
+#define intel_bufmgr_fake_set_last_dispatch drm_intel_bufmgr_fake_set_last_dispatch
+#define intel_bufmgr_fake_set_exec_callback drm_intel_bufmgr_fake_set_exec_callback
+#define intel_bufmgr_fake_set_fence_callback drm_intel_bufmgr_fake_set_fence_callback
+#define intel_bo_fake_alloc_static drm_intel_bo_fake_alloc_static
+#define intel_bo_fake_disable_backing_store drm_intel_bo_fake_disable_backing_store
+#define intel_bufmgr_fake_contended_lock_take drm_intel_bufmgr_fake_contended_lock_take
+#define intel_bufmgr_fake_evict_all drm_intel_bufmgr_fake_evict_all
+
+/** @} */
+
+#endif /* INTEL_BUFMGR_H */
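
The header declares both back-ends behind the same drm_intel_bufmgr interface, so a driver chooses one at start-up depending on whether kernel (GEM) memory management is available. A minimal sketch of that choice; the gem_available flag and the aperture parameters are hypothetical placeholders, not part of this patch.

#include <drm.h>
#include "intel_bufmgr.h"

static drm_intel_bufmgr *
example_bufmgr_init(int fd, int gem_available,
		    unsigned long aper_offset, void *aper_virtual,
		    unsigned long aper_size,
		    volatile unsigned int *last_dispatch)
{
	if (gem_available) {
		/* Kernel memory manager; 16 kB is a typical batch size. */
		drm_intel_bufmgr *bufmgr =
			drm_intel_bufmgr_gem_init(fd, 16 * 1024);

		if (bufmgr != NULL)
			drm_intel_bufmgr_gem_enable_reuse(bufmgr);
		return bufmgr;
	}

	/* Userspace (fake) manager over a fixed aperture range. */
	return drm_intel_bufmgr_fake_init(fd, aper_offset, aper_virtual,
					  aper_size, last_dispatch);
}
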
diff --git a/intel/intel_bufmgr_fake.c b/intel/intel_bufmgr_fake.c
new file mode 100644
index 00000000..54b3cb80
--- /dev/null
+++ b/intel/intel_bufmgr_fake.c
@@ -0,0 +1,1610 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Originally a fake version of the buffer manager so that we could
+ * prototype the changes in a driver fairly quickly; it has since been
+ * fleshed out into a fully functional interim solution.
+ *
+ * Basically wraps the old style memory management in the new
+ * programming interface, but is more expressive and avoids many of
+ * the bugs in the old texture manager.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+#include <xf86drm.h>
+#include <pthread.h>
+#include "intel_bufmgr.h"
+#include "intel_bufmgr_priv.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "mm.h"
+#include "libdrm_lists.h"
+
+#define DBG(...) do { \
+ if (bufmgr_fake->bufmgr.debug) \
+ drmMsg(__VA_ARGS__); \
+} while (0)
+
+/* Internal flags:
+ */
+#define BM_NO_BACKING_STORE 0x00000001
+#define BM_NO_FENCE_SUBDATA 0x00000002
+#define BM_PINNED 0x00000004
+
+/* Wrapper around mm.c's mem_block, which understands that you must
+ * wait for fences to expire before memory can be freed. This is
+ * specific to our use of memcpy for uploads - an upload that was
+ * processed through the command queue wouldn't need to care about
+ * fences.
+ */
+#define MAX_RELOCS 4096
+
+struct fake_buffer_reloc {
+ /** Buffer object that the relocation points at. */
+ drm_intel_bo *target_buf;
+ /** Offset of the relocation entry within reloc_buf. */
+ uint32_t offset;
+ /**
+ * Cached value of the offset when we last performed this relocation.
+ */
+ uint32_t last_target_offset;
+ /** Value added to target_buf's offset to get the relocation entry. */
+ uint32_t delta;
+ /** Cache domains the target buffer is read into. */
+ uint32_t read_domains;
+ /** Cache domain the target buffer will have dirty cachelines in. */
+ uint32_t write_domain;
+};
+
+struct block {
+ struct block *next, *prev;
+ struct mem_block *mem; /* BM_MEM_AGP */
+
+ /**
+ * Marks that the block is currently in the aperture and has yet to be
+ * fenced.
+ */
+ unsigned on_hardware:1;
+ /**
+ * Marks that the block is currently fenced (being used by rendering)
+ * and can't be freed until @fence is passed.
+ */
+ unsigned fenced:1;
+
+ /** Fence cookie for the block. */
+ unsigned fence; /* Split to read_fence, write_fence */
+
+ drm_intel_bo *bo;
+ void *virtual;
+};
+
+typedef struct _bufmgr_fake {
+ drm_intel_bufmgr bufmgr;
+
+ pthread_mutex_t lock;
+
+ unsigned long low_offset;
+ unsigned long size;
+ void *virtual;
+
+ struct mem_block *heap;
+
+ unsigned buf_nr; /* for generating ids */
+
+ /**
+ * List of blocks which are currently in the GART but haven't been
+ * fenced yet.
+ */
+ struct block on_hardware;
+ /**
+ * List of blocks which are in the GART and have an active fence on
+ * them.
+ */
+ struct block fenced;
+ /**
+ * List of blocks which have an expired fence and are ready to be
+ * evicted.
+ */
+ struct block lru;
+
+ unsigned int last_fence;
+
+ unsigned fail:1;
+ unsigned need_fence:1;
+ int thrashing;
+
+ /**
+ * Driver callback to emit a fence, returning the cookie.
+ *
+ * This allows the driver to hook in a replacement for the DRM usage in
+ * bufmgr_fake.
+ *
+ * Currently, this also requires that a write flush be emitted before
+ * emitting the fence, but this should change.
+ */
+ unsigned int (*fence_emit) (void *private);
+ /** Driver callback to wait for a fence cookie to have passed. */
+ void (*fence_wait) (unsigned int fence, void *private);
+ void *fence_priv;
+
+ /**
+ * Driver callback to execute a buffer.
+ *
+ * This allows the driver to hook in a replacement for the DRM usage in
+ * bufmgr_fake.
+ */
+ int (*exec) (drm_intel_bo *bo, unsigned int used, void *priv);
+ void *exec_priv;
+
+ /** Driver-supplied argument to driver callbacks */
+ void *driver_priv;
+ /**
+ * Pointer to kernel-updated sarea data for the last completed user irq
+ */
+ volatile int *last_dispatch;
+
+ int fd;
+
+ int debug;
+
+ int performed_rendering;
+} drm_intel_bufmgr_fake;
+
+typedef struct _drm_intel_bo_fake {
+ drm_intel_bo bo;
+
+ unsigned id; /* debug only */
+ const char *name;
+
+ unsigned dirty:1;
+ /**
+ * Has the card written to this buffer - we may need to copy it back.
+ */
+ unsigned card_dirty:1;
+ unsigned int refcount;
+ /* Flags may consist of any of the DRM_BO flags, plus
+ * DRM_BO_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the
+ * first two driver private flags.
+ */
+ uint64_t flags;
+ /** Cache domains the target buffer is read into. */
+ uint32_t read_domains;
+ /** Cache domain the target buffer will have dirty cachelines in. */
+ uint32_t write_domain;
+
+ unsigned int alignment;
+ int is_static, validated;
+ unsigned int map_count;
+
+ /** relocation list */
+ struct fake_buffer_reloc *relocs;
+ int nr_relocs;
+ /**
+ * Total size of the target_bos of this buffer.
+ *
+ * Used for estimation in check_aperture.
+ */
+ unsigned int child_size;
+
+ struct block *block;
+ void *backing_store;
+ void (*invalidate_cb) (drm_intel_bo *bo, void *ptr);
+ void *invalidate_ptr;
+} drm_intel_bo_fake;
+
+static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
+ unsigned int fence_cookie);
+
+#define MAXFENCE 0x7fffffff
+
+static int
+FENCE_LTE(unsigned a, unsigned b)
+{
+ if (a == b)
+ return 1;
+
+ if (a < b && b - a < (1 << 24))
+ return 1;
+
+ if (a > b && MAXFENCE - a + b < (1 << 24))
+ return 1;
+
+ return 0;
+}
+
+void
+drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
+ unsigned int (*emit) (void *priv),
+ void (*wait) (unsigned int fence,
+ void *priv),
+ void *priv)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+
+ bufmgr_fake->fence_emit = emit;
+ bufmgr_fake->fence_wait = wait;
+ bufmgr_fake->fence_priv = priv;
+}
+
+static unsigned int
+_fence_emit_internal(drm_intel_bufmgr_fake *bufmgr_fake)
+{
+ struct drm_i915_irq_emit ie;
+ int ret, seq = 1;
+
+ if (bufmgr_fake->fence_emit != NULL) {
+ seq = bufmgr_fake->fence_emit(bufmgr_fake->fence_priv);
+ return seq;
+ }
+
+ ie.irq_seq = &seq;
+ ret = drmCommandWriteRead(bufmgr_fake->fd, DRM_I915_IRQ_EMIT,
+ &ie, sizeof(ie));
+ if (ret) {
+ drmMsg("%s: drm_i915_irq_emit: %d\n", __FUNCTION__, ret);
+ abort();
+ }
+
+ DBG("emit 0x%08x\n", seq);
+ return seq;
+}
+
+static void
+_fence_wait_internal(drm_intel_bufmgr_fake *bufmgr_fake, int seq)
+{
+ struct drm_i915_irq_wait iw;
+ int hw_seq, busy_count = 0;
+ int ret;
+ int kernel_lied;
+
+ if (bufmgr_fake->fence_wait != NULL) {
+ bufmgr_fake->fence_wait(seq, bufmgr_fake->fence_priv);
+ clear_fenced(bufmgr_fake, seq);
+ return;
+ }
+
+ DBG("wait 0x%08x\n", iw.irq_seq);
+
+ iw.irq_seq = seq;
+
+ /* The kernel IRQ_WAIT implementation is all sorts of broken.
+ * 1) It returns 1 to 0x7fffffff instead of using the full 32-bit
+ * unsigned range.
+ * 2) It returns 0 if hw_seq >= seq, not seq - hw_seq < 0 on the 32-bit
+ * signed range.
+ * 3) It waits if seq < hw_seq, not seq - hw_seq > 0 on the 32-bit
+ * signed range.
+ * 4) It returns -EBUSY in 3 seconds even if the hardware is still
+ * successfully chewing through buffers.
+ *
+ * Assume that in userland we treat sequence numbers as ints, which
+ * makes some of the comparisons convenient, since the sequence
+ * numbers are all positive signed integers.
+ *
+ * From this we get several cases we need to handle. Here's a timeline.
+ * 0x2 0x7 0x7ffffff8 0x7ffffffd
+ * | | | |
+ * ------------------------------------------------------------
+ *
+ * A) Normal wait for hw to catch up
+ * hw_seq seq
+ * | |
+ * ------------------------------------------------------------
+ * seq - hw_seq = 5. If we call IRQ_WAIT, it will wait for hw to
+ * catch up.
+ *
+ * B) Normal wait for a sequence number that's already passed.
+ * seq hw_seq
+ * | |
+ * ------------------------------------------------------------
+ * seq - hw_seq = -5. If we call IRQ_WAIT, it returns 0 quickly.
+ *
+ * C) Hardware has already wrapped around ahead of us
+ * hw_seq seq
+ * | |
+ * ------------------------------------------------------------
+ * seq - hw_seq = 0x80000000 - 5. If we called IRQ_WAIT, it would wait
+ * for hw_seq >= seq, which may never occur. Thus, we want to catch
+ * this in userland and return 0.
+ *
+ * D) We've wrapped around ahead of the hardware.
+ * seq hw_seq
+ * | |
+ * ------------------------------------------------------------
+ * seq - hw_seq = -(0x80000000 - 5). If we called IRQ_WAIT, it would
+ * return 0 quickly because hw_seq >= seq, even though the hardware
+ * isn't caught up. Thus, we need to catch this early return in
+ * userland and bother the kernel until the hardware really does
+ * catch up.
+ *
+ * E) Hardware might wrap after we test in userland.
+ * hw_seq seq
+ * | |
+ * ------------------------------------------------------------
+ * seq - hw_seq = 5. If we call IRQ_WAIT, it will likely see seq >=
+ * hw_seq and wait. However, suppose hw_seq wraps before we make it
+ * into the kernel. The kernel sees hw_seq >= seq and waits for 3
+ * seconds then returns -EBUSY. This is case C). We should catch
+ * this and then return successfully.
+ *
+ * F) Hardware might take a long time on a buffer.
+ * hw_seq seq
+ * | |
+ * -------------------------------------------------------------------
+ * seq - hw_seq = 5. If we call IRQ_WAIT, if sequence 2 through 5
+ * take too long, it will return -EBUSY. Batchbuffers in the
+ * gltestperf demo were seen to take up to 7 seconds. We should
+ * catch early -EBUSY return and keep trying.
+ */
+
+ do {
+ /* Keep a copy of last_dispatch so that if the wait -EBUSYs
+ * because the hardware didn't catch up in 3 seconds, we can
+ * see if it at least made progress and retry.
+ */
+ hw_seq = *bufmgr_fake->last_dispatch;
+
+ /* Catch case C */
+ if (seq - hw_seq > 0x40000000)
+ return;
+
+ ret = drmCommandWrite(bufmgr_fake->fd, DRM_I915_IRQ_WAIT,
+ &iw, sizeof(iw));
+ /* Catch case D */
+ kernel_lied = (ret == 0) && (seq - *bufmgr_fake->last_dispatch <
+ -0x40000000);
+
+ /* Catch case E */
+ if (ret == -EBUSY
+ && (seq - *bufmgr_fake->last_dispatch > 0x40000000))
+ ret = 0;
+
+ /* Catch case F: Allow up to 15 seconds chewing on one buffer. */
+ if ((ret == -EBUSY) && (hw_seq != *bufmgr_fake->last_dispatch))
+ busy_count = 0;
+ else
+ busy_count++;
+ } while (kernel_lied || ret == -EAGAIN || ret == -EINTR ||
+ (ret == -EBUSY && busy_count < 5));
+
+ if (ret != 0) {
+ drmMsg("%s:%d: Error waiting for fence: %s.\n", __FILE__,
+ __LINE__, strerror(-ret));
+ abort();
+ }
+ clear_fenced(bufmgr_fake, seq);
+}
+
+static int
+_fence_test(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
+{
+ /* Slight problem with wrap-around:
+ */
+ return fence == 0 || FENCE_LTE(fence, bufmgr_fake->last_fence);
+}
+
+/**
+ * Allocate a memory manager block for the buffer.
+ */
+static int
+alloc_block(drm_intel_bo *bo)
+{
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ struct block *block = (struct block *)calloc(sizeof *block, 1);
+ unsigned int align_log2 = ffs(bo_fake->alignment) - 1;
+ unsigned int sz;
+
+ if (!block)
+ return 0;
+
+ sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);
+
+ block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
+ if (!block->mem) {
+ free(block);
+ return 0;
+ }
+
+ DRMINITLISTHEAD(block);
+
+ /* Insert at head or at tail??? */
+ DRMLISTADDTAIL(block, &bufmgr_fake->lru);
+
+ block->virtual = (uint8_t *) bufmgr_fake->virtual +
+ block->mem->ofs - bufmgr_fake->low_offset;
+ block->bo = bo;
+
+ bo_fake->block = block;
+
+ return 1;
+}
+
+/* Release the card storage associated with buf:
+ */
+static void
+free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block,
+ int skip_dirty_copy)
+{
+ drm_intel_bo_fake *bo_fake;
+
+ if (!block)
+ return;
+
+ DBG("free block %p %08x %d %d\n", block, block->mem->ofs,
+ block->on_hardware, block->fenced);
+
+ bo_fake = (drm_intel_bo_fake *) block->bo;
+
+ if (bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE))
+ skip_dirty_copy = 1;
+
+ if (!skip_dirty_copy && (bo_fake->card_dirty == 1)) {
+ memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
+ bo_fake->card_dirty = 0;
+ bo_fake->dirty = 1;
+ }
+
+ if (block->on_hardware) {
+ block->bo = NULL;
+ } else if (block->fenced) {
+ block->bo = NULL;
+ } else {
+ DBG(" - free immediately\n");
+ DRMLISTDEL(block);
+
+ mmFreeMem(block->mem);
+ free(block);
+ }
+}
+
+static void
+alloc_backing_store(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+ assert(!bo_fake->backing_store);
+ assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));
+
+ bo_fake->backing_store = malloc(bo->size);
+
+ DBG("alloc_backing - buf %d %p %d\n", bo_fake->id,
+ bo_fake->backing_store, bo->size);
+ assert(bo_fake->backing_store);
+}
+
+static void
+free_backing_store(drm_intel_bo *bo)
+{
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ if (bo_fake->backing_store) {
+ assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));
+ free(bo_fake->backing_store);
+ bo_fake->backing_store = NULL;
+ }
+}
+
+static void
+set_dirty(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ if (bo_fake->flags & BM_NO_BACKING_STORE
+ && bo_fake->invalidate_cb != NULL)
+ bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);
+
+ assert(!(bo_fake->flags & BM_PINNED));
+
+ DBG("set_dirty - buf %d\n", bo_fake->id);
+ bo_fake->dirty = 1;
+}
+
+static int
+evict_lru(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
+{
+ struct block *block, *tmp;
+
+ DBG("%s\n", __FUNCTION__);
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
+
+ if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
+ continue;
+
+ if (block->fence && max_fence && !FENCE_LTE(block->fence,
+ max_fence))
+ return 0;
+
+ set_dirty(&bo_fake->bo);
+ bo_fake->block = NULL;
+
+ free_block(bufmgr_fake, block, 0);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+evict_mru(drm_intel_bufmgr_fake *bufmgr_fake)
+{
+ struct block *block, *tmp;
+
+ DBG("%s\n", __FUNCTION__);
+
+ DRMLISTFOREACHSAFEREVERSE(block, tmp, &bufmgr_fake->lru) {
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
+
+ if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
+ continue;
+
+ set_dirty(&bo_fake->bo);
+ bo_fake->block = NULL;
+
+ free_block(bufmgr_fake, block, 0);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Removes all objects from the fenced list older than the given fence.
+ */
+static int
+clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int fence_cookie)
+{
+ struct block *block, *tmp;
+ int ret = 0;
+
+ bufmgr_fake->last_fence = fence_cookie;
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->fenced) {
+ assert(block->fenced);
+
+ if (_fence_test(bufmgr_fake, block->fence)) {
+
+ block->fenced = 0;
+
+ if (!block->bo) {
+ DBG("delayed free: offset %x sz %x\n",
+ block->mem->ofs, block->mem->size);
+ DRMLISTDEL(block);
+ mmFreeMem(block->mem);
+ free(block);
+ } else {
+ DBG("return to lru: offset %x sz %x\n",
+ block->mem->ofs, block->mem->size);
+ DRMLISTDEL(block);
+ DRMLISTADDTAIL(block, &bufmgr_fake->lru);
+ }
+
+ ret = 1;
+ } else {
+ /* Blocks are ordered by fence, so if one fails, all
+ * from here will fail also:
+ */
+ DBG("fence not passed: offset %x sz %x %d %d \n",
+ block->mem->ofs, block->mem->size, block->fence,
+ bufmgr_fake->last_fence);
+ break;
+ }
+ }
+
+ DBG("%s: %d\n", __FUNCTION__, ret);
+ return ret;
+}
+
+static void
+fence_blocks(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
+{
+ struct block *block, *tmp;
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
+ DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n",
+ block, block->mem->size, block->mem->ofs, block->bo, fence);
+ block->fence = fence;
+
+ block->on_hardware = 0;
+ block->fenced = 1;
+
+ /* Move to tail of pending list here
+ */
+ DRMLISTDEL(block);
+ DRMLISTADDTAIL(block, &bufmgr_fake->fenced);
+ }
+
+ assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+}
+
+static int
+evict_and_alloc_block(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ assert(bo_fake->block == NULL);
+
+ /* Search for already free memory:
+ */
+ if (alloc_block(bo))
+ return 1;
+
+ /* If we're not thrashing, allow lru eviction to dig deeper into
+ * recently used textures. We'll probably be thrashing soon:
+ */
+ if (!bufmgr_fake->thrashing) {
+ while (evict_lru(bufmgr_fake, 0))
+ if (alloc_block(bo))
+ return 1;
+ }
+
+ /* Keep thrashing counter alive?
+ */
+ if (bufmgr_fake->thrashing)
+ bufmgr_fake->thrashing = 20;
+
+ /* Wait on any already pending fences - here we are waiting for any
+ * freed memory that has been submitted to hardware and fenced to
+ * become available:
+ */
+ while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
+ uint32_t fence = bufmgr_fake->fenced.next->fence;
+ _fence_wait_internal(bufmgr_fake, fence);
+
+ if (alloc_block(bo))
+ return 1;
+ }
+
+ if (!DRMLISTEMPTY(&bufmgr_fake->on_hardware)) {
+ while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
+ uint32_t fence = bufmgr_fake->fenced.next->fence;
+ _fence_wait_internal(bufmgr_fake, fence);
+ }
+
+ if (!bufmgr_fake->thrashing) {
+ DBG("thrashing\n");
+ }
+ bufmgr_fake->thrashing = 20;
+
+ if (alloc_block(bo))
+ return 1;
+ }
+
+ while (evict_mru(bufmgr_fake))
+ if (alloc_block(bo))
+ return 1;
+
+ DBG("%s 0x%x bytes failed\n", __FUNCTION__, bo->size);
+
+ return 0;
+}
+
+/***********************************************************************
+ * Public functions
+ */
+
+/**
+ * Wait for hardware idle by emitting a fence and waiting for it.
+ */
+static void
+drm_intel_bufmgr_fake_wait_idle(drm_intel_bufmgr_fake *bufmgr_fake)
+{
+ unsigned int cookie;
+
+ cookie = _fence_emit_internal(bufmgr_fake);
+ _fence_wait_internal(bufmgr_fake, cookie);
+}
+
+/**
+ * Wait for rendering to a buffer to complete.
+ *
+ * It is assumed that the batchbuffer which performed the rendering included
+ * the necessary flushing.
+ */
+static void
+drm_intel_fake_bo_wait_rendering_locked(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ if (bo_fake->block == NULL || !bo_fake->block->fenced)
+ return;
+
+ _fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
+}
+
+static void
+drm_intel_fake_bo_wait_rendering(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+ drm_intel_fake_bo_wait_rendering_locked(bo);
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+}
+
+/* Specifically ignore texture memory sharing.
+ * -- just evict everything
+ * -- and wait for idle
+ */
+void
+drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+ struct block *block, *tmp;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
+ bufmgr_fake->need_fence = 1;
+ bufmgr_fake->fail = 0;
+
+ /* Wait for hardware idle. We don't know where acceleration has been
+ * happening, so we'll need to wait anyway before letting anything get
+ * put on the card again.
+ */
+ drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
+
+ /* Check that we didn't release the lock without having fenced the last
+ * set of buffers.
+ */
+ assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
+ assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+ assert(_fence_test(bufmgr_fake, block->fence));
+ set_dirty(block->bo);
+ }
+
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+}
+
+static drm_intel_bo *
+drm_intel_fake_bo_alloc(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned int alignment)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake;
+ drm_intel_bo_fake *bo_fake;
+
+ bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+
+ assert(size != 0);
+
+ bo_fake = calloc(1, sizeof(*bo_fake));
+ if (!bo_fake)
+ return NULL;
+
+ bo_fake->bo.size = size;
+ bo_fake->bo.offset = -1;
+ bo_fake->bo.virtual = NULL;
+ bo_fake->bo.bufmgr = bufmgr;
+ bo_fake->refcount = 1;
+
+ /* Alignment must be a power of two */
+ assert((alignment & (alignment - 1)) == 0);
+ if (alignment == 0)
+ alignment = 1;
+ bo_fake->alignment = alignment;
+ bo_fake->id = ++bufmgr_fake->buf_nr;
+ bo_fake->name = name;
+ bo_fake->flags = 0;
+ bo_fake->is_static = 0;
+
+ DBG("drm_bo_alloc: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+ bo_fake->bo.size / 1024);
+
+ return &bo_fake->bo;
+}
+
+static drm_intel_bo *
+drm_intel_fake_bo_alloc_tiled(drm_intel_bufmgr * bufmgr,
+ const char *name,
+ int x, int y, int cpp,
+ uint32_t *tiling_mode,
+ unsigned long *pitch,
+ unsigned long flags)
+{
+ unsigned long stride, aligned_y;
+
+ /* No runtime tiling support for fake. */
+ *tiling_mode = I915_TILING_NONE;
+
+ /* Align it for being a render target. Shouldn't need anything else. */
+ stride = x * cpp;
+ stride = ROUND_UP_TO(stride, 64);
+
+ /* 965 subspan loading alignment */
+ aligned_y = ALIGN(y, 2);
+
+ *pitch = stride;
+
+ return drm_intel_fake_bo_alloc(bufmgr, name, stride * aligned_y,
+ 4096);
+}
+
+drm_intel_bo *
+drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long offset,
+ unsigned long size, void *virtual)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake;
+ drm_intel_bo_fake *bo_fake;
+
+ bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+
+ assert(size != 0);
+
+ bo_fake = calloc(1, sizeof(*bo_fake));
+ if (!bo_fake)
+ return NULL;
+
+ bo_fake->bo.size = size;
+ bo_fake->bo.offset = offset;
+ bo_fake->bo.virtual = virtual;
+ bo_fake->bo.bufmgr = bufmgr;
+ bo_fake->refcount = 1;
+ bo_fake->id = ++bufmgr_fake->buf_nr;
+ bo_fake->name = name;
+ bo_fake->flags = BM_PINNED;
+ bo_fake->is_static = 1;
+
+ DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id,
+ bo_fake->name, bo_fake->bo.size / 1024);
+
+ return &bo_fake->bo;
+}
+
+static void
+drm_intel_fake_bo_reference(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+ bo_fake->refcount++;
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+}
+
+static void
+drm_intel_fake_bo_reference_locked(drm_intel_bo *bo)
+{
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ bo_fake->refcount++;
+}
+
+static void
+drm_intel_fake_bo_unreference_locked(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+ int i;
+
+ if (--bo_fake->refcount == 0) {
+ assert(bo_fake->map_count == 0);
+ /* No remaining references, so free it */
+ if (bo_fake->block)
+ free_block(bufmgr_fake, bo_fake->block, 1);
+ free_backing_store(bo);
+
+ for (i = 0; i < bo_fake->nr_relocs; i++)
+ drm_intel_fake_bo_unreference_locked(bo_fake->relocs[i].
+ target_buf);
+
+ DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id,
+ bo_fake->name);
+
+ free(bo_fake->relocs);
+ free(bo);
+ }
+}
+
+static void
+drm_intel_fake_bo_unreference(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+ drm_intel_fake_bo_unreference_locked(bo);
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+}
+
+/**
+ * Set the buffer as not requiring backing store, and instead get the callback
+ * invoked whenever it would be set dirty.
+ */
+void
+drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
+ void (*invalidate_cb) (drm_intel_bo *bo,
+ void *ptr),
+ void *ptr)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
+ if (bo_fake->backing_store)
+ free_backing_store(bo);
+
+ bo_fake->flags |= BM_NO_BACKING_STORE;
+
+ DBG("disable_backing_store set buf %d dirty\n", bo_fake->id);
+ bo_fake->dirty = 1;
+ bo_fake->invalidate_cb = invalidate_cb;
+ bo_fake->invalidate_ptr = ptr;
+
+ /* Note that it is invalid right from the start. Also note
+ * invalidate_cb is called with the bufmgr locked, so cannot
+ * itself make bufmgr calls.
+ */
+ if (invalidate_cb != NULL)
+ invalidate_cb(bo, ptr);
+
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+}
+
+/**
+ * Map a buffer into bo->virtual, allocating either card memory space (If
+ * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
+ */
+static int
+ drm_intel_fake_bo_map_locked(drm_intel_bo *bo, int write_enable)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ /* Static buffers are always mapped. */
+ if (bo_fake->is_static) {
+ if (bo_fake->card_dirty) {
+ drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
+ bo_fake->card_dirty = 0;
+ }
+ return 0;
+ }
+
+ /* Allow recursive mapping. Mesa may recursively map buffers with
+ * nested display loops, and it is used internally in bufmgr_fake
+ * for relocation.
+ */
+ if (bo_fake->map_count++ != 0)
+ return 0;
+
+ {
+ DBG("drm_bo_map: (buf %d: %s, %d kb)\n", bo_fake->id,
+ bo_fake->name, bo_fake->bo.size / 1024);
+
+ if (bo->virtual != NULL) {
+ drmMsg("%s: already mapped\n", __FUNCTION__);
+ abort();
+ } else if (bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)) {
+
+ if (!bo_fake->block && !evict_and_alloc_block(bo)) {
+ DBG("%s: alloc failed\n", __FUNCTION__);
+ bufmgr_fake->fail = 1;
+ return 1;
+ } else {
+ assert(bo_fake->block);
+ bo_fake->dirty = 0;
+
+ if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) &&
+ bo_fake->block->fenced) {
+ drm_intel_fake_bo_wait_rendering_locked
+ (bo);
+ }
+
+ bo->virtual = bo_fake->block->virtual;
+ }
+ } else {
+ if (write_enable)
+ set_dirty(bo);
+
+ if (bo_fake->backing_store == 0)
+ alloc_backing_store(bo);
+
+ if ((bo_fake->card_dirty == 1) && bo_fake->block) {
+ if (bo_fake->block->fenced)
+ drm_intel_fake_bo_wait_rendering_locked
+ (bo);
+
+ memcpy(bo_fake->backing_store,
+ bo_fake->block->virtual,
+ bo_fake->block->bo->size);
+ bo_fake->card_dirty = 0;
+ }
+
+ bo->virtual = bo_fake->backing_store;
+ }
+ }
+
+ return 0;
+}
+
+static int
+ drm_intel_fake_bo_map(drm_intel_bo *bo, int write_enable)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ int ret;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+ ret = drm_intel_fake_bo_map_locked(bo, write_enable);
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+
+ return ret;
+}
+
+static int
+ drm_intel_fake_bo_unmap_locked(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ /* Static buffers are always mapped. */
+ if (bo_fake->is_static)
+ return 0;
+
+ assert(bo_fake->map_count != 0);
+ if (--bo_fake->map_count != 0)
+ return 0;
+
+ DBG("drm_bo_unmap: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+ bo_fake->bo.size / 1024);
+
+ bo->virtual = NULL;
+
+ return 0;
+}
+
+static int drm_intel_fake_bo_unmap(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ int ret;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+ ret = drm_intel_fake_bo_unmap_locked(bo);
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+
+ return ret;
+}
+
+static void
+ drm_intel_fake_kick_all_locked(drm_intel_bufmgr_fake *bufmgr_fake)
+{
+ struct block *block, *tmp;
+
+ bufmgr_fake->performed_rendering = 0;
+ /* Okay, for every BO that is on the HW, kick it off;
+ seriously not afraid of the POLICE right now */
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
+
+ block->on_hardware = 0;
+ free_block(bufmgr_fake, block, 0);
+ bo_fake->block = NULL;
+ bo_fake->validated = 0;
+ if (!(bo_fake->flags & BM_NO_BACKING_STORE))
+ bo_fake->dirty = 1;
+ }
+
+}
+
+static int
+ drm_intel_fake_bo_validate(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+ bufmgr_fake = (drm_intel_bufmgr_fake *) bo->bufmgr;
+
+ DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id,
+ bo_fake->name, bo_fake->bo.size / 1024);
+
+ /* Sanity check: Buffers should be unmapped before being validated.
+ * This is not so much of a problem for bufmgr_fake, but TTM refuses,
+ * and the problem is harder to debug there.
+ */
+ assert(bo_fake->map_count == 0);
+
+ if (bo_fake->is_static) {
+ /* Add it to the needs-fence list */
+ bufmgr_fake->need_fence = 1;
+ return 0;
+ }
+
+ /* Allocate the card memory */
+ if (!bo_fake->block && !evict_and_alloc_block(bo)) {
+ bufmgr_fake->fail = 1;
+ DBG("Failed to validate buf %d:%s\n", bo_fake->id,
+ bo_fake->name);
+ return -1;
+ }
+
+ assert(bo_fake->block);
+ assert(bo_fake->block->bo == &bo_fake->bo);
+
+ bo->offset = bo_fake->block->mem->ofs;
+
+ /* Upload the buffer contents if necessary */
+ if (bo_fake->dirty) {
+ DBG("Upload dirty buf %d:%s, sz %d offset 0x%x\n", bo_fake->id,
+ bo_fake->name, bo->size, bo_fake->block->mem->ofs);
+
+ assert(!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)));
+
+ /* Actually, should be able to just wait for a fence on the
+ * memory, which we would be tracking when we free it. Waiting
+ * for idle is a sufficiently large hammer for now.
+ */
+ drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
+
+ /* We may never have mapped this BO, so it might not have
+ * any backing store. That should be rare, but zero the
+ * card memory in any case. */
+ if (bo_fake->backing_store)
+ memcpy(bo_fake->block->virtual, bo_fake->backing_store,
+ bo->size);
+ else
+ memset(bo_fake->block->virtual, 0, bo->size);
+
+ bo_fake->dirty = 0;
+ }
+
+ bo_fake->block->fenced = 0;
+ bo_fake->block->on_hardware = 1;
+ DRMLISTDEL(bo_fake->block);
+ DRMLISTADDTAIL(bo_fake->block, &bufmgr_fake->on_hardware);
+
+ bo_fake->validated = 1;
+ bufmgr_fake->need_fence = 1;
+
+ return 0;
+}
+
+static void
+drm_intel_fake_fence_validated(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+ unsigned int cookie;
+
+ cookie = _fence_emit_internal(bufmgr_fake);
+ fence_blocks(bufmgr_fake, cookie);
+
+ DBG("drm_fence_validated: 0x%08x cookie\n", cookie);
+}
+
+static void
+drm_intel_fake_destroy(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+
+ pthread_mutex_destroy(&bufmgr_fake->lock);
+ mmDestroy(bufmgr_fake->heap);
+ free(bufmgr);
+}
+
+static int
+drm_intel_fake_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ struct fake_buffer_reloc *r;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+ drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *) target_bo;
+ int i;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
+ assert(bo);
+ assert(target_bo);
+
+ if (bo_fake->relocs == NULL) {
+ bo_fake->relocs =
+ malloc(sizeof(struct fake_buffer_reloc) * MAX_RELOCS);
+ }
+
+ r = &bo_fake->relocs[bo_fake->nr_relocs++];
+
+ assert(bo_fake->nr_relocs <= MAX_RELOCS);
+
+ drm_intel_fake_bo_reference_locked(target_bo);
+
+ if (!target_fake->is_static) {
+ bo_fake->child_size +=
+ ALIGN(target_bo->size, target_fake->alignment);
+ bo_fake->child_size += target_fake->child_size;
+ }
+ r->target_buf = target_bo;
+ r->offset = offset;
+ r->last_target_offset = target_bo->offset;
+ r->delta = target_offset;
+ r->read_domains = read_domains;
+ r->write_domain = write_domain;
+
+ if (bufmgr_fake->debug) {
+ /* Check that a conflicting relocation hasn't already been
+ * emitted.
+ */
+ for (i = 0; i < bo_fake->nr_relocs - 1; i++) {
+ struct fake_buffer_reloc *r2 = &bo_fake->relocs[i];
+
+ assert(r->offset != r2->offset);
+ }
+ }
+
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+
+ return 0;
+}
+
+/**
+ * Incorporates the validation flags associated with each relocation into
+ * the combined validation flags for the buffer on this batchbuffer submission.
+ */
+static void
+drm_intel_fake_calculate_domains(drm_intel_bo *bo)
+{
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+ int i;
+
+ for (i = 0; i < bo_fake->nr_relocs; i++) {
+ struct fake_buffer_reloc *r = &bo_fake->relocs[i];
+ drm_intel_bo_fake *target_fake =
+ (drm_intel_bo_fake *) r->target_buf;
+
+ /* Do the same for the tree of buffers we depend on */
+ drm_intel_fake_calculate_domains(r->target_buf);
+
+ target_fake->read_domains |= r->read_domains;
+ target_fake->write_domain |= r->write_domain;
+ }
+}
+
+static int
+drm_intel_fake_reloc_and_validate_buffer(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+ int i, ret;
+
+ assert(bo_fake->map_count == 0);
+
+ for (i = 0; i < bo_fake->nr_relocs; i++) {
+ struct fake_buffer_reloc *r = &bo_fake->relocs[i];
+ drm_intel_bo_fake *target_fake =
+ (drm_intel_bo_fake *) r->target_buf;
+ uint32_t reloc_data;
+
+ /* Validate the target buffer if that hasn't been done. */
+ if (!target_fake->validated) {
+ ret =
+ drm_intel_fake_reloc_and_validate_buffer(r->target_buf);
+ if (ret != 0) {
+ if (bo->virtual != NULL)
+ drm_intel_fake_bo_unmap_locked(bo);
+ return ret;
+ }
+ }
+
+ /* Calculate the value of the relocation entry. */
+ if (r->target_buf->offset != r->last_target_offset) {
+ reloc_data = r->target_buf->offset + r->delta;
+
+ if (bo->virtual == NULL)
+ drm_intel_fake_bo_map_locked(bo, 1);
+
+ *(uint32_t *) ((uint8_t *) bo->virtual + r->offset) =
+ reloc_data;
+
+ r->last_target_offset = r->target_buf->offset;
+ }
+ }
+
+ if (bo->virtual != NULL)
+ drm_intel_fake_bo_unmap_locked(bo);
+
+ if (bo_fake->write_domain != 0) {
+ if (!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED))) {
+ if (bo_fake->backing_store == 0)
+ alloc_backing_store(bo);
+ }
+ bo_fake->card_dirty = 1;
+ bufmgr_fake->performed_rendering = 1;
+ }
+
+ return drm_intel_fake_bo_validate(bo);
+}
+
+static void
+drm_intel_bo_fake_post_submit(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+ int i;
+
+ for (i = 0; i < bo_fake->nr_relocs; i++) {
+ struct fake_buffer_reloc *r = &bo_fake->relocs[i];
+ drm_intel_bo_fake *target_fake =
+ (drm_intel_bo_fake *) r->target_buf;
+
+ if (target_fake->validated)
+ drm_intel_bo_fake_post_submit(r->target_buf);
+
+ DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n",
+ bo_fake->name, (uint32_t) bo->offset, r->offset,
+ target_fake->name, (uint32_t) r->target_buf->offset,
+ r->delta);
+ }
+
+ assert(bo_fake->map_count == 0);
+ bo_fake->validated = 0;
+ bo_fake->read_domains = 0;
+ bo_fake->write_domain = 0;
+}
+
+void
+drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
+ int (*exec) (drm_intel_bo *bo,
+ unsigned int used,
+ void *priv),
+ void *priv)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+
+ bufmgr_fake->exec = exec;
+ bufmgr_fake->exec_priv = priv;
+}
+
+static int
+drm_intel_fake_bo_exec(drm_intel_bo *bo, int used,
+ drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo->bufmgr;
+ drm_intel_bo_fake *batch_fake = (drm_intel_bo_fake *) bo;
+ struct drm_i915_batchbuffer batch;
+ int ret;
+ int retry_count = 0;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
+ bufmgr_fake->performed_rendering = 0;
+
+ drm_intel_fake_calculate_domains(bo);
+
+ batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;
+
+	/* we've run out of RAM so blow the whole lot away and retry */
+restart:
+ ret = drm_intel_fake_reloc_and_validate_buffer(bo);
+ if (bufmgr_fake->fail == 1) {
+ if (retry_count == 0) {
+ retry_count++;
+ drm_intel_fake_kick_all_locked(bufmgr_fake);
+ bufmgr_fake->fail = 0;
+ goto restart;
+ } else /* dump out the memory here */
+ mmDumpMemInfo(bufmgr_fake->heap);
+ }
+
+ assert(ret == 0);
+
+ if (bufmgr_fake->exec != NULL) {
+ int ret = bufmgr_fake->exec(bo, used, bufmgr_fake->exec_priv);
+ if (ret != 0) {
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+ return ret;
+ }
+ } else {
+ batch.start = bo->offset;
+ batch.used = used;
+ batch.cliprects = cliprects;
+ batch.num_cliprects = num_cliprects;
+ batch.DR1 = 0;
+ batch.DR4 = DR4;
+
+ if (drmCommandWrite
+ (bufmgr_fake->fd, DRM_I915_BATCHBUFFER, &batch,
+ sizeof(batch))) {
+ drmMsg("DRM_I915_BATCHBUFFER: %d\n", -errno);
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+ return -errno;
+ }
+ }
+
+ drm_intel_fake_fence_validated(bo->bufmgr);
+
+ drm_intel_bo_fake_post_submit(bo);
+
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+
+ return 0;
+}
+
+/**
+ * Return an error if the list of BOs will exceed the aperture size.
+ *
+ * This is a rough guess and likely to fail, as during the validate sequence we
+ * may place a buffer in an inopportune spot early on and then fail to fit
+ * a set smaller than the aperture.
+ */
+static int
+drm_intel_fake_check_aperture_space(drm_intel_bo ** bo_array, int count)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake =
+ (drm_intel_bufmgr_fake *) bo_array[0]->bufmgr;
+ unsigned int sz = 0;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo_array[i];
+
+ if (bo_fake == NULL)
+ continue;
+
+ if (!bo_fake->is_static)
+ sz += ALIGN(bo_array[i]->size, bo_fake->alignment);
+ sz += bo_fake->child_size;
+ }
+
+ if (sz > bufmgr_fake->size) {
+ DBG("check_space: overflowed bufmgr size, %dkb vs %dkb\n",
+ sz / 1024, bufmgr_fake->size / 1024);
+ return -1;
+ }
+
+	DBG("drm_check_space: sz %dkb vs bufmgr %dkb\n", sz / 1024,
+ bufmgr_fake->size / 1024);
+ return 0;
+}
+
+/**
+ * Evicts all buffers, waiting for fences to pass and copying contents out
+ * as necessary.
+ *
+ * Used by the X Server on LeaveVT, when the card memory is no longer our
+ * own.
+ */
+void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+ struct block *block, *tmp;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
+ bufmgr_fake->need_fence = 1;
+ bufmgr_fake->fail = 0;
+
+ /* Wait for hardware idle. We don't know where acceleration has been
+ * happening, so we'll need to wait anyway before letting anything get
+ * put on the card again.
+ */
+ drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
+
+	/* Check that we haven't released the lock without having fenced the last
+ * set of buffers.
+ */
+ assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
+ assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
+ /* Releases the memory, and memcpys dirty contents out if
+ * necessary.
+ */
+ free_block(bufmgr_fake, block, 0);
+ bo_fake->block = NULL;
+ }
+
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+}
+
+void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
+ volatile unsigned int
+ *last_dispatch)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+
+ bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
+}
+
+drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
+ unsigned long low_offset,
+ void *low_virtual,
+ unsigned long size,
+ volatile unsigned int
+ *last_dispatch)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake;
+
+ bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
+
+ if (pthread_mutex_init(&bufmgr_fake->lock, NULL) != 0) {
+ free(bufmgr_fake);
+ return NULL;
+ }
+
+ /* Initialize allocator */
+ DRMINITLISTHEAD(&bufmgr_fake->fenced);
+ DRMINITLISTHEAD(&bufmgr_fake->on_hardware);
+ DRMINITLISTHEAD(&bufmgr_fake->lru);
+
+ bufmgr_fake->low_offset = low_offset;
+ bufmgr_fake->virtual = low_virtual;
+ bufmgr_fake->size = size;
+ bufmgr_fake->heap = mmInit(low_offset, size);
+
+ /* Hook in methods */
+ bufmgr_fake->bufmgr.bo_alloc = drm_intel_fake_bo_alloc;
+ bufmgr_fake->bufmgr.bo_alloc_for_render = drm_intel_fake_bo_alloc;
+ bufmgr_fake->bufmgr.bo_alloc_tiled = drm_intel_fake_bo_alloc_tiled;
+ bufmgr_fake->bufmgr.bo_reference = drm_intel_fake_bo_reference;
+ bufmgr_fake->bufmgr.bo_unreference = drm_intel_fake_bo_unreference;
+ bufmgr_fake->bufmgr.bo_map = drm_intel_fake_bo_map;
+ bufmgr_fake->bufmgr.bo_unmap = drm_intel_fake_bo_unmap;
+ bufmgr_fake->bufmgr.bo_wait_rendering =
+ drm_intel_fake_bo_wait_rendering;
+ bufmgr_fake->bufmgr.bo_emit_reloc = drm_intel_fake_emit_reloc;
+ bufmgr_fake->bufmgr.destroy = drm_intel_fake_destroy;
+ bufmgr_fake->bufmgr.bo_exec = drm_intel_fake_bo_exec;
+ bufmgr_fake->bufmgr.check_aperture_space =
+ drm_intel_fake_check_aperture_space;
+ bufmgr_fake->bufmgr.debug = 0;
+
+ bufmgr_fake->fd = fd;
+ bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
+
+ return &bufmgr_fake->bufmgr;
+}
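
Before the next file, a hedged sketch of how a driver might bring the fake buffer manager up and route submission through its own path via the exec callback set above; the aperture offset/size, the mapping pointer and the callback body are placeholder assumptions, not values taken from this patch:

	static volatile unsigned int last_dispatch;	/* stand-in for the HW status page slot */

	static int my_exec(drm_intel_bo *batch_bo, unsigned int used, void *priv)
	{
		/* Hypothetical driver hook: hand the already-relocated batch to a
		 * private submission path instead of DRM_I915_BATCHBUFFER. */
		return 0;
	}

	/* Placeholder aperture: 16 MB of card memory starting at offset 0,
	 * already mapped at aperture_virtual. */
	drm_intel_bufmgr *bufmgr =
		drm_intel_bufmgr_fake_init(fd, 0, aperture_virtual,
					   16 * 1024 * 1024, &last_dispatch);
	drm_intel_bufmgr_fake_set_exec_callback(bufmgr, my_exec, NULL);
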
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
new file mode 100644
index 00000000..87795f33
--- /dev/null
+++ b/intel/intel_bufmgr_gem.c
@@ -0,0 +1,1722 @@
+/**************************************************************************
+ *
+ * Copyright © 2007 Red Hat Inc.
+ * Copyright © 2007 Intel Corporation
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <xf86drm.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <pthread.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "errno.h"
+#include "libdrm_lists.h"
+#include "intel_atomic.h"
+#include "intel_bufmgr.h"
+#include "intel_bufmgr_priv.h"
+#include "intel_chipset.h"
+#include "string.h"
+
+#include "i915_drm.h"
+
+#define DBG(...) do { \
+ if (bufmgr_gem->bufmgr.debug) \
+ fprintf(stderr, __VA_ARGS__); \
+} while (0)
+
+typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
+
+struct drm_intel_gem_bo_bucket {
+ drmMMListHead head;
+ unsigned long size;
+};
+
+/* Only cache objects up to 64MB. Bigger than that, and the rounding of the
+ * size makes many operations fail that wouldn't otherwise.
+ */
+#define DRM_INTEL_GEM_BO_BUCKETS 14
+typedef struct _drm_intel_bufmgr_gem {
+ drm_intel_bufmgr bufmgr;
+
+ int fd;
+
+ int max_relocs;
+
+ pthread_mutex_t lock;
+
+ struct drm_i915_gem_exec_object *exec_objects;
+ drm_intel_bo **exec_bos;
+ int exec_size;
+ int exec_count;
+
+ /** Array of lists of cached gem objects of power-of-two sizes */
+ struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];
+
+ uint64_t gtt_size;
+ int available_fences;
+ int pci_device;
+ char bo_reuse;
+} drm_intel_bufmgr_gem;
+
+struct _drm_intel_bo_gem {
+ drm_intel_bo bo;
+
+ atomic_t refcount;
+ uint32_t gem_handle;
+ const char *name;
+
+ /**
+	 * Kernel-assigned global name for this object
+ */
+ unsigned int global_name;
+
+ /**
+ * Index of the buffer within the validation list while preparing a
+ * batchbuffer execution.
+ */
+ int validate_index;
+
+ /**
+ * Current tiling mode
+ */
+ uint32_t tiling_mode;
+ uint32_t swizzle_mode;
+
+ time_t free_time;
+
+ /** Array passed to the DRM containing relocation information. */
+ struct drm_i915_gem_relocation_entry *relocs;
+ /** Array of bos corresponding to relocs[i].target_handle */
+ drm_intel_bo **reloc_target_bo;
+ /** Number of entries in relocs */
+ int reloc_count;
+ /** Mapped address for the buffer, saved across map/unmap cycles */
+ void *mem_virtual;
+ /** GTT virtual address for the buffer, saved across map/unmap cycles */
+ void *gtt_virtual;
+
+ /** BO cache list */
+ drmMMListHead head;
+
+ /**
+ * Boolean of whether this BO and its children have been included in
+ * the current drm_intel_bufmgr_check_aperture_space() total.
+ */
+ char included_in_check_aperture;
+
+ /**
+ * Boolean of whether this buffer has been used as a relocation
+ * target and had its size accounted for, and thus can't have any
+ * further relocations added to it.
+ */
+ char used_as_reloc_target;
+
+ /**
+ * Boolean of whether this buffer can be re-used
+ */
+ char reusable;
+
+ /**
+	 * Size in bytes of this buffer and its relocation descendants.
+ *
+ * Used to avoid costly tree walking in
+ * drm_intel_bufmgr_check_aperture in the common case.
+ */
+ int reloc_tree_size;
+
+ /**
+ * Number of potential fence registers required by this buffer and its
+ * relocations.
+ */
+ int reloc_tree_fences;
+};
+
+static unsigned int
+drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
+
+static unsigned int
+drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
+
+static int
+drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t * swizzle_mode);
+
+static int
+drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t stride);
+
+static void drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo);
+static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
+ time_t time);
+
+static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
+
+static void drm_intel_gem_bo_free(drm_intel_bo *bo);
+
+static unsigned long
+drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
+ uint32_t *tiling_mode)
+{
+ unsigned long min_size, max_size;
+ unsigned long i;
+
+ if (*tiling_mode == I915_TILING_NONE)
+ return size;
+
+ /* 965+ just need multiples of page size for tiling */
+ if (IS_I965G(bufmgr_gem))
+ return ROUND_UP_TO(size, 4096);
+
+ /* Older chips need powers of two, of at least 512k or 1M */
+ if (IS_I9XX(bufmgr_gem)) {
+ min_size = 1024*1024;
+ max_size = 128*1024*1024;
+ } else {
+ min_size = 512*1024;
+ max_size = 64*1024*1024;
+ }
+
+ if (size > max_size) {
+ *tiling_mode = I915_TILING_NONE;
+ return size;
+ }
+
+ for (i = min_size; i < size; i <<= 1)
+ ;
+
+ return i;
+}
+
+/*
+ * Round a given pitch up to the minimum required for X tiling on a
+ * given chip. We use 512 as the minimum to allow for a later tiling
+ * change.
+ */
+static unsigned long
+drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
+ unsigned long pitch, uint32_t tiling_mode)
+{
+ unsigned long tile_width = 512;
+ unsigned long i;
+
+ if (tiling_mode == I915_TILING_NONE)
+ return ROUND_UP_TO(pitch, tile_width);
+
+ /* 965 is flexible */
+ if (IS_I965G(bufmgr_gem))
+ return ROUND_UP_TO(pitch, tile_width);
+
+ /* Pre-965 needs power of two tile width */
+ for (i = tile_width; i < pitch; i <<= 1)
+ ;
+
+ return i;
+}
+
+static struct drm_intel_gem_bo_bucket *
+drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
+ unsigned long size)
+{
+ int i;
+
+ for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
+ struct drm_intel_gem_bo_bucket *bucket =
+ &bufmgr_gem->cache_bucket[i];
+ if (bucket->size >= size) {
+ return bucket;
+ }
+ }
+
+ return NULL;
+}
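
For reference, with the bucket sizes set up by drm_intel_bufmgr_gem_init() later in this file (starting at 4096 bytes and doubling per bucket), the search above returns the first bucket at least as large as the request — a sketch, not code from the patch:

	/* Buckets are 4096, 8192, 16384, ... bytes.  A 5000-byte request
	 * therefore lands in the 8192-byte bucket, and any BO recycled or
	 * created for it will be 8192 bytes. */
	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, 5000);
	/* bucket->size == 8192 */
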
+
+static void
+drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+ int i, j;
+
+ for (i = 0; i < bufmgr_gem->exec_count; i++) {
+ drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ if (bo_gem->relocs == NULL) {
+ DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
+ bo_gem->name);
+ continue;
+ }
+
+ for (j = 0; j < bo_gem->reloc_count; j++) {
+ drm_intel_bo *target_bo = bo_gem->reloc_target_bo[j];
+ drm_intel_bo_gem *target_gem =
+ (drm_intel_bo_gem *) target_bo;
+
+ DBG("%2d: %d (%s)@0x%08llx -> "
+ "%d (%s)@0x%08lx + 0x%08x\n",
+ i,
+ bo_gem->gem_handle, bo_gem->name,
+ (unsigned long long)bo_gem->relocs[j].offset,
+ target_gem->gem_handle,
+ target_gem->name,
+ target_bo->offset,
+ bo_gem->relocs[j].delta);
+ }
+ }
+}
+
+static void
+drm_intel_gem_bo_reference(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ assert(atomic_read(&bo_gem->refcount) > 0);
+ atomic_inc(&bo_gem->refcount);
+}
+
+/**
+ * Adds the given buffer to the list of buffers to be validated (moved into the
+ * appropriate memory type) with the next batch submission.
+ *
+ * If a buffer is validated multiple times in a batch submission, it ends up
+ * with the intersection of the memory type flags and the union of the
+ * access flags.
+ */
+static void
+drm_intel_add_validate_buffer(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int index;
+
+ if (bo_gem->validate_index != -1)
+ return;
+
+ /* Extend the array of validation entries as necessary. */
+ if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
+ int new_size = bufmgr_gem->exec_size * 2;
+
+ if (new_size == 0)
+ new_size = 5;
+
+ bufmgr_gem->exec_objects =
+ realloc(bufmgr_gem->exec_objects,
+ sizeof(*bufmgr_gem->exec_objects) * new_size);
+ bufmgr_gem->exec_bos =
+ realloc(bufmgr_gem->exec_bos,
+ sizeof(*bufmgr_gem->exec_bos) * new_size);
+ bufmgr_gem->exec_size = new_size;
+ }
+
+ index = bufmgr_gem->exec_count;
+ bo_gem->validate_index = index;
+ /* Fill in array entry */
+ bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
+ bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
+ bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
+ bufmgr_gem->exec_objects[index].alignment = 0;
+ bufmgr_gem->exec_objects[index].offset = 0;
+ bufmgr_gem->exec_bos[index] = bo;
+ drm_intel_gem_bo_reference(bo);
+ bufmgr_gem->exec_count++;
+}
+
+#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
+ sizeof(uint32_t))
+
+static int
+drm_intel_setup_reloc_list(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ unsigned int max_relocs = bufmgr_gem->max_relocs;
+
+ if (bo->size / 4 < max_relocs)
+ max_relocs = bo->size / 4;
+
+ bo_gem->relocs = malloc(max_relocs *
+ sizeof(struct drm_i915_gem_relocation_entry));
+ bo_gem->reloc_target_bo = malloc(max_relocs * sizeof(drm_intel_bo *));
+
+ return 0;
+}
+
+static int
+drm_intel_gem_bo_busy(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_busy busy;
+ int ret;
+
+ memset(&busy, 0, sizeof(busy));
+ busy.handle = bo_gem->gem_handle;
+
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
+
+ return (ret == 0 && busy.busy);
+}
+
+static int
+drm_intel_gem_bo_madvise(drm_intel_bufmgr_gem *bufmgr_gem,
+ drm_intel_bo_gem *bo_gem, int state)
+{
+ struct drm_i915_gem_madvise madv;
+
+ madv.handle = bo_gem->gem_handle;
+ madv.madv = state;
+ madv.retained = 1;
+ ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
+
+ return madv.retained;
+}
+
+/* drop the oldest entries that have been purged by the kernel */
+static void
+drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
+ struct drm_intel_gem_bo_bucket *bucket)
+{
+ while (!DRMLISTEMPTY(&bucket->head)) {
+ drm_intel_bo_gem *bo_gem;
+
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+ bucket->head.next, head);
+ if (drm_intel_gem_bo_madvise
+ (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
+ break;
+
+ DRMLISTDEL(&bo_gem->head);
+ drm_intel_gem_bo_free(&bo_gem->bo);
+ }
+}
+
+static drm_intel_bo *
+drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned long flags)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+ drm_intel_bo_gem *bo_gem;
+ unsigned int page_size = getpagesize();
+ int ret;
+ struct drm_intel_gem_bo_bucket *bucket;
+ int alloc_from_cache;
+ unsigned long bo_size;
+ int for_render = 0;
+
+ if (flags & BO_ALLOC_FOR_RENDER)
+ for_render = 1;
+
+ /* Round the allocated size up to a power of two number of pages. */
+ bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
+
+ /* If we don't have caching at this size, don't actually round the
+ * allocation up.
+ */
+ if (bucket == NULL) {
+ bo_size = size;
+ if (bo_size < page_size)
+ bo_size = page_size;
+ } else {
+ bo_size = bucket->size;
+ }
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ /* Get a buffer out of the cache if available */
+retry:
+ alloc_from_cache = 0;
+ if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
+ if (for_render) {
+ /* Allocate new render-target BOs from the tail (MRU)
+ * of the list, as it will likely be hot in the GPU
+ * cache and in the aperture for us.
+ */
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+ bucket->head.prev, head);
+ DRMLISTDEL(&bo_gem->head);
+ alloc_from_cache = 1;
+ } else {
+ /* For non-render-target BOs (where we're probably
+ * going to map it first thing in order to fill it
+ * with data), check if the last BO in the cache is
+ * unbusy, and only reuse in that case. Otherwise,
+ * allocating a new buffer is probably faster than
+ * waiting for the GPU to finish.
+ */
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+ bucket->head.next, head);
+ if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
+ alloc_from_cache = 1;
+ DRMLISTDEL(&bo_gem->head);
+ }
+ }
+
+ if (alloc_from_cache) {
+ if (!drm_intel_gem_bo_madvise
+ (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
+ drm_intel_gem_bo_free(&bo_gem->bo);
+ drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
+ bucket);
+ goto retry;
+ }
+ }
+ }
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ if (!alloc_from_cache) {
+ struct drm_i915_gem_create create;
+
+ bo_gem = calloc(1, sizeof(*bo_gem));
+ if (!bo_gem)
+ return NULL;
+
+ bo_gem->bo.size = bo_size;
+ memset(&create, 0, sizeof(create));
+ create.size = bo_size;
+
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
+ bo_gem->gem_handle = create.handle;
+ bo_gem->bo.handle = bo_gem->gem_handle;
+ if (ret != 0) {
+ free(bo_gem);
+ return NULL;
+ }
+ bo_gem->bo.bufmgr = bufmgr;
+ }
+
+ bo_gem->name = name;
+ atomic_set(&bo_gem->refcount, 1);
+ bo_gem->validate_index = -1;
+ bo_gem->reloc_tree_size = bo_gem->bo.size;
+ bo_gem->reloc_tree_fences = 0;
+ bo_gem->used_as_reloc_target = 0;
+ bo_gem->tiling_mode = I915_TILING_NONE;
+ bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ bo_gem->reusable = 1;
+
+ DBG("bo_create: buf %d (%s) %ldb\n",
+ bo_gem->gem_handle, bo_gem->name, size);
+
+ return &bo_gem->bo;
+}
+
+static drm_intel_bo *
+drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned int alignment)
+{
+ return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
+ BO_ALLOC_FOR_RENDER);
+}
+
+static drm_intel_bo *
+drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned int alignment)
+{
+ return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0);
+}
+
+static drm_intel_bo *
+drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
+ int x, int y, int cpp, uint32_t *tiling_mode,
+ unsigned long *pitch, unsigned long flags)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+ drm_intel_bo *bo;
+ unsigned long size, stride, aligned_y = y;
+ int ret;
+
+ if (*tiling_mode == I915_TILING_NONE)
+ aligned_y = ALIGN(y, 2);
+ else if (*tiling_mode == I915_TILING_X)
+ aligned_y = ALIGN(y, 8);
+ else if (*tiling_mode == I915_TILING_Y)
+ aligned_y = ALIGN(y, 32);
+
+ stride = x * cpp;
+ stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, *tiling_mode);
+ size = stride * aligned_y;
+ size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
+
+ bo = drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags);
+ if (!bo)
+ return NULL;
+
+ ret = drm_intel_gem_bo_set_tiling(bo, tiling_mode, stride);
+ if (ret != 0) {
+ drm_intel_gem_bo_unreference(bo);
+ return NULL;
+ }
+
+ *pitch = stride;
+
+ return bo;
+}
+
+/**
+ * Returns a drm_intel_bo wrapping the given buffer object handle.
+ *
+ * This can be used when one application needs to pass a buffer object
+ * to another.
+ */
+drm_intel_bo *
+drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned int handle)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+ drm_intel_bo_gem *bo_gem;
+ int ret;
+ struct drm_gem_open open_arg;
+ struct drm_i915_gem_get_tiling get_tiling;
+
+ bo_gem = calloc(1, sizeof(*bo_gem));
+ if (!bo_gem)
+ return NULL;
+
+ memset(&open_arg, 0, sizeof(open_arg));
+ open_arg.name = handle;
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
+ if (ret != 0) {
+ fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
+ name, handle, strerror(errno));
+ free(bo_gem);
+ return NULL;
+ }
+ bo_gem->bo.size = open_arg.size;
+ bo_gem->bo.offset = 0;
+ bo_gem->bo.virtual = NULL;
+ bo_gem->bo.bufmgr = bufmgr;
+ bo_gem->name = name;
+ atomic_set(&bo_gem->refcount, 1);
+ bo_gem->validate_index = -1;
+ bo_gem->gem_handle = open_arg.handle;
+ bo_gem->global_name = handle;
+ bo_gem->reusable = 0;
+
+ memset(&get_tiling, 0, sizeof(get_tiling));
+ get_tiling.handle = bo_gem->gem_handle;
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
+ if (ret != 0) {
+ drm_intel_gem_bo_unreference(&bo_gem->bo);
+ return NULL;
+ }
+ bo_gem->tiling_mode = get_tiling.tiling_mode;
+ bo_gem->swizzle_mode = get_tiling.swizzle_mode;
+ if (bo_gem->tiling_mode == I915_TILING_NONE)
+ bo_gem->reloc_tree_fences = 0;
+ else
+ bo_gem->reloc_tree_fences = 1;
+
+ DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
+
+ return &bo_gem->bo;
+}
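
A hedged sketch of the intended pairing with drm_intel_gem_bo_flink() below: one process publishes a global name for a BO, another wraps it by name. The bo/bufmgr variables and the "shared" label are assumptions; both wrappers are declared in intel_bufmgr.h:

	/* Process A: publish a global (flink) name for an existing BO. */
	uint32_t name;
	drm_intel_bo_flink(bo, &name);
	/* ...hand `name` to process B out of band... */

	/* Process B: wrap the shared object given the name from A. */
	drm_intel_bo *shared =
		drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
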
+
+static void
+drm_intel_gem_bo_free(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_gem_close close;
+ int ret;
+
+ if (bo_gem->mem_virtual)
+ munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+ if (bo_gem->gtt_virtual)
+ munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+
+ free(bo_gem->reloc_target_bo);
+ free(bo_gem->relocs);
+
+ /* Close this object */
+ memset(&close, 0, sizeof(close));
+ close.handle = bo_gem->gem_handle;
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
+ if (ret != 0) {
+ fprintf(stderr,
+ "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
+ bo_gem->gem_handle, bo_gem->name, strerror(errno));
+ }
+ free(bo);
+}
+
+/** Frees all cached buffers significantly older than @time. */
+static void
+drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
+{
+ int i;
+
+ for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
+ struct drm_intel_gem_bo_bucket *bucket =
+ &bufmgr_gem->cache_bucket[i];
+
+ while (!DRMLISTEMPTY(&bucket->head)) {
+ drm_intel_bo_gem *bo_gem;
+
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+ bucket->head.next, head);
+ if (time - bo_gem->free_time <= 1)
+ break;
+
+ DRMLISTDEL(&bo_gem->head);
+
+ drm_intel_gem_bo_free(&bo_gem->bo);
+ }
+ }
+}
+
+static void
+drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_intel_gem_bo_bucket *bucket;
+ uint32_t tiling_mode;
+ int i;
+
+ /* Unreference all the target buffers */
+ for (i = 0; i < bo_gem->reloc_count; i++) {
+ drm_intel_gem_bo_unreference_locked_timed(bo_gem->
+ reloc_target_bo[i],
+ time);
+ }
+
+ DBG("bo_unreference final: %d (%s)\n",
+ bo_gem->gem_handle, bo_gem->name);
+
+ bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
+ /* Put the buffer into our internal cache for reuse if we can. */
+ tiling_mode = I915_TILING_NONE;
+ if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
+ drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0) {
+ bo_gem->free_time = time;
+
+ bo_gem->name = NULL;
+ bo_gem->validate_index = -1;
+ bo_gem->reloc_count = 0;
+
+ DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
+
+ drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem,
+ I915_MADV_DONTNEED);
+ drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time);
+ } else {
+ drm_intel_gem_bo_free(bo);
+ }
+}
+
+static void drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ assert(atomic_read(&bo_gem->refcount) > 0);
+ if (atomic_dec_and_test(&bo_gem->refcount)) {
+ struct timespec time;
+
+ clock_gettime(CLOCK_MONOTONIC, &time);
+ drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
+ }
+}
+
+static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
+ time_t time)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ assert(atomic_read(&bo_gem->refcount) > 0);
+ if (atomic_dec_and_test(&bo_gem->refcount))
+ drm_intel_gem_bo_unreference_final(bo, time);
+}
+
+static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ assert(atomic_read(&bo_gem->refcount) > 0);
+ if (atomic_dec_and_test(&bo_gem->refcount)) {
+ drm_intel_bufmgr_gem *bufmgr_gem =
+ (drm_intel_bufmgr_gem *) bo->bufmgr;
+ struct timespec time;
+
+ clock_gettime(CLOCK_MONOTONIC, &time);
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ }
+}
+
+static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_set_domain set_domain;
+ int ret;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
+ /* Allow recursive mapping. Mesa may recursively map buffers with
+ * nested display loops.
+ */
+ if (!bo_gem->mem_virtual) {
+ struct drm_i915_gem_mmap mmap_arg;
+
+ DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
+
+ memset(&mmap_arg, 0, sizeof(mmap_arg));
+ mmap_arg.handle = bo_gem->gem_handle;
+ mmap_arg.offset = 0;
+ mmap_arg.size = bo->size;
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+ if (ret != 0) {
+ fprintf(stderr,
+ "%s:%d: Error mapping buffer %d (%s): %s .\n",
+ __FILE__, __LINE__, bo_gem->gem_handle,
+ bo_gem->name, strerror(errno));
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return ret;
+ }
+ bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
+ }
+ DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
+ bo_gem->mem_virtual);
+ bo->virtual = bo_gem->mem_virtual;
+
+ set_domain.handle = bo_gem->gem_handle;
+ set_domain.read_domains = I915_GEM_DOMAIN_CPU;
+ if (write_enable)
+ set_domain.write_domain = I915_GEM_DOMAIN_CPU;
+ else
+ set_domain.write_domain = 0;
+ do {
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
+ &set_domain);
+ } while (ret == -1 && errno == EINTR);
+ if (ret != 0) {
+ fprintf(stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
+ __FILE__, __LINE__, bo_gem->gem_handle,
+ strerror(errno));
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return ret;
+ }
+
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return 0;
+}
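
A short usage sketch of the CPU mapping path through the public wrappers in intel_bufmgr.h (drm_intel_bo_alloc/map/unmap); bufmgr, data and data_size are placeholders:

	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "vertices", 4096, 4096);

	/* Map for writing; this waits for any GPU use of the BO and moves it
	 * to the CPU domain before returning. */
	drm_intel_bo_map(bo, 1);
	memcpy(bo->virtual, data, data_size);
	drm_intel_bo_unmap(bo);
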
+
+int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_set_domain set_domain;
+ int ret;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
+ /* Get a mapping of the buffer if we haven't before. */
+ if (bo_gem->gtt_virtual == NULL) {
+ struct drm_i915_gem_mmap_gtt mmap_arg;
+
+ DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
+ bo_gem->name);
+
+ memset(&mmap_arg, 0, sizeof(mmap_arg));
+ mmap_arg.handle = bo_gem->gem_handle;
+
+ /* Get the fake offset back... */
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT,
+ &mmap_arg);
+ if (ret != 0) {
+ fprintf(stderr,
+ "%s:%d: Error preparing buffer map %d (%s): %s .\n",
+ __FILE__, __LINE__,
+ bo_gem->gem_handle, bo_gem->name,
+ strerror(errno));
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return ret;
+ }
+
+ /* and mmap it */
+ bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, bufmgr_gem->fd,
+ mmap_arg.offset);
+ if (bo_gem->gtt_virtual == MAP_FAILED) {
+ fprintf(stderr,
+ "%s:%d: Error mapping buffer %d (%s): %s .\n",
+ __FILE__, __LINE__,
+ bo_gem->gem_handle, bo_gem->name,
+ strerror(errno));
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return errno;
+ }
+ }
+
+ bo->virtual = bo_gem->gtt_virtual;
+
+ DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
+ bo_gem->gtt_virtual);
+
+ /* Now move it to the GTT domain so that the CPU caches are flushed */
+ set_domain.handle = bo_gem->gem_handle;
+ set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+ set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+ do {
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
+ &set_domain);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret != 0) {
+ fprintf(stderr, "%s:%d: Error setting domain %d: %s\n",
+ __FILE__, __LINE__, bo_gem->gem_handle,
+ strerror(errno));
+ }
+
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return 0;
+}
+
+int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int ret = 0;
+
+ if (bo == NULL)
+ return 0;
+
+ assert(bo_gem->gtt_virtual != NULL);
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ bo->virtual = NULL;
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return ret;
+}
+
+static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_sw_finish sw_finish;
+ int ret;
+
+ if (bo == NULL)
+ return 0;
+
+ assert(bo_gem->mem_virtual != NULL);
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
+ /* Cause a flush to happen if the buffer's pinned for scanout, so the
+ * results show up in a timely manner.
+ */
+ sw_finish.handle = bo_gem->gem_handle;
+ do {
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH,
+ &sw_finish);
+ } while (ret == -1 && errno == EINTR);
+
+ bo->virtual = NULL;
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return 0;
+}
+
+static int
+drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_pwrite pwrite;
+ int ret;
+
+ memset(&pwrite, 0, sizeof(pwrite));
+ pwrite.handle = bo_gem->gem_handle;
+ pwrite.offset = offset;
+ pwrite.size = size;
+ pwrite.data_ptr = (uint64_t) (uintptr_t) data;
+ do {
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
+ } while (ret == -1 && errno == EINTR);
+ if (ret != 0) {
+ fprintf(stderr,
+ "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
+ __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
+ (int)size, strerror(errno));
+ }
+ return 0;
+}
+
+static int
+drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+ struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
+ int ret;
+
+ get_pipe_from_crtc_id.crtc_id = crtc_id;
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
+ &get_pipe_from_crtc_id);
+ if (ret != 0) {
+ /* We return -1 here to signal that we don't
+ * know which pipe is associated with this crtc.
+ * This lets the caller know that this information
+ * isn't available; using the wrong pipe for
+ * vblank waiting can cause the chipset to lock up
+ */
+ return -1;
+ }
+
+ return get_pipe_from_crtc_id.pipe;
+}
+
+static int
+drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_pread pread;
+ int ret;
+
+ memset(&pread, 0, sizeof(pread));
+ pread.handle = bo_gem->gem_handle;
+ pread.offset = offset;
+ pread.size = size;
+ pread.data_ptr = (uint64_t) (uintptr_t) data;
+ do {
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
+ } while (ret == -1 && errno == EINTR);
+ if (ret != 0) {
+ fprintf(stderr,
+ "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
+ __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
+ (int)size, strerror(errno));
+ }
+ return 0;
+}
+
+/** Waits for all GPU rendering to the object to have completed. */
+static void
+drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
+{
+ drm_intel_gem_bo_start_gtt_access(bo, 0);
+}
+
+/**
+ * Sets the object to the GTT read and possibly write domain, used by the X
+ * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
+ *
+ * In combination with drm_intel_gem_bo_pin() and manual fence management, we
+ * can do tiled pixmaps this way.
+ */
+void
+drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_set_domain set_domain;
+ int ret;
+
+ set_domain.handle = bo_gem->gem_handle;
+ set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+ set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
+ do {
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
+ &set_domain);
+ } while (ret == -1 && errno == EINTR);
+ if (ret != 0) {
+ fprintf(stderr,
+ "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
+ __FILE__, __LINE__, bo_gem->gem_handle,
+ set_domain.read_domains, set_domain.write_domain,
+ strerror(errno));
+ }
+}
+
+static void
+drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+ int i;
+
+ free(bufmgr_gem->exec_objects);
+ free(bufmgr_gem->exec_bos);
+
+ pthread_mutex_destroy(&bufmgr_gem->lock);
+
+ /* Free any cached buffer objects we were going to reuse */
+ for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
+ struct drm_intel_gem_bo_bucket *bucket =
+ &bufmgr_gem->cache_bucket[i];
+ drm_intel_bo_gem *bo_gem;
+
+ while (!DRMLISTEMPTY(&bucket->head)) {
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+ bucket->head.next, head);
+ DRMLISTDEL(&bo_gem->head);
+
+ drm_intel_gem_bo_free(&bo_gem->bo);
+ }
+ }
+
+ free(bufmgr);
+}
+
+/**
+ * Adds the target buffer to the validation list and adds the relocation
+ * to the reloc_buffer's relocation list.
+ *
+ * The relocation entry at the given offset must already contain the
+ * precomputed relocation value, because the kernel will optimize out
+ * the relocation entry write when the buffer hasn't moved from the
+ * last known offset in target_bo.
+ */
+static int
+drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
+ /* Create a new relocation list if needed */
+ if (bo_gem->relocs == NULL)
+ drm_intel_setup_reloc_list(bo);
+
+ /* Check overflow */
+ assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
+
+ /* Check args */
+ assert(offset <= bo->size - 4);
+ assert((write_domain & (write_domain - 1)) == 0);
+
+ /* Make sure that we're not adding a reloc to something whose size has
+ * already been accounted for.
+ */
+ assert(!bo_gem->used_as_reloc_target);
+ bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
+ bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
+
+ /* Flag the target to disallow further relocations in it. */
+ target_bo_gem->used_as_reloc_target = 1;
+
+ bo_gem->relocs[bo_gem->reloc_count].offset = offset;
+ bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
+ bo_gem->relocs[bo_gem->reloc_count].target_handle =
+ target_bo_gem->gem_handle;
+ bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
+ bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
+ bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
+
+ bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
+ drm_intel_gem_bo_reference(target_bo);
+
+ bo_gem->reloc_count++;
+
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return 0;
+}
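
To illustrate the contract in the comment above — the caller writes the presumed value into the batch itself, then records the relocation — a sketch using the public drm_intel_bo_emit_reloc() wrapper; batch_map, batch_bo, target, delta and the dword index i are assumptions:

	/* Write the presumed address into the batch at dword i... */
	batch_map[i] = target->offset + delta;
	/* ...then record the relocation so the kernel can patch it if the
	 * target ends up somewhere else.  Offsets are in bytes. */
	drm_intel_bo_emit_reloc(batch_bo, i * 4, target, delta,
				I915_GEM_DOMAIN_RENDER, 0);
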
+
+/**
+ * Walk the tree of relocations rooted at BO and accumulate the list of
+ * validations to be performed and update the relocation buffers with
+ * index values into the validation list.
+ */
+static void
+drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int i;
+
+ if (bo_gem->relocs == NULL)
+ return;
+
+ for (i = 0; i < bo_gem->reloc_count; i++) {
+ drm_intel_bo *target_bo = bo_gem->reloc_target_bo[i];
+
+ /* Continue walking the tree depth-first. */
+ drm_intel_gem_bo_process_reloc(target_bo);
+
+ /* Add the target to the validate list */
+ drm_intel_add_validate_buffer(target_bo);
+ }
+}
+
+static void
+drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+ int i;
+
+ for (i = 0; i < bufmgr_gem->exec_count; i++) {
+ drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ /* Update the buffer offset */
+ if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
+ DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
+ bo_gem->gem_handle, bo_gem->name, bo->offset,
+ (unsigned long long)bufmgr_gem->exec_objects[i].
+ offset);
+ bo->offset = bufmgr_gem->exec_objects[i].offset;
+ }
+ }
+}
+
+static int
+drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
+ drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ struct drm_i915_gem_execbuffer execbuf;
+ int ret, i;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ /* Update indices and set up the validate list. */
+ drm_intel_gem_bo_process_reloc(bo);
+
+ /* Add the batch buffer to the validation list. There are no
+ * relocations pointing to it.
+ */
+ drm_intel_add_validate_buffer(bo);
+
+ execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
+ execbuf.buffer_count = bufmgr_gem->exec_count;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = used;
+ execbuf.cliprects_ptr = (uintptr_t) cliprects;
+ execbuf.num_cliprects = num_cliprects;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = DR4;
+
+ do {
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER,
+ &execbuf);
+ } while (ret != 0 && errno == EAGAIN);
+
+ if (ret != 0 && errno == ENOMEM) {
+ fprintf(stderr,
+ "Execbuffer fails to pin. "
+ "Estimate: %u. Actual: %u. Available: %u\n",
+ drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
+ bufmgr_gem->
+ exec_count),
+ drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
+ bufmgr_gem->
+ exec_count),
+ (unsigned int)bufmgr_gem->gtt_size);
+ }
+ drm_intel_update_buffer_offsets(bufmgr_gem);
+
+ if (bufmgr_gem->bufmgr.debug)
+ drm_intel_gem_dump_validation_list(bufmgr_gem);
+
+ for (i = 0; i < bufmgr_gem->exec_count; i++) {
+ drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ /* Disconnect the buffer from the validate list */
+ bo_gem->validate_index = -1;
+ drm_intel_gem_bo_unreference_locked(bo);
+ bufmgr_gem->exec_bos[i] = NULL;
+ }
+ bufmgr_gem->exec_count = 0;
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return 0;
+}
+
+static int
+drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_pin pin;
+ int ret;
+
+ memset(&pin, 0, sizeof(pin));
+ pin.handle = bo_gem->gem_handle;
+ pin.alignment = alignment;
+
+ do {
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret != 0)
+ return -errno;
+
+ bo->offset = pin.offset;
+ return 0;
+}
+
+static int
+drm_intel_gem_bo_unpin(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_unpin unpin;
+ int ret;
+
+ memset(&unpin, 0, sizeof(unpin));
+ unpin.handle = bo_gem->gem_handle;
+
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
+ if (ret != 0)
+ return -errno;
+
+ return 0;
+}
+
+static int
+drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t stride)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_set_tiling set_tiling;
+ int ret;
+
+ if (bo_gem->global_name == 0 && *tiling_mode == bo_gem->tiling_mode)
+ return 0;
+
+ /* If we're going from non-tiling to tiling, bump fence count */
+ if (bo_gem->tiling_mode == I915_TILING_NONE)
+ bo_gem->reloc_tree_fences++;
+
+ memset(&set_tiling, 0, sizeof(set_tiling));
+ set_tiling.handle = bo_gem->gem_handle;
+ set_tiling.tiling_mode = *tiling_mode;
+ set_tiling.stride = stride;
+
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
+ if (ret != 0) {
+ *tiling_mode = bo_gem->tiling_mode;
+ return -errno;
+ }
+ bo_gem->tiling_mode = set_tiling.tiling_mode;
+ bo_gem->swizzle_mode = set_tiling.swizzle_mode;
+
+ /* If we're going from tiling to non-tiling, drop fence count */
+ if (bo_gem->tiling_mode == I915_TILING_NONE)
+ bo_gem->reloc_tree_fences--;
+
+ *tiling_mode = bo_gem->tiling_mode;
+ return 0;
+}
+
+static int
+drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t * swizzle_mode)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ *tiling_mode = bo_gem->tiling_mode;
+ *swizzle_mode = bo_gem->swizzle_mode;
+ return 0;
+}
+
+static int
+drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_gem_flink flink;
+ int ret;
+
+ if (!bo_gem->global_name) {
+ memset(&flink, 0, sizeof(flink));
+ flink.handle = bo_gem->gem_handle;
+
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
+ if (ret != 0)
+ return -errno;
+ bo_gem->global_name = flink.name;
+ bo_gem->reusable = 0;
+ }
+
+ *name = bo_gem->global_name;
+ return 0;
+}
+
+/**
+ * Enables unlimited caching of buffer objects for reuse.
+ *
+ * This is potentially very memory expensive, as the cache at each bucket
+ * size is only bounded by how many buffers of that size we've managed to have
+ * in flight at once.
+ */
+void
+drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+
+ bufmgr_gem->bo_reuse = 1;
+}
+
+/**
+ * Return the additional aperture space required by the tree of buffer objects
+ * rooted at bo.
+ */
+static int
+drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int i;
+ int total = 0;
+
+ if (bo == NULL || bo_gem->included_in_check_aperture)
+ return 0;
+
+ total += bo->size;
+ bo_gem->included_in_check_aperture = 1;
+
+ for (i = 0; i < bo_gem->reloc_count; i++)
+ total +=
+ drm_intel_gem_bo_get_aperture_space(bo_gem->
+ reloc_target_bo[i]);
+
+ return total;
+}
+
+/**
+ * Count the number of buffers in this list that need a fence reg
+ *
+ * If the count is greater than the number of available regs, we'll have
+ * to ask the caller to resubmit a batch with fewer tiled buffers.
+ *
+ * This function over-counts if the same buffer is used multiple times.
+ */
+static unsigned int
+drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
+{
+ int i;
+ unsigned int total = 0;
+
+ for (i = 0; i < count; i++) {
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
+
+ if (bo_gem == NULL)
+ continue;
+
+ total += bo_gem->reloc_tree_fences;
+ }
+ return total;
+}
+
+/**
+ * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
+ * for the next drm_intel_bufmgr_check_aperture_space() call.
+ */
+static void
+drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int i;
+
+ if (bo == NULL || !bo_gem->included_in_check_aperture)
+ return;
+
+ bo_gem->included_in_check_aperture = 0;
+
+ for (i = 0; i < bo_gem->reloc_count; i++)
+ drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
+ reloc_target_bo[i]);
+}
+
+/**
+ * Return a conservative estimate for the amount of aperture required
+ * for a collection of buffers. This may double-count some buffers.
+ */
+static unsigned int
+drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
+{
+ int i;
+ unsigned int total = 0;
+
+ for (i = 0; i < count; i++) {
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
+ if (bo_gem != NULL)
+ total += bo_gem->reloc_tree_size;
+ }
+ return total;
+}
+
+/**
+ * Return the amount of aperture needed for a collection of buffers.
+ * This avoids double counting any buffers, at the cost of looking
+ * at every buffer in the set.
+ */
+static unsigned int
+drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
+{
+ int i;
+ unsigned int total = 0;
+
+ for (i = 0; i < count; i++) {
+ total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
+ /* For the first buffer object in the array, we get an
+ * accurate count back for its reloc_tree size (since nothing
+ * had been flagged as being counted yet). We can save that
+ * value out as a more conservative reloc_tree_size that
+ * avoids double-counting target buffers. Since the first
+ * buffer happens to usually be the batch buffer in our
+ * callers, this can pull us back from doing the tree
+ * walk on every new batch emit.
+ */
+ if (i == 0) {
+ drm_intel_bo_gem *bo_gem =
+ (drm_intel_bo_gem *) bo_array[i];
+ bo_gem->reloc_tree_size = total;
+ }
+ }
+
+ for (i = 0; i < count; i++)
+ drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
+ return total;
+}
+
+/**
+ * Return -1 if the batchbuffer should be flushed before attempting to
+ * emit rendering referencing the buffers pointed to by bo_array.
+ *
+ * This is required because if we try to emit a batchbuffer with relocations
+ * to a tree of buffers that won't simultaneously fit in the aperture,
+ * the rendering will return an error at a point where the software is not
+ * prepared to recover from it.
+ *
+ * However, we also want to emit the batchbuffer significantly before we reach
+ * the limit, as a series of batchbuffers each of which references buffers
+ * covering almost all of the aperture means that at each emit we end up
+ * waiting to evict a buffer from the last rendering, and we get synchronous
+ * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
+ * get better parallelism.
+ */
+static int
+drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem =
+ (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
+ unsigned int total = 0;
+ unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
+ int total_fences;
+
+ /* Check for fence reg constraints if necessary */
+ if (bufmgr_gem->available_fences) {
+ total_fences = drm_intel_gem_total_fences(bo_array, count);
+ if (total_fences > bufmgr_gem->available_fences)
+ return -1;
+ }
+
+ total = drm_intel_gem_estimate_batch_space(bo_array, count);
+
+ if (total > threshold)
+ total = drm_intel_gem_compute_batch_space(bo_array, count);
+
+ if (total > threshold) {
+ DBG("check_space: overflowed available aperture, "
+ "%dkb vs %dkb\n",
+ total / 1024, (int)bufmgr_gem->gtt_size / 1024);
+ return -1;
+ } else {
+		DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
+ (int)bufmgr_gem->gtt_size / 1024);
+ return 0;
+ }
+}
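
A sketch of how a driver is expected to react to the -1 result, per the comment above: flush the batch it has built so far and retry with a smaller set. drm_intel_bufmgr_check_aperture_space() is the public wrapper from intel_bufmgr.h; the flush helper is a hypothetical driver function:

	drm_intel_bo *bos[2] = { batch_bo, texture_bo };

	if (drm_intel_bufmgr_check_aperture_space(bos, 2) != 0) {
		/* Hypothetical driver hook: submit the current batch, then
		 * re-emit state for the new buffers in a fresh one. */
		driver_flush_batch();
	}
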
+
+/*
+ * Disable buffer reuse for objects which are shared with the kernel
+ * as scanout buffers
+ */
+static int
+drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ bo_gem->reusable = 0;
+ return 0;
+}
+
+static int
+_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int i;
+
+ for (i = 0; i < bo_gem->reloc_count; i++) {
+ if (bo_gem->reloc_target_bo[i] == target_bo)
+ return 1;
+ if (_drm_intel_gem_bo_references(bo_gem->reloc_target_bo[i],
+ target_bo))
+ return 1;
+ }
+
+ return 0;
+}
+
+/** Return true if target_bo is referenced by bo's relocation tree. */
+static int
+drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
+{
+ drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
+
+ if (bo == NULL || target_bo == NULL)
+ return 0;
+ if (target_bo_gem->used_as_reloc_target)
+ return _drm_intel_gem_bo_references(bo, target_bo);
+ return 0;
+}
+
+/**
+ * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
+ * and manage buffer objects.
+ *
+ * \param fd File descriptor of the opened DRM device.
+ */
+drm_intel_bufmgr *
+drm_intel_bufmgr_gem_init(int fd, int batch_size)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem;
+ struct drm_i915_gem_get_aperture aperture;
+ drm_i915_getparam_t gp;
+ int ret, i;
+ unsigned long size;
+
+ bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
+ bufmgr_gem->fd = fd;
+
+ if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
+ free(bufmgr_gem);
+ return NULL;
+ }
+
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
+
+ if (ret == 0)
+ bufmgr_gem->gtt_size = aperture.aper_available_size;
+ else {
+		fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
+ strerror(errno));
+ bufmgr_gem->gtt_size = 128 * 1024 * 1024;
+ fprintf(stderr, "Assuming %dkB available aperture size.\n"
+ "May lead to reduced performance or incorrect "
+ "rendering.\n",
+ (int)bufmgr_gem->gtt_size / 1024);
+ }
+
+ gp.param = I915_PARAM_CHIPSET_ID;
+ gp.value = &bufmgr_gem->pci_device;
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ if (ret) {
+ fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
+ fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
+ }
+
+ if (!IS_I965G(bufmgr_gem)) {
+ gp.param = I915_PARAM_NUM_FENCES_AVAIL;
+ gp.value = &bufmgr_gem->available_fences;
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ if (ret) {
+ fprintf(stderr, "get fences failed: %d [%d]\n", ret,
+ errno);
+ fprintf(stderr, "param: %d, val: %d\n", gp.param,
+ *gp.value);
+ bufmgr_gem->available_fences = 0;
+ }
+ }
+
+	/* Let's go with one relocation for every 2 dwords (but round down a bit
+ * since a power of two will mean an extra page allocation for the reloc
+ * buffer).
+ *
+ * Every 4 was too few for the blender benchmark.
+ */
+ bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
+
+ bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
+ bufmgr_gem->bufmgr.bo_alloc_for_render =
+ drm_intel_gem_bo_alloc_for_render;
+ bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
+ bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
+ bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
+ bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
+ bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
+ bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
+ bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
+ bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
+ bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
+ bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
+ bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
+ bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
+ bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
+ bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
+ bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
+ bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
+ bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
+ bufmgr_gem->bufmgr.debug = 0;
+ bufmgr_gem->bufmgr.check_aperture_space =
+ drm_intel_gem_check_aperture_space;
+ bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
+ bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
+ drm_intel_gem_get_pipe_from_crtc_id;
+ bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
+
+ /* Initialize the linked lists for BO reuse cache. */
+ for (i = 0, size = 4096; i < DRM_INTEL_GEM_BO_BUCKETS; i++, size *= 2) {
+ DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
+ bufmgr_gem->cache_bucket[i].size = size;
+ }
+
+ return &bufmgr_gem->bufmgr;
+}
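
Pulling the pieces of this file together, a minimal, hedged bring-up sketch using the public wrappers declared in intel_bufmgr.h (error handling omitted; the 16 KB batch size and the card0 path are placeholder assumptions, not values taken from this patch):

	#include <fcntl.h>
	#include "intel_bufmgr.h"

	static void example_bringup(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR);	/* placeholder device */
		drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);

		/* Opt in to the BO cache so freed buffers can be recycled. */
		drm_intel_bufmgr_gem_enable_reuse(bufmgr);

		drm_intel_bo *batch = drm_intel_bo_alloc(bufmgr, "batch",
							 16 * 1024, 4096);
		/* ... fill the batch and emit relocations here ... */
		drm_intel_bo_exec(batch, 0 /* bytes used */, NULL, 0, 0);

		drm_intel_bo_unreference(batch);
		drm_intel_bufmgr_destroy(bufmgr);
	}
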
diff --git a/intel/intel_bufmgr_priv.h b/intel/intel_bufmgr_priv.h
new file mode 100644
index 00000000..475c402f
--- /dev/null
+++ b/intel/intel_bufmgr_priv.h
@@ -0,0 +1,254 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/**
+ * @file intel_bufmgr_priv.h
+ *
+ * Private definitions of Intel-specific bufmgr functions and structures.
+ */
+
+#ifndef INTEL_BUFMGR_PRIV_H
+#define INTEL_BUFMGR_PRIV_H
+
+/**
+ * Context for a buffer manager instance.
+ *
+ * Contains public methods followed by private storage for the buffer manager.
+ */
+struct _drm_intel_bufmgr {
+ /**
+ * Allocate a buffer object.
+ *
+ * Buffer objects are not necessarily initially mapped into CPU virtual
+ * address space or graphics device aperture. They must be mapped
+ * using bo_map() or drm_intel_gem_bo_map_gtt() to be used by the CPU.
+ */
+ drm_intel_bo *(*bo_alloc) (drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment);
+
+ /**
+ * Allocate a buffer object, hinting that it will be used as a
+ * render target.
+ *
+ * This is otherwise the same as bo_alloc.
+ */
+ drm_intel_bo *(*bo_alloc_for_render) (drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned int alignment);
+
+ /**
+ * Allocate a tiled buffer object.
+ *
+ * Alignment for tiled objects is set automatically; the 'flags'
+ * argument provides a hint about how the object will be used initially.
+ *
+ * Valid tiling formats are:
+ * I915_TILING_NONE
+ * I915_TILING_X
+ * I915_TILING_Y
+ *
+ * Note the tiling format may be rejected; callers should check the
+ * 'tiling_mode' field on return, as well as the pitch value, which
+ * may have been rounded up to accommodate tiling restrictions.
+ */
+ drm_intel_bo *(*bo_alloc_tiled) (drm_intel_bufmgr *bufmgr,
+ const char *name,
+ int x, int y, int cpp,
+ uint32_t *tiling_mode,
+ unsigned long *pitch,
+ unsigned long flags);
+
+ /** Takes a reference on a buffer object */
+ void (*bo_reference) (drm_intel_bo *bo);
+
+ /**
+ * Releases a reference on a buffer object, freeing the data if
+ * no references remain.
+ */
+ void (*bo_unreference) (drm_intel_bo *bo);
+
+ /**
+ * Maps the buffer into userspace.
+ *
+ * This function will first block until any existing execution on the
+ * buffer has completed. The resulting mapping is available at
+ * buf->virtual.
+ */
+ int (*bo_map) (drm_intel_bo *bo, int write_enable);
+
+ /**
+ * Reduces the refcount on the userspace mapping of the buffer
+ * object.
+ */
+ int (*bo_unmap) (drm_intel_bo *bo);
+
+ /**
+ * Write data into an object.
+ *
+ * This is an optional function; if missing, the generic
+ * drm_intel_bo code falls back to map/memcpy/unmap.
+ */
+ int (*bo_subdata) (drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data);
+
+ /**
+ * Read data from an object.
+ *
+ * This is an optional function; if missing, the generic
+ * drm_intel_bo code falls back to map/memcpy/unmap.
+ */
+ int (*bo_get_subdata) (drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data);
+
+ /**
+ * Waits for rendering to the object by the GPU to complete.
+ *
+ * This is not required for any access to the BO by bo_map,
+ * bo_subdata, etc. It is merely a way for the driver to implement
+ * glFinish.
+ */
+ void (*bo_wait_rendering) (drm_intel_bo *bo);
+
+ /**
+ * Tears down the buffer manager instance.
+ */
+ void (*destroy) (drm_intel_bufmgr *bufmgr);
+
+ /**
+ * Add a relocation entry in bo, which will be updated with the
+ * target buffer's real offset on command submission.
+ *
+ * Relocations remain in place for the lifetime of the buffer object.
+ *
+ * \param bo Buffer to write the relocation into.
+ * \param offset Byte offset within bo of the pointer to
+ * target_bo.
+ * \param target_bo Buffer whose offset should be written into the
+ * relocation entry.
+ * \param target_offset Constant value to be added to target_bo's
+ * offset in relocation entry.
+ * \param read_domains GEM read domains which the buffer will be
+ * read into by the command that this relocation
+ * is part of.
+ * \param write_domain GEM write domain in which the buffer
+ * will be dirtied by the command that this
+ * relocation is part of.
+ */
+ int (*bo_emit_reloc) (drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain);
+
+ /** Executes the command buffer pointed to by bo. */
+ int (*bo_exec) (drm_intel_bo *bo, int used,
+ drm_clip_rect_t *cliprects, int num_cliprects,
+ int DR4);
+
+ /**
+ * Pin a buffer to the aperture and fix the offset until unpinned
+ *
+ * \param buf Buffer to pin
+ * \param alignment Required alignment for aperture, in bytes
+ */
+ int (*bo_pin) (drm_intel_bo *bo, uint32_t alignment);
+
+ /**
+ * Unpin a buffer from the aperture, allowing it to be removed
+ *
+ * \param buf Buffer to unpin
+ */
+ int (*bo_unpin) (drm_intel_bo *bo);
+
+ /**
+ * Ask that the buffer be placed in the given tiling mode.
+ *
+ * \param buf Buffer to set tiling mode for
+ * \param tiling_mode desired and returned tiling mode
+ */
+ int (*bo_set_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t stride);
+
+ /**
+ * Get the current tiling (and resulting swizzling) mode for the bo.
+ *
+ * \param buf Buffer to get tiling mode for
+ * \param tiling_mode returned tiling mode
+ * \param swizzle_mode returned swizzling mode
+ */
+ int (*bo_get_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t * swizzle_mode);
+
+ /**
+ * Create a visible name for a buffer which can be used by other apps
+ *
+ * \param buf Buffer to create a name for
+ * \param name Returned name
+ */
+ int (*bo_flink) (drm_intel_bo *bo, uint32_t * name);
+
+ /**
+ * Returns 1 if mapping the buffer for write could cause the process
+ * to block, due to the object being active in the GPU.
+ */
+ int (*bo_busy) (drm_intel_bo *bo);
+
+ int (*check_aperture_space) (drm_intel_bo ** bo_array, int count);
+
+ /**
+ * Disable buffer reuse for buffers which will be shared in some way,
+ * as with scanout buffers. When the buffer reference count goes to
+ * zero, it will be freed and not placed in the reuse list.
+ *
+ * \param bo Buffer to disable reuse for
+ */
+ int (*bo_disable_reuse) (drm_intel_bo *bo);
+
+ /**
+ * Return the pipe associated with a crtc_id so that vblank
+ * synchronization can use the correct data in the request.
+ * This is only supported for KMS and GEM at this point; when
+ * unsupported, this function returns -1 and leaves the decision
+ * of what to do in that case to the caller.
+ *
+ * \param bufmgr the associated buffer manager
+ * \param crtc_id the crtc identifier
+ */
+ int (*get_pipe_from_crtc_id) (drm_intel_bufmgr *bufmgr, int crtc_id);
+
+ /** Returns true if target_bo is in the relocation tree rooted at bo. */
+ int (*bo_references) (drm_intel_bo *bo, drm_intel_bo *target_bo);
+
+ /** Enables verbose debugging printouts */
+ int debug;
+};
+
+#define ALIGN(value, alignment) (((value) + (alignment) - 1) & ~((alignment) - 1))
+#define ROUND_UP_TO(x, y) (((x) + (y) - 1) / (y) * (y))
+#define ROUND_UP_TO_MB(x) ROUND_UP_TO((x), 1024*1024)
+
+#endif /* INTEL_BUFMGR_PRIV_H */
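The struct above is the backend vtable that the GEM and fake buffer managers fill in; applications normally reach it through the public wrappers declared in intel_bufmgr.h. A hedged usage sketch follows, assuming the usual public entry points (drm_intel_bufmgr_gem_init, drm_intel_bo_alloc, drm_intel_bo_map, and friends) and an example device node; error handling is omitted for brevity.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include "intel_bufmgr.h"

/* Illustrative only: each public call below dispatches to the
 * corresponding vtable slot documented above (bo_alloc, bo_map,
 * bo_unmap, bo_unreference, destroy). */
static void example_bo_roundtrip(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* example device node */
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "example", 4096, 4096);

	drm_intel_bo_map(bo, 1);		/* blocks until prior GPU use finishes */
	memset(bo->virtual, 0, 4096);		/* CPU access through the mapping */
	drm_intel_bo_unmap(bo);

	drm_intel_bo_unreference(bo);		/* last ref: freed or put on reuse list */
	drm_intel_bufmgr_destroy(bufmgr);
	close(fd);
}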
diff --git a/intel/intel_chipset.h b/intel/intel_chipset.h
new file mode 100644
index 00000000..688476a4
--- /dev/null
+++ b/intel/intel_chipset.h
@@ -0,0 +1,77 @@
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_CHIPSET_H
+#define _INTEL_CHIPSET_H
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+#define IS_I855(dev) ((dev)->pci_device == 0x3582)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+
+#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
+ (dev)->pci_device == 0x27AE)
+#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
+ (dev)->pci_device == 0x2982 || \
+ (dev)->pci_device == 0x2992 || \
+ (dev)->pci_device == 0x29A2 || \
+ (dev)->pci_device == 0x2A02 || \
+ (dev)->pci_device == 0x2A12 || \
+ (dev)->pci_device == 0x2A42 || \
+ (dev)->pci_device == 0x2E02 || \
+ (dev)->pci_device == 0x2E12 || \
+ (dev)->pci_device == 0x2E22 || \
+ (dev)->pci_device == 0x2E32 || \
+ (dev)->pci_device == 0x2E42 || \
+ (dev)->pci_device == 0x0042 || \
+ (dev)->pci_device == 0x0046)
+
+#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
+
+#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+
+#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
+ (dev)->pci_device == 0x2E12 || \
+ (dev)->pci_device == 0x2E22 || \
+ (dev)->pci_device == 0x2E32 || \
+ (dev)->pci_device == 0x2E42)
+
+#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
+ (dev)->pci_device == 0x29B2 || \
+ (dev)->pci_device == 0x29D2)
+
+#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
+ IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
+
+#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
+ IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
+
+#endif /* _INTEL_CHIPSET_H */
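These macros only look at a pci_device member, so any structure carrying the PCI device id can be tested against them. A small illustrative sketch, with a made-up struct and a device id taken from the list above:

#include "intel_chipset.h"

/* Illustrative only: the struct and the id are example values. */
struct fake_dev {
	int pci_device;
};

static int is_mobile_example(void)
{
	struct fake_dev dev = { .pci_device = 0x2592 };	/* i915GM, listed above */

	return IS_MOBILE(&dev);	/* true, via IS_I915GM() */
}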
diff --git a/intel/libdrm_intel.pc.in b/intel/libdrm_intel.pc.in
new file mode 100644
index 00000000..ea71cc3e
--- /dev/null
+++ b/intel/libdrm_intel.pc.in
@@ -0,0 +1,10 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libdrm_intel
+Description: Userspace interface to kernel DRM services
+Version: @PACKAGE_VERSION@
+Libs: -L${libdir} -ldrm -ldrm_intel
+Cflags: -I${includedir} -I${includedir}/drm
diff --git a/intel/mm.c b/intel/mm.c
new file mode 100644
index 00000000..10697452
--- /dev/null
+++ b/intel/mm.c
@@ -0,0 +1,271 @@
+/*
+ * GLX Hardware Device Driver common code
+ * Copyright (C) 1999 Wittawat Yamwong
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * WITTAWAT YAMWONG, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "xf86drm.h"
+#include "mm.h"
+
+void mmDumpMemInfo(const struct mem_block *heap)
+{
+ drmMsg("Memory heap %p:\n", (void *)heap);
+ if (heap == 0) {
+ drmMsg(" heap == 0\n");
+ } else {
+ const struct mem_block *p;
+
+ for (p = heap->next; p != heap; p = p->next) {
+ drmMsg(" Offset:%08x, Size:%08x, %c%c\n", p->ofs,
+ p->size, p->free ? 'F' : '.',
+ p->reserved ? 'R' : '.');
+ }
+
+ drmMsg("\nFree list:\n");
+
+ for (p = heap->next_free; p != heap; p = p->next_free) {
+ drmMsg(" FREE Offset:%08x, Size:%08x, %c%c\n", p->ofs,
+ p->size, p->free ? 'F' : '.',
+ p->reserved ? 'R' : '.');
+ }
+
+ }
+ drmMsg("End of memory blocks\n");
+}
+
+struct mem_block *mmInit(int ofs, int size)
+{
+ struct mem_block *heap, *block;
+
+ if (size <= 0)
+ return NULL;
+
+ heap = (struct mem_block *)calloc(1, sizeof(struct mem_block));
+ if (!heap)
+ return NULL;
+
+ block = (struct mem_block *)calloc(1, sizeof(struct mem_block));
+ if (!block) {
+ free(heap);
+ return NULL;
+ }
+
+ heap->next = block;
+ heap->prev = block;
+ heap->next_free = block;
+ heap->prev_free = block;
+
+ block->heap = heap;
+ block->next = heap;
+ block->prev = heap;
+ block->next_free = heap;
+ block->prev_free = heap;
+
+ block->ofs = ofs;
+ block->size = size;
+ block->free = 1;
+
+ return heap;
+}
+
+static struct mem_block *SliceBlock(struct mem_block *p,
+ int startofs, int size,
+ int reserved, int alignment)
+{
+ struct mem_block *newblock;
+
+ /* break left [p, newblock, p->next], then p = newblock */
+ if (startofs > p->ofs) {
+ newblock =
+ (struct mem_block *)calloc(1, sizeof(struct mem_block));
+ if (!newblock)
+ return NULL;
+ newblock->ofs = startofs;
+ newblock->size = p->size - (startofs - p->ofs);
+ newblock->free = 1;
+ newblock->heap = p->heap;
+
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+
+ newblock->next_free = p->next_free;
+ newblock->prev_free = p;
+ p->next_free->prev_free = newblock;
+ p->next_free = newblock;
+
+ p->size -= newblock->size;
+ p = newblock;
+ }
+
+ /* break right, also [p, newblock, p->next] */
+ if (size < p->size) {
+ newblock =
+ (struct mem_block *)calloc(1, sizeof(struct mem_block));
+ if (!newblock)
+ return NULL;
+ newblock->ofs = startofs + size;
+ newblock->size = p->size - size;
+ newblock->free = 1;
+ newblock->heap = p->heap;
+
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+
+ newblock->next_free = p->next_free;
+ newblock->prev_free = p;
+ p->next_free->prev_free = newblock;
+ p->next_free = newblock;
+
+ p->size = size;
+ }
+
+ /* p = middle block */
+ p->free = 0;
+
+ /* Remove p from the free list:
+ */
+ p->next_free->prev_free = p->prev_free;
+ p->prev_free->next_free = p->next_free;
+
+ p->next_free = 0;
+ p->prev_free = 0;
+
+ p->reserved = reserved;
+ return p;
+}
+
+struct mem_block *mmAllocMem(struct mem_block *heap, int size, int align2,
+ int startSearch)
+{
+ struct mem_block *p;
+ const int mask = (1 << align2) - 1;
+ int startofs = 0;
+ int endofs;
+
+ if (!heap || align2 < 0 || size <= 0)
+ return NULL;
+
+ for (p = heap->next_free; p != heap; p = p->next_free) {
+ assert(p->free);
+
+ startofs = (p->ofs + mask) & ~mask;
+ if (startofs < startSearch) {
+ startofs = startSearch;
+ }
+ endofs = startofs + size;
+ if (endofs <= (p->ofs + p->size))
+ break;
+ }
+
+ if (p == heap)
+ return NULL;
+
+ assert(p->free);
+ p = SliceBlock(p, startofs, size, 0, mask + 1);
+
+ return p;
+}
+
+struct mem_block *mmFindBlock(struct mem_block *heap, int start)
+{
+ struct mem_block *p;
+
+ for (p = heap->next; p != heap; p = p->next) {
+ if (p->ofs == start)
+ return p;
+ }
+
+ return NULL;
+}
+
+static int Join2Blocks(struct mem_block *p)
+{
+ /* XXX there should be some assertions here */
+
+ /* NOTE: heap->free == 0 */
+
+ if (p->free && p->next->free) {
+ struct mem_block *q = p->next;
+
+ assert(p->ofs + p->size == q->ofs);
+ p->size += q->size;
+
+ p->next = q->next;
+ q->next->prev = p;
+
+ q->next_free->prev_free = q->prev_free;
+ q->prev_free->next_free = q->next_free;
+
+ free(q);
+ return 1;
+ }
+ return 0;
+}
+
+int mmFreeMem(struct mem_block *b)
+{
+ if (!b)
+ return 0;
+
+ if (b->free) {
+ drmMsg("block already free\n");
+ return -1;
+ }
+ if (b->reserved) {
+ drmMsg("block is reserved\n");
+ return -1;
+ }
+
+ b->free = 1;
+ b->next_free = b->heap->next_free;
+ b->prev_free = b->heap;
+ b->next_free->prev_free = b;
+ b->prev_free->next_free = b;
+
+ Join2Blocks(b);
+ if (b->prev != b->heap)
+ Join2Blocks(b->prev);
+
+ return 0;
+}
+
+void mmDestroy(struct mem_block *heap)
+{
+ struct mem_block *p;
+
+ if (!heap)
+ return;
+
+ for (p = heap->next; p != heap;) {
+ struct mem_block *next = p->next;
+ free(p);
+ p = next;
+ }
+
+ free(heap);
+}
diff --git a/intel/mm.h b/intel/mm.h
new file mode 100644
index 00000000..8a5235b0
--- /dev/null
+++ b/intel/mm.h
@@ -0,0 +1,94 @@
+/*
+ * GLX Hardware Device Driver common code
+ * Copyright (C) 1999 Wittawat Yamwong
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * KEITH WHITWELL, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * Memory manager code. Primarily used by device drivers to manage texture
+ * heaps, etc.
+ */
+
+#ifndef MM_H
+#define MM_H
+
+struct mem_block {
+ struct mem_block *next, *prev;
+ struct mem_block *next_free, *prev_free;
+ struct mem_block *heap;
+ int ofs, size;
+ unsigned int free:1;
+ unsigned int reserved:1;
+};
+
+/* Rename the functions in the drm copy of this code so that they don't
+ * conflict with mesa or whoever else has copied it around.
+ */
+#define mmInit drm_mmInit
+#define mmAllocMem drm_mmAllocMem
+#define mmFreeMem drm_mmFreeMem
+#define mmFindBlock drm_mmFindBlock
+#define mmDestroy drm_mmDestroy
+#define mmDumpMemInfo drm_mmDumpMemInfo
+
+/**
+ * input: ofs = start offset of the heap, size = total size in bytes
+ * return: a heap pointer if OK, NULL if error
+ */
+extern struct mem_block *mmInit(int ofs, int size);
+
+/**
+ * Allocate 'size' bytes with 2^align2 bytes alignment and
+ * restrict the search to free memory after 'startSearch'.
+ * Depth and back buffers should be in different 4mb banks
+ * to get better page hits if possible.
+ * input: size = size of block
+ * align2 = 2^align2 bytes alignment
+ * startSearch = linear offset from start of heap to begin search
+ * return: pointer to the allocated block, 0 if error
+ */
+extern struct mem_block *mmAllocMem(struct mem_block *heap, int size,
+ int align2, int startSearch);
+
+/**
+ * Free a previously allocated block.
+ * input: pointer to a block
+ * return: 0 if OK, -1 if error
+ */
+extern int mmFreeMem(struct mem_block *b);
+
+/**
+ * Find the block that starts at the given offset.
+ * input: pointer to a heap, start offset
+ * return: pointer to a block
+ */
+extern struct mem_block *mmFindBlock(struct mem_block *heap, int start);
+
+/**
+ * Destroy the memory manager and free all remaining blocks.
+ */
+extern void mmDestroy(struct mem_block *heap);
+
+/**
+ * For debugging purposes.
+ */
+extern void mmDumpMemInfo(const struct mem_block *heap);
+
+#endif
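For reference, a minimal round trip through this allocator under the drm_* renames above; the heap size, alignment and offsets below are arbitrary example values:

#include "mm.h"

/* Illustrative only: create a heap, carve out one aligned block,
 * free it, then tear the heap down. */
static void mm_example(void)
{
	struct mem_block *heap = mmInit(0, 1024 * 1024);	/* 1 MB heap at offset 0 */
	struct mem_block *blk;

	/* 4096 bytes aligned to 2^12 bytes, searching from offset 0. */
	blk = mmAllocMem(heap, 4096, 12, 0);
	if (blk)
		mmFreeMem(blk);		/* returns the block to the free list */

	mmDestroy(heap);		/* frees the heap and any remaining blocks */
}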