Diffstat (limited to 'drivers/char/drm')
-rw-r--r--  drivers/char/drm/README.drm      |  16
-rw-r--r--  drivers/char/drm/ati_pcigart.c   |  84
-rw-r--r--  drivers/char/drm/drm.h           |   4
-rw-r--r--  drivers/char/drm/drmP.h          |  30
-rw-r--r--  drivers/char/drm/drm_bufs.c      |  75
-rw-r--r--  drivers/char/drm/drm_dma.c       |   2
-rw-r--r--  drivers/char/drm/drm_drv.c       |  15
-rw-r--r--  drivers/char/drm/drm_fops.c      |  96
-rw-r--r--  drivers/char/drm/drm_hashtab.c   |  17
-rw-r--r--  drivers/char/drm/drm_hashtab.h   |   1
-rw-r--r--  drivers/char/drm/drm_irq.c       |   4
-rw-r--r--  drivers/char/drm/drm_lock.c      | 134
-rw-r--r--  drivers/char/drm/drm_mm.c        |   2
-rw-r--r--  drivers/char/drm/drm_os_linux.h  |   3
-rw-r--r--  drivers/char/drm/drm_pciids.h    |   4
-rw-r--r--  drivers/char/drm/drm_proc.c      |   2
-rw-r--r--  drivers/char/drm/drm_stub.c      |   1
-rw-r--r--  drivers/char/drm/drm_vm.c        | 104
-rw-r--r--  drivers/char/drm/i915_dma.c      |   3
-rw-r--r--  drivers/char/drm/r128_cce.c      |   3
-rw-r--r--  drivers/char/drm/r128_drv.h      |   2
-rw-r--r--  drivers/char/drm/r300_reg.h      |   2
-rw-r--r--  drivers/char/drm/radeon_cp.c     |  79
-rw-r--r--  drivers/char/drm/radeon_drm.h    |   1
-rw-r--r--  drivers/char/drm/radeon_drv.h    |  24
-rw-r--r--  drivers/char/drm/radeon_state.c  |  52
-rw-r--r--  drivers/char/drm/sis_drv.c       |   2
-rw-r--r--  drivers/char/drm/via_dma.c       | 111
-rw-r--r--  drivers/char/drm/via_drv.c       |   3
-rw-r--r--  drivers/char/drm/via_drv.h       |   5
-rw-r--r--  drivers/char/drm/via_mm.h        |  40
31 files changed, 572 insertions, 349 deletions
diff --git a/drivers/char/drm/README.drm b/drivers/char/drm/README.drm
index 6441e01e587..af74cd79a27 100644
--- a/drivers/char/drm/README.drm
+++ b/drivers/char/drm/README.drm
@@ -1,6 +1,6 @@
************************************************************
* For the very latest on DRI development, please see: *
-* http://dri.sourceforge.net/ *
+* http://dri.freedesktop.org/ *
************************************************************
The Direct Rendering Manager (drm) is a device-independent kernel-level
@@ -26,21 +26,19 @@ ways:
Documentation on the DRI is available from:
- http://precisioninsight.com/piinsights.html
+ http://dri.freedesktop.org/wiki/Documentation
+ http://sourceforge.net/project/showfiles.php?group_id=387
+ http://dri.sourceforge.net/doc/
For specific information about kernel-level support, see:
The Direct Rendering Manager, Kernel Support for the Direct Rendering
Infrastructure
- http://precisioninsight.com/dr/drm.html
+ http://dri.sourceforge.net/doc/drm_low_level.html
Hardware Locking for the Direct Rendering Infrastructure
- http://precisioninsight.com/dr/locking.html
+ http://dri.sourceforge.net/doc/hardware_locking_low_level.html
A Security Analysis of the Direct Rendering Infrastructure
- http://precisioninsight.com/dr/security.html
+ http://dri.sourceforge.net/doc/security_low_level.html
-************************************************************
-* For the very latest on DRI development, please see: *
-* http://dri.sourceforge.net/ *
-************************************************************
diff --git a/drivers/char/drm/ati_pcigart.c b/drivers/char/drm/ati_pcigart.c
index bd7be09ea53..5b91bc04ea4 100644
--- a/drivers/char/drm/ati_pcigart.c
+++ b/drivers/char/drm/ati_pcigart.c
@@ -33,59 +33,44 @@
#include "drmP.h"
-#if PAGE_SIZE == 65536
-# define ATI_PCIGART_TABLE_ORDER 0
-# define ATI_PCIGART_TABLE_PAGES (1 << 0)
-#elif PAGE_SIZE == 16384
-# define ATI_PCIGART_TABLE_ORDER 1
-# define ATI_PCIGART_TABLE_PAGES (1 << 1)
-#elif PAGE_SIZE == 8192
-# define ATI_PCIGART_TABLE_ORDER 2
-# define ATI_PCIGART_TABLE_PAGES (1 << 2)
-#elif PAGE_SIZE == 4096
-# define ATI_PCIGART_TABLE_ORDER 3
-# define ATI_PCIGART_TABLE_PAGES (1 << 3)
-#else
-# error - PAGE_SIZE not 64K, 16K, 8K or 4K
-#endif
-
-# define ATI_MAX_PCIGART_PAGES 8192 /**< 32 MB aperture, 4K pages */
# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
-static void *drm_ati_alloc_pcigart_table(void)
+static void *drm_ati_alloc_pcigart_table(int order)
{
unsigned long address;
struct page *page;
int i;
- DRM_DEBUG("%s\n", __FUNCTION__);
+
+ DRM_DEBUG("%s: alloc %d order\n", __FUNCTION__, order);
address = __get_free_pages(GFP_KERNEL | __GFP_COMP,
- ATI_PCIGART_TABLE_ORDER);
+ order);
if (address == 0UL) {
return NULL;
}
page = virt_to_page(address);
- for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++)
+ for (i = 0; i < order; i++, page++)
SetPageReserved(page);
DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address);
return (void *)address;
}
-static void drm_ati_free_pcigart_table(void *address)
+static void drm_ati_free_pcigart_table(void *address, int order)
{
struct page *page;
int i;
+ int num_pages = 1 << order;
DRM_DEBUG("%s\n", __FUNCTION__);
page = virt_to_page((unsigned long)address);
- for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++)
+ for (i = 0; i < num_pages; i++, page++)
ClearPageReserved(page);
- free_pages((unsigned long)address, ATI_PCIGART_TABLE_ORDER);
+ free_pages((unsigned long)address, order);
}
int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
@@ -93,6 +78,8 @@ int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
drm_sg_mem_t *entry = dev->sg;
unsigned long pages;
int i;
+ int order;
+ int num_pages, max_pages;
/* we need to support large memory configurations */
if (!entry) {
@@ -100,15 +87,19 @@ int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
return 0;
}
+ order = drm_order((gart_info->table_size + (PAGE_SIZE-1)) / PAGE_SIZE);
+ num_pages = 1 << order;
+
if (gart_info->bus_addr) {
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
pci_unmap_single(dev->pdev, gart_info->bus_addr,
- ATI_PCIGART_TABLE_PAGES * PAGE_SIZE,
+ num_pages * PAGE_SIZE,
PCI_DMA_TODEVICE);
}
- pages = (entry->pages <= ATI_MAX_PCIGART_PAGES)
- ? entry->pages : ATI_MAX_PCIGART_PAGES;
+ max_pages = (gart_info->table_size / sizeof(u32));
+ pages = (entry->pages <= max_pages)
+ ? entry->pages : max_pages;
for (i = 0; i < pages; i++) {
if (!entry->busaddr[i])
@@ -123,13 +114,12 @@ int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN
&& gart_info->addr) {
- drm_ati_free_pcigart_table(gart_info->addr);
+ drm_ati_free_pcigart_table(gart_info->addr, order);
gart_info->addr = NULL;
}
return 1;
}
-
EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
@@ -139,6 +129,9 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
unsigned long pages;
u32 *pci_gart, page_base, bus_address = 0;
int i, j, ret = 0;
+ int order;
+ int max_pages;
+ int num_pages;
if (!entry) {
DRM_ERROR("no scatter/gather memory!\n");
@@ -148,7 +141,10 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
- address = drm_ati_alloc_pcigart_table();
+ order = drm_order((gart_info->table_size +
+ (PAGE_SIZE-1)) / PAGE_SIZE);
+ num_pages = 1 << order;
+ address = drm_ati_alloc_pcigart_table(order);
if (!address) {
DRM_ERROR("cannot allocate PCI GART page!\n");
goto done;
@@ -160,11 +156,13 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
}
bus_address = pci_map_single(dev->pdev, address,
- ATI_PCIGART_TABLE_PAGES *
- PAGE_SIZE, PCI_DMA_TODEVICE);
+ num_pages * PAGE_SIZE,
+ PCI_DMA_TODEVICE);
if (bus_address == 0) {
DRM_ERROR("unable to map PCIGART pages!\n");
- drm_ati_free_pcigart_table(address);
+ order = drm_order((gart_info->table_size +
+ (PAGE_SIZE-1)) / PAGE_SIZE);
+ drm_ati_free_pcigart_table(address, order);
address = NULL;
goto done;
}
@@ -177,10 +175,11 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
pci_gart = (u32 *) address;
- pages = (entry->pages <= ATI_MAX_PCIGART_PAGES)
- ? entry->pages : ATI_MAX_PCIGART_PAGES;
+ max_pages = (gart_info->table_size / sizeof(u32));
+ pages = (entry->pages <= max_pages)
+ ? entry->pages : max_pages;
- memset(pci_gart, 0, ATI_MAX_PCIGART_PAGES * sizeof(u32));
+ memset(pci_gart, 0, max_pages * sizeof(u32));
for (i = 0; i < pages; i++) {
/* we need to support large memory configurations */
@@ -198,10 +197,18 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
page_base = (u32) entry->busaddr[i];
for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
- if (gart_info->is_pcie)
+ switch(gart_info->gart_reg_if) {
+ case DRM_ATI_GART_IGP:
+ *pci_gart = cpu_to_le32((page_base) | 0xc);
+ break;
+ case DRM_ATI_GART_PCIE:
*pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
- else
+ break;
+ default:
+ case DRM_ATI_GART_PCI:
*pci_gart = cpu_to_le32(page_base);
+ break;
+ }
pci_gart++;
page_base += ATI_PCIGART_PAGE_SIZE;
}
@@ -220,5 +227,4 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
gart_info->bus_addr = bus_address;
return ret;
}
-
EXPORT_SYMBOL(drm_ati_pcigart_init);
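The two recurring pieces of arithmetic in the ati_pcigart.c changes above are the drm_order() sizing of the now variable-size GART table and the per-interface entry encoding in the new switch. A minimal user-space sketch of both, assuming 4K pages; order_for_table() stands in for the kernel's drm_order():

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

enum gart_reg_if { GART_PCI = 1, GART_PCIE = 2, GART_IGP = 3 };

/* smallest order such that (1 << order) pages cover the table */
static int order_for_table(uint32_t table_size)
{
	uint32_t pages = (table_size + PAGE_SIZE - 1) / PAGE_SIZE;
	int order = 0;

	while ((1u << order) < pages)
		order++;
	return order;
}

/* entry encoding used by the switch in drm_ati_pcigart_init() */
static uint32_t gart_entry(enum gart_reg_if reg_if, uint32_t page_base)
{
	switch (reg_if) {
	case GART_IGP:
		return page_base | 0xc;		/* full address, 0xc low bits as above */
	case GART_PCIE:
		return (page_base >> 8) | 0xc;	/* address shifted down by 8 */
	case GART_PCI:
	default:
		return page_base;		/* plain PCI: raw bus address */
	}
}

int main(void)
{
	uint32_t table_size = 32768;	/* e.g. R128_PCIGART_TABLE_SIZE below */

	printf("order=%d, entries=%zu\n", order_for_table(table_size),
	       table_size / sizeof(uint32_t));
	printf("PCIE entry for 0x10000000: 0x%08x\n",
	       gart_entry(GART_PCIE, 0x10000000u));
	return 0;
}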
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index 8db9041e306..089198491f1 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -654,11 +654,13 @@ typedef struct drm_set_version {
/**
* Device specific ioctls should only be in their respective headers
- * The device specific ioctl range is from 0x40 to 0x79.
+ * The device specific ioctl range is from 0x40 to 0x99.
+ * Generic IOCTLS restart at 0xA0.
*
* \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
* drmCommandReadWrite().
*/
#define DRM_COMMAND_BASE 0x40
+#define DRM_COMMAND_END 0xA0
#endif
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 85d99e21e18..d494315752a 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -414,6 +414,10 @@ typedef struct drm_lock_data {
struct file *filp; /**< File descr of lock holder (0=kernel) */
wait_queue_head_t lock_queue; /**< Queue of blocked processes */
unsigned long lock_time; /**< Time of last lock in jiffies */
+ spinlock_t spinlock;
+ uint32_t kernel_waiters;
+ uint32_t user_waiters;
+ int idle_has_lock;
} drm_lock_data_t;
/**
@@ -515,12 +519,17 @@ typedef struct drm_vbl_sig {
#define DRM_ATI_GART_MAIN 1
#define DRM_ATI_GART_FB 2
+#define DRM_ATI_GART_PCI 1
+#define DRM_ATI_GART_PCIE 2
+#define DRM_ATI_GART_IGP 3
+
typedef struct ati_pcigart_info {
int gart_table_location;
- int is_pcie;
+ int gart_reg_if;
void *addr;
dma_addr_t bus_addr;
drm_local_map_t mapping;
+ int table_size;
} drm_ati_pcigart_info;
/*
@@ -590,6 +599,8 @@ struct drm_driver {
void (*reclaim_buffers) (struct drm_device * dev, struct file * filp);
void (*reclaim_buffers_locked) (struct drm_device *dev,
struct file *filp);
+ void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
+ struct file * filp);
unsigned long (*get_map_ofs) (drm_map_t * map);
unsigned long (*get_reg_ofs) (struct drm_device * dev);
void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
@@ -764,7 +775,7 @@ static __inline__ int drm_core_check_feature(struct drm_device *dev,
}
#ifdef __alpha__
-#define drm_get_pci_domain(dev) dev->hose->bus->number
+#define drm_get_pci_domain(dev) dev->hose->index
#else
#define drm_get_pci_domain(dev) 0
#endif
@@ -915,9 +926,18 @@ extern int drm_lock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_unlock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context);
-extern int drm_lock_free(drm_device_t * dev,
- __volatile__ unsigned int *lock, unsigned int context);
+extern int drm_lock_take(drm_lock_data_t *lock_data, unsigned int context);
+extern int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context);
+extern void drm_idlelock_take(drm_lock_data_t *lock_data);
+extern void drm_idlelock_release(drm_lock_data_t *lock_data);
+
+/*
+ * These are exported to drivers so that they can implement fencing using
+ * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
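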
+ */
+
+extern int drm_i_have_hw_lock(struct file *filp);
+extern int drm_kernel_take_hw_lock(struct file *filp);
/* Buffer management support (drm_bufs.h) */
extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
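Since the is_pcie flag in drm_ati_pcigart_info is replaced by gart_reg_if plus an explicit table_size, a driver now fills both before calling drm_ati_pcigart_init() (r128 and radeon do so later in this patch). A hedged sketch of that setup, using local stand-in definitions rather than the real headers:

#include <stdio.h>

/* local stand-ins for the kernel definitions touched by this patch */
#define DRM_ATI_GART_MAIN 1
#define DRM_ATI_GART_PCI  1
#define DRM_ATI_GART_PCIE 2
#define DRM_ATI_GART_IGP  3

struct ati_pcigart_info {
	int gart_table_location;
	int gart_reg_if;		/* replaces the old is_pcie flag */
	void *addr;
	unsigned long bus_addr;
	int table_size;			/* new: size of the GART table in bytes */
};

/* the shape of the setup r128_do_init_cce() performs after this patch */
static void setup_pci_gart(struct ati_pcigart_info *gi, int table_size)
{
	gi->gart_table_location = DRM_ATI_GART_MAIN;
	gi->table_size = table_size;
	gi->addr = NULL;
	gi->bus_addr = 0;
	gi->gart_reg_if = DRM_ATI_GART_PCI;
}

int main(void)
{
	struct ati_pcigart_info gi;

	setup_pci_gart(&gi, 32768);
	printf("reg_if=%d table_size=%d\n", gi.gart_reg_if, gi.table_size);
	return 0;
}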
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index a6828cc14e5..c11345856ff 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -57,7 +57,8 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
if (entry->map && map->type == entry->map->type &&
- entry->map->offset == map->offset) {
+ ((entry->map->offset == map->offset) ||
+ (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
return entry;
}
}
@@ -180,8 +181,20 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
if (map->type == _DRM_REGISTERS)
map->handle = ioremap(map->offset, map->size);
break;
-
case _DRM_SHM:
+ list = drm_find_matching_map(dev, map);
+ if (list != NULL) {
+ if(list->map->size != map->size) {
+ DRM_DEBUG("Matching maps of type %d with "
+ "mismatched sizes, (%ld vs %ld)\n",
+ map->type, map->size, list->map->size);
+ list->map->size = map->size;
+ }
+
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ *maplist = list;
+ return 0;
+ }
map->handle = vmalloc_user(map->size);
DRM_DEBUG("%lu %d %p\n",
map->size, drm_order(map->size), map->handle);
@@ -200,15 +213,45 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
dev->sigdata.lock = dev->lock.hw_lock = map->handle; /* Pointer to lock */
}
break;
- case _DRM_AGP:
- if (drm_core_has_AGP(dev)) {
+ case _DRM_AGP: {
+ drm_agp_mem_t *entry;
+ int valid = 0;
+
+ if (!drm_core_has_AGP(dev)) {
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ return -EINVAL;
+ }
#ifdef __alpha__
- map->offset += dev->hose->mem_space->start;
+ map->offset += dev->hose->mem_space->start;
#endif
- map->offset += dev->agp->base;
- map->mtrr = dev->agp->agp_mtrr; /* for getmap */
+ /* Note: dev->agp->base may actually be 0 when the DRM
+ * is not in control of AGP space. But if user space is
+ * it should already have added the AGP base itself.
+ */
+ map->offset += dev->agp->base;
+ map->mtrr = dev->agp->agp_mtrr; /* for getmap */
+
+ /* This assumes the DRM is in total control of AGP space.
+ * It's not always the case as AGP can be in the control
+ * of user space (i.e. i810 driver). So this loop will get
+ * skipped and we double check that dev->agp->memory is
+ * actually set as well as being invalid before EPERM'ing
+ */
+ for (entry = dev->agp->memory; entry; entry = entry->next) {
+ if ((map->offset >= entry->bound) &&
+ (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
+ valid = 1;
+ break;
+ }
}
+ if (dev->agp->memory && !valid) {
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ return -EPERM;
+ }
+ DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
+
break;
+ }
case _DRM_SCATTER_GATHER:
if (!dev->sg) {
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
@@ -267,7 +310,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
*maplist = list;
return 0;
-}
+ }
int drm_addmap(drm_device_t * dev, unsigned int offset,
unsigned int size, drm_map_type_t type,
@@ -519,6 +562,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
{
drm_device_dma_t *dma = dev->dma;
drm_buf_entry_t *entry;
+ drm_agp_mem_t *agp_entry;
drm_buf_t *buf;
unsigned long offset;
unsigned long agp_offset;
@@ -529,7 +573,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
int page_order;
int total;
int byte_count;
- int i;
+ int i, valid;
drm_buf_t **temp_buflist;
if (!dma)
@@ -560,6 +604,19 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
if (dev->queue_count)
return -EBUSY; /* Not while in use */
+ /* Make sure buffers are located in AGP memory that we own */
+ valid = 0;
+ for (agp_entry = dev->agp->memory; agp_entry; agp_entry = agp_entry->next) {
+ if ((agp_offset >= agp_entry->bound) &&
+ (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
+ valid = 1;
+ break;
+ }
+ }
+ if (dev->agp->memory && !valid) {
+ DRM_DEBUG("zone invalid\n");
+ return -EINVAL;
+ }
spin_lock(&dev->count_lock);
if (dev->buf_use) {
spin_unlock(&dev->count_lock);
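Both drm_addmap_core() and drm_addbufs_agp() now refuse AGP offsets that do not fall inside a bound AGP memory block, unless the DRM has no AGP memory list at all (the i810 case noted in the comment). A standalone sketch of that containment test:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096ul

struct agp_block {
	unsigned long bound;		/* start of the bound AGP region */
	unsigned long pages;		/* size in pages */
	struct agp_block *next;
};

/* mirrors the loops added to drm_addmap_core()/drm_addbufs_agp() */
static bool range_in_agp(const struct agp_block *mem,
			 unsigned long offset, unsigned long size)
{
	const struct agp_block *e;

	if (!mem)			/* DRM not in control of AGP space */
		return true;
	for (e = mem; e; e = e->next)
		if (offset >= e->bound &&
		    offset + size <= e->bound + e->pages * PAGE_SIZE)
			return true;
	return false;
}

int main(void)
{
	struct agp_block b = { .bound = 0x10000000, .pages = 1024, .next = NULL };

	printf("%d\n", range_in_agp(&b, 0x10001000, 8 * PAGE_SIZE));	/* 1 */
	printf("%d\n", range_in_agp(&b, 0x20000000, 8 * PAGE_SIZE));	/* 0 */
	return 0;
}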
diff --git a/drivers/char/drm/drm_dma.c b/drivers/char/drm/drm_dma.c
index 892db709698..32ed19c9ec1 100644
--- a/drivers/char/drm/drm_dma.c
+++ b/drivers/char/drm/drm_dma.c
@@ -65,7 +65,7 @@ int drm_dma_setup(drm_device_t * dev)
* \param dev DRM device.
*
* Free all pages associated with DMA buffers, the buffers and pages lists, and
- * finally the the drm_device::dma structure itself.
+ * finally the drm_device::dma structure itself.
*/
void drm_dma_takedown(drm_device_t * dev)
{
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index f5b9b2480c1..8e77b7ed0f4 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -15,8 +15,6 @@
* #define DRIVER_DESC "Matrox G200/G400"
* #define DRIVER_DATE "20001127"
*
- * #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( mga_ioctls )
- *
* #define drm_x mga_##x
* \endcode
*/
@@ -120,7 +118,7 @@ static drm_ioctl_desc_t drm_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
};
-#define DRIVER_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
+#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
/**
* Take down the DRM device.
@@ -496,11 +494,14 @@ int drm_ioctl(struct inode *inode, struct file *filp,
(long)old_encode_dev(priv->head->device),
priv->authenticated);
- if (nr < DRIVER_IOCTL_COUNT)
- ioctl = &drm_ioctls[nr];
- else if ((nr >= DRM_COMMAND_BASE)
- && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
+ if ((nr >= DRM_CORE_IOCTL_COUNT) &&
+ ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
+ goto err_i1;
+ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
+ (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+ else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE))
+ ioctl = &drm_ioctls[nr];
else
goto err_i1;
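With DRM_COMMAND_END in place, device-specific ioctls occupy 0x40 to 0x9F and generic ioctls may resume at 0xA0, so drm_ioctl() has to pick the right table for each number. A standalone sketch of the new routing (the core-table size here is a stand-in value, not the real ARRAY_SIZE(drm_ioctls)):

#include <stdio.h>

#define DRM_CORE_IOCTL_COUNT 0x3f	/* stand-in for ARRAY_SIZE(drm_ioctls) */
#define DRM_COMMAND_BASE     0x40
#define DRM_COMMAND_END      0xA0

enum table { TBL_NONE, TBL_CORE, TBL_DRIVER };

static enum table route_ioctl(unsigned int nr, unsigned int driver_num_ioctls)
{
	if (nr >= DRM_CORE_IOCTL_COUNT &&
	    (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END))
		return TBL_NONE;	/* neither table: err_i1 */
	if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END &&
	    nr < DRM_COMMAND_BASE + driver_num_ioctls)
		return TBL_DRIVER;	/* driver->ioctls[nr - DRM_COMMAND_BASE] */
	if (nr >= DRM_COMMAND_END || nr < DRM_COMMAND_BASE)
		return TBL_CORE;	/* drm_ioctls[nr] */
	return TBL_NONE;
}

int main(void)
{
	printf("%d %d %d\n",
	       route_ioctl(0x10, 8),	/* core table    */
	       route_ioctl(0x42, 8),	/* driver table  */
	       route_ioctl(0x90, 8));	/* rejected      */
	return 0;
}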
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index 898f47dafec..3b159cab3bc 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -46,6 +46,7 @@ static int drm_setup(drm_device_t * dev)
drm_local_map_t *map;
int i;
int ret;
+ u32 sareapage;
if (dev->driver->firstopen) {
ret = dev->driver->firstopen(dev);
@@ -56,7 +57,8 @@ static int drm_setup(drm_device_t * dev)
dev->magicfree.next = NULL;
/* prebuild the SAREA */
- i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
+ sareapage = max_t(unsigned, SAREA_MAX, PAGE_SIZE);
+ i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
if (i != 0)
return i;
@@ -84,7 +86,7 @@ static int drm_setup(drm_device_t * dev)
INIT_LIST_HEAD(&dev->ctxlist->head);
dev->vmalist = NULL;
- dev->sigdata.lock = dev->lock.hw_lock = NULL;
+ dev->sigdata.lock = NULL;
init_waitqueue_head(&dev->lock.lock_queue);
dev->queue_count = 0;
dev->queue_reserved = 0;
@@ -354,58 +356,56 @@ int drm_release(struct inode *inode, struct file *filp)
current->pid, (long)old_encode_dev(priv->head->device),
dev->open_count);
- if (priv->lock_count && dev->lock.hw_lock &&
- _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
- dev->lock.filp == filp) {
- DRM_DEBUG("File %p released, freeing lock for context %d\n",
- filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-
- if (dev->driver->reclaim_buffers_locked)
+ if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
+ if (drm_i_have_hw_lock(filp)) {
dev->driver->reclaim_buffers_locked(dev, filp);
-
- drm_lock_free(dev, &dev->lock.hw_lock->lock,
- _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-
- /* FIXME: may require heavy-handed reset of
- hardware at this point, possibly
- processed via a callback to the X
- server. */
- } else if (dev->driver->reclaim_buffers_locked && priv->lock_count
- && dev->lock.hw_lock) {
- /* The lock is required to reclaim buffers */
- DECLARE_WAITQUEUE(entry, current);
-
- add_wait_queue(&dev->lock.lock_queue, &entry);
- for (;;) {
- __set_current_state(TASK_INTERRUPTIBLE);
- if (!dev->lock.hw_lock) {
- /* Device has been unregistered */
- retcode = -EINTR;
- break;
+ } else {
+ unsigned long _end=jiffies + 3*DRM_HZ;
+ int locked = 0;
+
+ drm_idlelock_take(&dev->lock);
+
+ /*
+ * Wait for a while.
+ */
+
+ do{
+ spin_lock(&dev->lock.spinlock);
+ locked = dev->lock.idle_has_lock;
+ spin_unlock(&dev->lock.spinlock);
+ if (locked)
+ break;
+ schedule();
+ } while (!time_after_eq(jiffies, _end));
+
+ if (!locked) {
+ DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
+ "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
+ "\tI will go on reclaiming the buffers anyway.\n");
}
- if (drm_lock_take(&dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT)) {
- dev->lock.filp = filp;
- dev->lock.lock_time = jiffies;
- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
- break; /* Got lock */
- }
- /* Contention */
- schedule();
- if (signal_pending(current)) {
- retcode = -ERESTARTSYS;
- break;
- }
- }
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&dev->lock.lock_queue, &entry);
- if (!retcode) {
+
dev->driver->reclaim_buffers_locked(dev, filp);
- drm_lock_free(dev, &dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT);
+ drm_idlelock_release(&dev->lock);
}
}
+ if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
+
+ drm_idlelock_take(&dev->lock);
+ dev->driver->reclaim_buffers_idlelocked(dev, filp);
+ drm_idlelock_release(&dev->lock);
+
+ }
+
+ if (drm_i_have_hw_lock(filp)) {
+ DRM_DEBUG("File %p released, freeing lock for context %d\n",
+ filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+
+ drm_lock_free(&dev->lock,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ }
+
+
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
!dev->driver->reclaim_buffers_locked) {
dev->driver->reclaim_buffers(dev, filp);
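For drivers still using reclaim_buffers_locked(), drm_release() now takes the idlelock and simply polls idle_has_lock for up to three seconds before reclaiming anyway. A rough user-space analogue of that bounded wait (the kernel path uses jiffies and schedule() rather than a wall-clock deadline):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* stand-in for dev->lock.idle_has_lock, set by drm_idlelock_take() */
static volatile int idle_has_lock;

static bool wait_for_idlelock(int timeout_sec)
{
	time_t deadline = time(NULL) + timeout_sec;

	while (!idle_has_lock) {
		if (time(NULL) >= deadline)
			return false;	/* give up and reclaim anyway, as the patch does */
		/* the kernel loop calls schedule() here */
	}
	return true;
}

int main(void)
{
	idle_has_lock = 1;		/* pretend the idlelock was granted */
	printf("locked=%d\n", wait_for_idlelock(3));
	return 0;
}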
diff --git a/drivers/char/drm/drm_hashtab.c b/drivers/char/drm/drm_hashtab.c
index a0b2d6802ae..31acb621dcc 100644
--- a/drivers/char/drm/drm_hashtab.c
+++ b/drivers/char/drm/drm_hashtab.c
@@ -43,7 +43,16 @@ int drm_ht_create(drm_open_hash_t *ht, unsigned int order)
ht->size = 1 << order;
ht->order = order;
ht->fill = 0;
- ht->table = vmalloc(ht->size*sizeof(*ht->table));
+ ht->table = NULL;
+ ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
+ if (!ht->use_vmalloc) {
+ ht->table = drm_calloc(ht->size, sizeof(*ht->table),
+ DRM_MEM_HASHTAB);
+ }
+ if (!ht->table) {
+ ht->use_vmalloc = 1;
+ ht->table = vmalloc(ht->size*sizeof(*ht->table));
+ }
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
@@ -183,7 +192,11 @@ int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item)
void drm_ht_remove(drm_open_hash_t *ht)
{
if (ht->table) {
- vfree(ht->table);
+ if (ht->use_vmalloc)
+ vfree(ht->table);
+ else
+ drm_free(ht->table, ht->size * sizeof(*ht->table),
+ DRM_MEM_HASHTAB);
ht->table = NULL;
}
}
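Small hash tables are now served by the ordinary page-sized allocator and only fall back to vmalloc(), so drm_ht_remove() must remember which allocator was used. A user-space sketch of the same decision, with calloc()/malloc() standing in for drm_calloc()/vmalloc() and void pointers for the hlist heads:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096u

struct open_hash {
	unsigned int size;		/* number of buckets */
	void *table;
	int use_vmalloc;		/* remembers which allocator to free with */
};

/* mirrors the fallback added to drm_ht_create() */
static int ht_create(struct open_hash *ht, unsigned int order)
{
	size_t bytes;

	ht->size = 1u << order;
	bytes = ht->size * sizeof(void *);
	ht->table = NULL;
	ht->use_vmalloc = bytes > PAGE_SIZE;
	if (!ht->use_vmalloc)
		ht->table = calloc(ht->size, sizeof(void *));
	if (!ht->table) {
		ht->use_vmalloc = 1;
		ht->table = malloc(bytes);
		if (ht->table)
			memset(ht->table, 0, bytes);
	}
	return ht->table ? 0 : -1;
}

int main(void)
{
	struct open_hash ht;

	if (ht_create(&ht, 12) == 0)	/* 4096 buckets: takes the fallback path */
		printf("use_vmalloc=%d\n", ht.use_vmalloc);
	free(ht.table);
	return 0;
}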
diff --git a/drivers/char/drm/drm_hashtab.h b/drivers/char/drm/drm_hashtab.h
index 40afec05bff..613091c970a 100644
--- a/drivers/char/drm/drm_hashtab.h
+++ b/drivers/char/drm/drm_hashtab.h
@@ -47,6 +47,7 @@ typedef struct drm_open_hash{
unsigned int order;
unsigned int fill;
struct hlist_head *table;
+ int use_vmalloc;
} drm_open_hash_t;
diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c
index 9d00c51fe2c..2e75331fd83 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/char/drm/drm_irq.c
@@ -424,7 +424,7 @@ static void drm_locked_tasklet_func(unsigned long data)
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
if (!dev->locked_tasklet_func ||
- !drm_lock_take(&dev->lock.hw_lock->lock,
+ !drm_lock_take(&dev->lock,
DRM_KERNEL_CONTEXT)) {
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
return;
@@ -435,7 +435,7 @@ static void drm_locked_tasklet_func(unsigned long data)
dev->locked_tasklet_func(dev);
- drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ drm_lock_free(&dev->lock,
DRM_KERNEL_CONTEXT);
dev->locked_tasklet_func = NULL;
diff --git a/drivers/char/drm/drm_lock.c b/drivers/char/drm/drm_lock.c
index e9993ba461a..befd1af19df 100644
--- a/drivers/char/drm/drm_lock.c
+++ b/drivers/char/drm/drm_lock.c
@@ -35,9 +35,6 @@
#include "drmP.h"
-static int drm_lock_transfer(drm_device_t * dev,
- __volatile__ unsigned int *lock,
- unsigned int context);
static int drm_notifier(void *priv);
/**
@@ -80,6 +77,9 @@ int drm_lock(struct inode *inode, struct file *filp,
return -EINVAL;
add_wait_queue(&dev->lock.lock_queue, &entry);
+ spin_lock(&dev->lock.spinlock);
+ dev->lock.user_waiters++;
+ spin_unlock(&dev->lock.spinlock);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (!dev->lock.hw_lock) {
@@ -87,7 +87,7 @@ int drm_lock(struct inode *inode, struct file *filp,
ret = -EINTR;
break;
}
- if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) {
+ if (drm_lock_take(&dev->lock, lock.context)) {
dev->lock.filp = filp;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
@@ -101,12 +101,14 @@ int drm_lock(struct inode *inode, struct file *filp,
break;
}
}
+ spin_lock(&dev->lock.spinlock);
+ dev->lock.user_waiters--;
+ spin_unlock(&dev->lock.spinlock);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->lock.lock_queue, &entry);
- DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
- if (ret)
- return ret;
+ DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
+ if (ret) return ret;
sigemptyset(&dev->sigmask);
sigaddset(&dev->sigmask, SIGSTOP);
@@ -127,14 +129,12 @@ int drm_lock(struct inode *inode, struct file *filp,
}
}
- /* dev->driver->kernel_context_switch isn't used by any of the x86
- * drivers but is used by the Sparc driver.
- */
if (dev->driver->kernel_context_switch &&
dev->last_context != lock.context) {
dev->driver->kernel_context_switch(dev, dev->last_context,
lock.context);
}
+
return 0;
}
@@ -184,12 +184,8 @@ int drm_unlock(struct inode *inode, struct file *filp,
if (dev->driver->kernel_context_switch_unlock)
dev->driver->kernel_context_switch_unlock(dev);
else {
- drm_lock_transfer(dev, &dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT);
-
- if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT)) {
- DRM_ERROR("\n");
+ if (drm_lock_free(&dev->lock,lock.context)) {
+ /* FIXME: Should really bail out here. */
}
}
@@ -206,18 +202,26 @@ int drm_unlock(struct inode *inode, struct file *filp,
*
* Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
*/
-int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
+int drm_lock_take(drm_lock_data_t *lock_data,
+ unsigned int context)
{
unsigned int old, new, prev;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+ spin_lock(&lock_data->spinlock);
do {
old = *lock;
if (old & _DRM_LOCK_HELD)
new = old | _DRM_LOCK_CONT;
- else
- new = context | _DRM_LOCK_HELD;
+ else {
+ new = context | _DRM_LOCK_HELD |
+ ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+ _DRM_LOCK_CONT : 0);
+ }
prev = cmpxchg(lock, old, new);
} while (prev != old);
+ spin_unlock(&lock_data->spinlock);
+
if (_DRM_LOCKING_CONTEXT(old) == context) {
if (old & _DRM_LOCK_HELD) {
if (context != DRM_KERNEL_CONTEXT) {
@@ -227,7 +231,8 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
return 0;
}
}
- if (new == (context | _DRM_LOCK_HELD)) {
+
+ if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
/* Have lock */
return 1;
}
@@ -246,13 +251,13 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
* Resets the lock file pointer.
* Marks the lock as held by the given context, via the \p cmpxchg instruction.
*/
-static int drm_lock_transfer(drm_device_t * dev,
- __volatile__ unsigned int *lock,
+static int drm_lock_transfer(drm_lock_data_t *lock_data,
unsigned int context)
{
unsigned int old, new, prev;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
- dev->lock.filp = NULL;
+ lock_data->filp = NULL;
do {
old = *lock;
new = context | _DRM_LOCK_HELD;
@@ -272,23 +277,32 @@ static int drm_lock_transfer(drm_device_t * dev,
* Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
* waiting on the lock queue.
*/
-int drm_lock_free(drm_device_t * dev,
- __volatile__ unsigned int *lock, unsigned int context)
+int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context)
{
unsigned int old, new, prev;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+ spin_lock(&lock_data->spinlock);
+ if (lock_data->kernel_waiters != 0) {
+ drm_lock_transfer(lock_data, 0);
+ lock_data->idle_has_lock = 1;
+ spin_unlock(&lock_data->spinlock);
+ return 1;
+ }
+ spin_unlock(&lock_data->spinlock);
- dev->lock.filp = NULL;
do {
old = *lock;
- new = 0;
+ new = _DRM_LOCKING_CONTEXT(old);
prev = cmpxchg(lock, old, new);
} while (prev != old);
+
if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
DRM_ERROR("%d freed heavyweight lock held by %d\n",
context, _DRM_LOCKING_CONTEXT(old));
return 1;
}
- wake_up_interruptible(&dev->lock.lock_queue);
+ wake_up_interruptible(&lock_data->lock_queue);
return 0;
}
@@ -322,3 +336,67 @@ static int drm_notifier(void *priv)
} while (prev != old);
return 0;
}
+
+/**
+ * This function returns immediately and takes the hw lock
+ * with the kernel context if it is free, otherwise it gets the highest priority when and if
+ * it is eventually released.
+ *
+ * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
+ * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
+ * a deadlock, which is why the "idlelock" was invented).
+ *
+ * This should be sufficient to wait for GPU idle without
+ * having to worry about starvation.
+ */
+
+void drm_idlelock_take(drm_lock_data_t *lock_data)
+{
+ int ret = 0;
+
+ spin_lock(&lock_data->spinlock);
+ lock_data->kernel_waiters++;
+ if (!lock_data->idle_has_lock) {
+
+ spin_unlock(&lock_data->spinlock);
+ ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
+ spin_lock(&lock_data->spinlock);
+
+ if (ret == 1)
+ lock_data->idle_has_lock = 1;
+ }
+ spin_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_take);
+
+void drm_idlelock_release(drm_lock_data_t *lock_data)
+{
+ unsigned int old, prev;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+ spin_lock(&lock_data->spinlock);
+ if (--lock_data->kernel_waiters == 0) {
+ if (lock_data->idle_has_lock) {
+ do {
+ old = *lock;
+ prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
+ } while (prev != old);
+ wake_up_interruptible(&lock_data->lock_queue);
+ lock_data->idle_has_lock = 0;
+ }
+ }
+ spin_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_release);
+
+
+int drm_i_have_hw_lock(struct file *filp)
+{
+ DRM_DEVICE;
+
+ return (priv->lock_count && dev->lock.hw_lock &&
+ _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
+ dev->lock.filp == filp);
+}
+
+EXPORT_SYMBOL(drm_i_have_hw_lock);
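The heavyweight lock remains a single 32-bit word driven by cmpxchg; the patch adds a spinlock-protected waiter count so contention can be flagged up front, leaves the old context in the word when the lock is freed, and builds drm_idlelock_take() on the same primitives. A compressed user-space model of drm_lock_take()/drm_lock_free() using C11 atomics (bit values as in drm.h; the recursive-take error path is omitted):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define _DRM_LOCK_HELD 0x80000000u
#define _DRM_LOCK_CONT 0x40000000u
#define _DRM_LOCKING_CONTEXT(x) ((x) & ~(_DRM_LOCK_HELD | _DRM_LOCK_CONT))

static _Atomic uint32_t hw_lock;

/* shape of drm_lock_take(): mark the lock held, or set CONT if it
 * already is; CONT is also set up front when several clients wait */
static int lock_take(uint32_t context, uint32_t waiters)
{
	uint32_t old, new;

	do {
		old = atomic_load(&hw_lock);
		if (old & _DRM_LOCK_HELD)
			new = old | _DRM_LOCK_CONT;
		else
			new = context | _DRM_LOCK_HELD |
			      (waiters > 1 ? _DRM_LOCK_CONT : 0);
	} while (!atomic_compare_exchange_weak(&hw_lock, &old, new));
	return _DRM_LOCKING_CONTEXT(new) == context && (new & _DRM_LOCK_HELD);
}

/* shape of drm_lock_free() with no kernel waiters: drop HELD/CONT but
 * keep the last context in the word */
static void lock_free(void)
{
	uint32_t old, new;

	do {
		old = atomic_load(&hw_lock);
		new = _DRM_LOCKING_CONTEXT(old);
	} while (!atomic_compare_exchange_weak(&hw_lock, &old, new));
}

int main(void)
{
	printf("take ctx 3: %d\n", lock_take(3, 1));	/* 1: got it */
	printf("take ctx 5: %d\n", lock_take(5, 2));	/* 0: contended */
	lock_free();
	printf("retry ctx 5: %d\n", lock_take(5, 1));	/* 1 */
	return 0;
}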
diff --git a/drivers/char/drm/drm_mm.c b/drivers/char/drm/drm_mm.c
index 9b46b85027d..2ec1d9f2626 100644
--- a/drivers/char/drm/drm_mm.c
+++ b/drivers/char/drm/drm_mm.c
@@ -274,7 +274,6 @@ int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
return drm_mm_create_tail_node(mm, start, size);
}
-EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(drm_mm_t * mm)
{
@@ -295,4 +294,3 @@ void drm_mm_takedown(drm_mm_t * mm)
drm_free(entry, sizeof(*entry), DRM_MEM_MM);
}
-EXPORT_SYMBOL(drm_mm_takedown);
diff --git a/drivers/char/drm/drm_os_linux.h b/drivers/char/drm/drm_os_linux.h
index 2908b72daa6..0fe7b449792 100644
--- a/drivers/char/drm/drm_os_linux.h
+++ b/drivers/char/drm/drm_os_linux.h
@@ -70,9 +70,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
#endif
-/** Task queue handler arguments */
-#define DRM_TASKQUEUE_ARGS void *arg
-
/** For data going into the kernel through the ioctl argument */
#define DRM_COPY_FROM_USER_IOCTL(arg1, arg2, arg3) \
if ( copy_from_user(&arg1, arg2, arg3) ) \
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index ad54b845978..31cdde83713 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -102,6 +102,7 @@
{0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
@@ -230,10 +231,10 @@
{0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1106, 0x3304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
+ {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
{0, 0, 0}
#define i810_PCI_IDS \
@@ -296,5 +297,6 @@
{0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 7fd0da71214..b204498d1a2 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -72,7 +72,7 @@ static struct drm_proc_list {
#endif
};
-#define DRM_PROC_ENTRIES (sizeof(drm_proc_list)/sizeof(drm_proc_list[0]))
+#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
/**
* Initialize the DRI proc filesystem for a device.
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 120d10256fe..19408adcc77 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -62,6 +62,7 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->drw_lock);
spin_lock_init(&dev->tasklet_lock);
+ spin_lock_init(&dev->lock.spinlock);
init_timer(&dev->timer);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 54a63284895..b5c5b9fa84c 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -41,6 +41,30 @@
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
+static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+ pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__i386__) || defined(__x86_64__)
+ if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+ pgprot_val(tmp) |= _PAGE_PCD;
+ pgprot_val(tmp) &= ~_PAGE_PWT;
+ }
+#elif defined(__powerpc__)
+ pgprot_val(tmp) |= _PAGE_NO_CACHE;
+ if (map_type == _DRM_REGISTERS)
+ pgprot_val(tmp) |= _PAGE_GUARDED;
+#endif
+#if defined(__ia64__)
+ if (efi_range_is_wc(vma->vm_start, vma->vm_end -
+ vma->vm_start))
+ tmp = pgprot_writecombine(tmp);
+ else
+ tmp = pgprot_noncached(tmp);
+#endif
+ return tmp;
+}
+
/**
* \c nopage method for AGP virtual memory.
*
@@ -133,7 +157,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
* \param address access address.
* \return pointer to the page structure.
*
- * Get the the mapping, find the real physical page to map, get the page, and
+ * Get the mapping, find the real physical page to map, get the page, and
* return it.
*/
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
@@ -151,8 +175,7 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
offset = address - vma->vm_start;
i = (unsigned long)map->handle + offset;
- page = (map->type == _DRM_CONSISTENT) ?
- virt_to_page((void *)i) : vmalloc_to_page((void *)i);
+ page = vmalloc_to_page((void *)i);
if (!page)
return NOPAGE_SIGBUS;
get_page(page);
@@ -389,7 +412,7 @@ static struct vm_operations_struct drm_vm_sg_ops = {
* Create a new drm_vma_entry structure as the \p vma private data entry and
* add it to drm_device::vmalist.
*/
-static void drm_vm_open(struct vm_area_struct *vma)
+static void drm_vm_open_locked(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
@@ -401,15 +424,23 @@ static void drm_vm_open(struct vm_area_struct *vma)
vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
- mutex_lock(&dev->struct_mutex);
vma_entry->vma = vma;
vma_entry->next = dev->vmalist;
vma_entry->pid = current->pid;
dev->vmalist = vma_entry;
- mutex_unlock(&dev->struct_mutex);
}
}
+static void drm_vm_open(struct vm_area_struct *vma)
+{
+ drm_file_t *priv = vma->vm_file->private_data;
+ drm_device_t *dev = priv->head->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ drm_vm_open_locked(vma);
+ mutex_unlock(&dev->struct_mutex);
+}
+
/**
* \c close method for all virtual memory types.
*
@@ -460,7 +491,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
drm_device_dma_t *dma;
unsigned long length = vma->vm_end - vma->vm_start;
- lock_kernel();
dev = priv->head->dev;
dma = dev->dma;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
@@ -468,10 +498,8 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
/* Length must match exact page count */
if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
- unlock_kernel();
return -EINVAL;
}
- unlock_kernel();
if (!capable(CAP_SYS_ADMIN) &&
(dma->flags & _DRM_DMA_USE_PCI_RO)) {
@@ -494,7 +522,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED; /* Don't swap */
vma->vm_file = filp; /* Needed for drm_vm_open() */
- drm_vm_open(vma);
+ drm_vm_open_locked(vma);
return 0;
}
@@ -529,7 +557,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
* according to the mapping type and remaps the pages. Finally sets the file
* pointer and calls vm_open().
*/
-int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->head->dev;
@@ -565,7 +593,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
return -EPERM;
/* Check for valid size. */
- if (map->size != vma->vm_end - vma->vm_start)
+ if (map->size < vma->vm_end - vma->vm_start)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
@@ -600,37 +628,16 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
/* fall through to _DRM_FRAME_BUFFER... */
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
-#if defined(__i386__) || defined(__x86_64__)
- if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
- pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
- }
-#elif defined(__powerpc__)
- pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
- if (map->type == _DRM_REGISTERS)
- pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
-#endif
- vma->vm_flags |= VM_IO; /* not in core dump */
-#if defined(__ia64__)
- if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
- vma->vm_page_prot =
- pgprot_writecombine(vma->vm_page_prot);
- else
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
offset = dev->driver->get_reg_ofs(dev);
+ vma->vm_flags |= VM_IO; /* not in core dump */
+ vma->vm_page_prot = drm_io_prot(map->type, vma);
#ifdef __sparc__
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#endif
if (io_remap_pfn_range(vma, vma->vm_start,
(map->offset + offset) >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
-#else
- if (io_remap_pfn_range(vma, vma->vm_start,
- (map->offset + offset) >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
-#endif
return -EAGAIN;
DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
" offset = 0x%lx\n",
@@ -638,10 +645,15 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_start, vma->vm_end, map->offset + offset);
vma->vm_ops = &drm_vm_ops;
break;
- case _DRM_SHM:
case _DRM_CONSISTENT:
- /* Consistent memory is really like shared memory. It's only
- * allocate in a different way */
+ /* Consistent memory is really like shared memory. But
+ * it's allocated in a different way, so avoid nopage */
+ if (remap_pfn_range(vma, vma->vm_start,
+ page_to_pfn(virt_to_page(map->handle)),
+ vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ return -EAGAIN;
+ /* fall through to _DRM_SHM */
+ case _DRM_SHM:
vma->vm_ops = &drm_vm_shm_ops;
vma->vm_private_data = (void *)map;
/* Don't let this area swap. Change when
@@ -659,8 +671,20 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED; /* Don't swap */
vma->vm_file = filp; /* Needed for drm_vm_open() */
- drm_vm_open(vma);
+ drm_vm_open_locked(vma);
return 0;
}
+int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->head->dev;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_mmap_locked(filp, vma);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
EXPORT_SYMBOL(drm_mmap);
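drm_vm_open() and drm_mmap() are split into *_locked workers plus thin wrappers that take dev->struct_mutex, so mmap-time bookkeeping always runs under the mutex. A minimal pthread sketch of that wrapper/_locked convention:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;
static int vma_count;

/* caller must hold struct_mutex (the drm_vm_open_locked() convention) */
static void vm_open_locked(void)
{
	vma_count++;	/* e.g. link a new vma_entry into dev->vmalist */
}

/* public entry point: take the lock, call the worker */
static void vm_open(void)
{
	pthread_mutex_lock(&struct_mutex);
	vm_open_locked();
	pthread_mutex_unlock(&struct_mutex);
}

int main(void)
{
	vm_open();
	printf("vmas=%d\n", vma_count);
	return 0;
}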
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index 9354ce3b009..1ba15d9a171 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -34,7 +34,8 @@
#define IS_I965G(dev) (dev->pci_device == 0x2972 || \
dev->pci_device == 0x2982 || \
dev->pci_device == 0x2992 || \
- dev->pci_device == 0x29A2)
+ dev->pci_device == 0x29A2 || \
+ dev->pci_device == 0x2A02)
/* Really want an OS-independent resettable timer. Would like to have
* this loop run for (eg) 3 sec, but have the timer reset every time
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c
index db5a60450e6..1014602c43a 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/char/drm/r128_cce.c
@@ -560,9 +560,10 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
if (dev_priv->is_pci) {
#endif
dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
+ dev_priv->gart_info.table_size = R128_PCIGART_TABLE_SIZE;
dev_priv->gart_info.addr = NULL;
dev_priv->gart_info.bus_addr = 0;
- dev_priv->gart_info.is_pcie = 0;
+ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
DRM_ERROR("failed to init PCI GART!\n");
dev->dev_private = (void *)dev_priv;
diff --git a/drivers/char/drm/r128_drv.h b/drivers/char/drm/r128_drv.h
index f1efb49de8d..9086835686d 100644
--- a/drivers/char/drm/r128_drv.h
+++ b/drivers/char/drm/r128_drv.h
@@ -383,6 +383,8 @@ extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
#define R128_PERFORMANCE_BOXES 0
+#define R128_PCIGART_TABLE_SIZE 32768
+
#define R128_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
#define R128_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) )
#define R128_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) )
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h
index a881f96c983..ecda760ae8c 100644
--- a/drivers/char/drm/r300_reg.h
+++ b/drivers/char/drm/r300_reg.h
@@ -293,7 +293,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0
# define R300_PVS_CNTL_1_POS_END_SHIFT 10
# define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20
-/* Addresses are relative the the vertex program parameters area. */
+/* Addresses are relative to the vertex program parameters area. */
#define R300_VAP_PVS_CNTL_2 0x22D4
# define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
# define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 5ed96568829..68338389d83 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -830,6 +830,15 @@ static int RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
return RADEON_READ(RADEON_PCIE_DATA);
}
+static u32 RADEON_READ_IGPGART(drm_radeon_private_t *dev_priv, int addr)
+{
+ u32 ret;
+ RADEON_WRITE(RADEON_IGPGART_INDEX, addr & 0x7f);
+ ret = RADEON_READ(RADEON_IGPGART_DATA);
+ RADEON_WRITE(RADEON_IGPGART_INDEX, 0x7f);
+ return ret;
+}
+
#if RADEON_FIFO_DEBUG
static void radeon_status(drm_radeon_private_t * dev_priv)
{
@@ -1267,7 +1276,44 @@ static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
}
}
-/* Enable or disable PCI-E GART on the chip */
+/* Enable or disable IGP GART on the chip */
+static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
+{
+ u32 temp, tmp;
+
+ tmp = RADEON_READ(RADEON_AIC_CNTL);
+ if (on) {
+ DRM_DEBUG("programming igpgart %08X %08lX %08X\n",
+ dev_priv->gart_vm_start,
+ (long)dev_priv->gart_info.bus_addr,
+ dev_priv->gart_size);
+
+ RADEON_WRITE_IGPGART(RADEON_IGPGART_UNK_18, 0x1000);
+ RADEON_WRITE_IGPGART(RADEON_IGPGART_ENABLE, 0x1);
+ RADEON_WRITE_IGPGART(RADEON_IGPGART_CTRL, 0x42040800);
+ RADEON_WRITE_IGPGART(RADEON_IGPGART_BASE_ADDR,
+ dev_priv->gart_info.bus_addr);
+
+ temp = RADEON_READ_IGPGART(dev_priv, RADEON_IGPGART_UNK_39);
+ RADEON_WRITE_IGPGART(RADEON_IGPGART_UNK_39, temp);
+
+ RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev_priv->gart_vm_start);
+ dev_priv->gart_size = 32*1024*1024;
+ RADEON_WRITE(RADEON_MC_AGP_LOCATION,
+ (((dev_priv->gart_vm_start - 1 +
+ dev_priv->gart_size) & 0xffff0000) |
+ (dev_priv->gart_vm_start >> 16)));
+
+ temp = RADEON_READ_IGPGART(dev_priv, RADEON_IGPGART_ENABLE);
+ RADEON_WRITE_IGPGART(RADEON_IGPGART_ENABLE, temp);
+
+ RADEON_READ_IGPGART(dev_priv, RADEON_IGPGART_FLUSH);
+ RADEON_WRITE_IGPGART(RADEON_IGPGART_FLUSH, 0x1);
+ RADEON_READ_IGPGART(dev_priv, RADEON_IGPGART_FLUSH);
+ RADEON_WRITE_IGPGART(RADEON_IGPGART_FLUSH, 0x0);
+ }
+}
+
static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
{
u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
@@ -1302,6 +1348,11 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
{
u32 tmp;
+ if (dev_priv->flags & RADEON_IS_IGPGART) {
+ radeon_set_igpgart(dev_priv, on);
+ return;
+ }
+
if (dev_priv->flags & RADEON_IS_PCIE) {
radeon_set_pciegart(dev_priv, on);
return;
@@ -1560,8 +1611,8 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
if (dev_priv->flags & RADEON_IS_AGP) {
base = dev->agp->base;
/* Check if valid */
- if ((base + dev_priv->gart_size) > dev_priv->fb_location &&
- base < (dev_priv->fb_location + dev_priv->fb_size)) {
+ if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
+ base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
dev->agp->base);
base = 0;
@@ -1571,8 +1622,8 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
/* If not or if AGP is at 0 (Macs), try to put it elsewhere */
if (base == 0) {
base = dev_priv->fb_location + dev_priv->fb_size;
- if (((base + dev_priv->gart_size) & 0xfffffffful)
- < base)
+ if (base < dev_priv->fb_location ||
+ ((base + dev_priv->gart_size) & 0xfffffffful) < base)
base = dev_priv->fb_location
- dev_priv->gart_size;
}
@@ -1620,20 +1671,22 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
#endif
{
/* if we have an offset set from userspace */
- if (dev_priv->pcigart_offset) {
+ if (dev_priv->pcigart_offset_set) {
dev_priv->gart_info.bus_addr =
dev_priv->pcigart_offset + dev_priv->fb_location;
dev_priv->gart_info.mapping.offset =
dev_priv->gart_info.bus_addr;
dev_priv->gart_info.mapping.size =
- RADEON_PCIGART_TABLE_SIZE;
+ dev_priv->gart_info.table_size;
drm_core_ioremap(&dev_priv->gart_info.mapping, dev);
dev_priv->gart_info.addr =
dev_priv->gart_info.mapping.handle;
- dev_priv->gart_info.is_pcie =
- !!(dev_priv->flags & RADEON_IS_PCIE);
+ if (dev_priv->flags & RADEON_IS_PCIE)
+ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
+ else
+ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
dev_priv->gart_info.gart_table_location =
DRM_ATI_GART_FB;
@@ -1641,6 +1694,10 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
dev_priv->gart_info.addr,
dev_priv->pcigart_offset);
} else {
+ if (dev_priv->flags & RADEON_IS_IGPGART)
+ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
+ else
+ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
dev_priv->gart_info.gart_table_location =
DRM_ATI_GART_MAIN;
dev_priv->gart_info.addr = NULL;
@@ -1714,7 +1771,7 @@ static int radeon_do_cleanup_cp(drm_device_t * dev)
if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
{
drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
- dev_priv->gart_info.addr = NULL;
+ dev_priv->gart_info.addr = 0;
}
}
/* only clear to the start of flags */
@@ -2222,6 +2279,8 @@ int radeon_driver_firstopen(struct drm_device *dev)
drm_local_map_t *map;
drm_radeon_private_t *dev_priv = dev->dev_private;
+ dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
+
ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
drm_get_resource_len(dev, 2), _DRM_REGISTERS,
_DRM_READ_ONLY, &dev_priv->mmio);
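radeon_set_igpgart() above programs the aperture much as the PCIE path does; the MC_AGP_LOCATION write packs the top 16 bits of the aperture end into the high half of the register and the top 16 bits of the start into the low half. The packing, reproduced standalone with example values:

#include <stdint.h>
#include <stdio.h>

/* packing used for RADEON_MC_AGP_LOCATION in radeon_set_igpgart() */
static uint32_t mc_agp_location(uint32_t gart_vm_start, uint32_t gart_size)
{
	return ((gart_vm_start - 1 + gart_size) & 0xffff0000u) |
	       (gart_vm_start >> 16);
}

int main(void)
{
	uint32_t start = 0x10000000u;
	uint32_t size = 32u * 1024 * 1024;	/* the 32 MB IGP GART above */

	printf("MC_AGP_LOCATION = 0x%08x\n", mc_agp_location(start, size));
	return 0;
}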
diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h
index 8d6350dd536..66c4b6fed04 100644
--- a/drivers/char/drm/radeon_drm.h
+++ b/drivers/char/drm/radeon_drm.h
@@ -707,6 +707,7 @@ typedef struct drm_radeon_setparam {
#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */
#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */
#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */
+#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */
/* 1.14: Clients can allocate/free a surface
*/
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index 8b105f1460a..54f49ef4bef 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -95,9 +95,11 @@
* 1.24- Add general-purpose packet for manipulating scratch registers (r300)
* 1.25- Add support for r200 vertex programs (R200_EMIT_VAP_PVS_CNTL,
* new packet type)
+ * 1.26- Add support for variable size PCI(E) gart aperture
+ * 1.27- Add support for IGP GART
*/
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 25
+#define DRIVER_MINOR 27
#define DRIVER_PATCHLEVEL 0
/*
@@ -143,6 +145,7 @@ enum radeon_chip_flags {
RADEON_IS_PCIE = 0x00200000UL,
RADEON_NEW_MEMMAP = 0x00400000UL,
RADEON_IS_PCI = 0x00800000UL,
+ RADEON_IS_IGPGART = 0x01000000UL,
};
#define GET_RING_HEAD(dev_priv) (dev_priv->writeback_works ? \
@@ -240,7 +243,6 @@ typedef struct drm_radeon_private {
int do_boxes;
int page_flipping;
- int current_page;
u32 color_fmt;
unsigned int front_offset;
@@ -280,6 +282,7 @@ typedef struct drm_radeon_private {
struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];
unsigned long pcigart_offset;
+ unsigned int pcigart_offset_set;
drm_ati_pcigart_info gart_info;
u32 scratch_ages[5];
@@ -432,6 +435,15 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
#define RADEON_PCIE_TX_GART_END_LO 0x16
#define RADEON_PCIE_TX_GART_END_HI 0x17
+#define RADEON_IGPGART_INDEX 0x168
+#define RADEON_IGPGART_DATA 0x16c
+#define RADEON_IGPGART_UNK_18 0x18
+#define RADEON_IGPGART_CTRL 0x2b
+#define RADEON_IGPGART_BASE_ADDR 0x2c
+#define RADEON_IGPGART_FLUSH 0x2e
+#define RADEON_IGPGART_ENABLE 0x38
+#define RADEON_IGPGART_UNK_39 0x39
+
#define RADEON_MPP_TB_CONFIG 0x01c0
#define RADEON_MEM_CNTL 0x0140
#define RADEON_MEM_SDRAM_MODE_REG 0x0158
@@ -964,6 +976,14 @@ do { \
RADEON_WRITE( RADEON_CLOCK_CNTL_DATA, (val) ); \
} while (0)
+#define RADEON_WRITE_IGPGART( addr, val ) \
+do { \
+ RADEON_WRITE( RADEON_IGPGART_INDEX, \
+ ((addr) & 0x7f) | (1 << 8)); \
+ RADEON_WRITE( RADEON_IGPGART_DATA, (val) ); \
+ RADEON_WRITE( RADEON_IGPGART_INDEX, 0x7f ); \
+} while (0)
+
#define RADEON_WRITE_PCIE( addr, val ) \
do { \
RADEON_WRITE8( RADEON_PCIE_INDEX, \
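RADEON_WRITE_IGPGART()/RADEON_READ_IGPGART() go through an index/data register pair; the write macro additionally sets bit 8 of the index, and both park the index at 0x7f afterwards. A toy model of that indexed access, with a plain array standing in for the hardware and bit 8 treated as a write enable (an assumption, not something the patch documents):

#include <stdint.h>
#include <stdio.h>

#define IGPGART_REGS 0x80

static uint32_t igp_regs[IGPGART_REGS];	/* pretend backing store */
static uint32_t index_reg;

/* model of the MMIO pair behind RADEON_IGPGART_INDEX/DATA */
static void mmio_write_index(uint32_t v) { index_reg = v; }
static void mmio_write_data(uint32_t v)
{
	if (index_reg & (1u << 8))	/* bit 8 assumed to be a write enable */
		igp_regs[index_reg & 0x7f] = v;
}
static uint32_t mmio_read_data(void) { return igp_regs[index_reg & 0x7f]; }

/* same sequence as the RADEON_WRITE_IGPGART() macro */
static void igpgart_write(uint32_t addr, uint32_t val)
{
	mmio_write_index((addr & 0x7f) | (1u << 8));
	mmio_write_data(val);
	mmio_write_index(0x7f);		/* park the index */
}

/* same sequence as RADEON_READ_IGPGART() */
static uint32_t igpgart_read(uint32_t addr)
{
	uint32_t v;

	mmio_write_index(addr & 0x7f);
	v = mmio_read_data();
	mmio_write_index(0x7f);
	return v;
}

int main(void)
{
	igpgart_write(0x2c /* BASE_ADDR */, 0x12345000);
	printf("0x%08x\n", igpgart_read(0x2c));
	return 0;
}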
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index 938eccb78cc..98c5f1d3a8e 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -773,7 +773,7 @@ static void radeon_clear_box(drm_radeon_private_t * dev_priv,
RADEON_GMC_SRC_DATATYPE_COLOR |
RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS);
- if (dev_priv->page_flipping && dev_priv->current_page == 1) {
+ if (dev_priv->sarea_priv->pfCurrentPage == 1) {
OUT_RING(dev_priv->front_pitch_offset);
} else {
OUT_RING(dev_priv->back_pitch_offset);
@@ -861,7 +861,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
dev_priv->stats.clears++;
- if (dev_priv->page_flipping && dev_priv->current_page == 1) {
+ if (dev_priv->sarea_priv->pfCurrentPage == 1) {
unsigned int tmp = flags;
flags &= ~(RADEON_FRONT | RADEON_BACK);
@@ -1382,7 +1382,7 @@ static void radeon_cp_dispatch_swap(drm_device_t * dev)
/* Make this work even if front & back are flipped:
*/
OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1));
- if (dev_priv->current_page == 0) {
+ if (dev_priv->sarea_priv->pfCurrentPage == 0) {
OUT_RING(dev_priv->back_pitch_offset);
OUT_RING(dev_priv->front_pitch_offset);
} else {
@@ -1416,12 +1416,12 @@ static void radeon_cp_dispatch_flip(drm_device_t * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_sarea_t *sarea = (drm_sarea_t *) dev_priv->sarea->handle;
- int offset = (dev_priv->current_page == 1)
+ int offset = (dev_priv->sarea_priv->pfCurrentPage == 1)
? dev_priv->front_offset : dev_priv->back_offset;
RING_LOCALS;
- DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
+ DRM_DEBUG("%s: pfCurrentPage=%d\n",
__FUNCTION__,
- dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);
+ dev_priv->sarea_priv->pfCurrentPage);
/* Do some trivial performance monitoring...
*/
@@ -1449,8 +1449,8 @@ static void radeon_cp_dispatch_flip(drm_device_t * dev)
* performing the swapbuffer ioctl.
*/
dev_priv->sarea_priv->last_frame++;
- dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
- 1 - dev_priv->current_page;
+ dev_priv->sarea_priv->pfCurrentPage =
+ 1 - dev_priv->sarea_priv->pfCurrentPage;
BEGIN_RING(2);
@@ -2152,24 +2152,10 @@ static int radeon_do_init_pageflip(drm_device_t * dev)
ADVANCE_RING();
dev_priv->page_flipping = 1;
- dev_priv->current_page = 0;
- dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;
- return 0;
-}
-
-/* Called whenever a client dies, from drm_release.
- * NOTE: Lock isn't necessarily held when this is called!
- */
-static int radeon_do_cleanup_pageflip(drm_device_t * dev)
-{
- drm_radeon_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("\n");
-
- if (dev_priv->current_page != 0)
- radeon_cp_dispatch_flip(dev);
+ if (dev_priv->sarea_priv->pfCurrentPage != 1)
+ dev_priv->sarea_priv->pfCurrentPage = 0;
- dev_priv->page_flipping = 0;
return 0;
}
@@ -3145,10 +3131,16 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
break;
case RADEON_SETPARAM_PCIGART_LOCATION:
dev_priv->pcigart_offset = sp.value;
+ dev_priv->pcigart_offset_set = 1;
break;
case RADEON_SETPARAM_NEW_MEMMAP:
dev_priv->new_memmap = sp.value;
break;
+ case RADEON_SETPARAM_PCIGART_TABLE_SIZE:
+ dev_priv->gart_info.table_size = sp.value;
+ if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE)
+ dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
+ break;
default:
DRM_DEBUG("Invalid parameter %d\n", sp.param);
return DRM_ERR(EINVAL);
@@ -3168,9 +3160,7 @@ void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
if (dev->dev_private) {
drm_radeon_private_t *dev_priv = dev->dev_private;
- if (dev_priv->page_flipping) {
- radeon_do_cleanup_pageflip(dev);
- }
+ dev_priv->page_flipping = 0;
radeon_mem_release(filp, dev_priv->gart_heap);
radeon_mem_release(filp, dev_priv->fb_heap);
radeon_surfaces_release(filp, dev_priv);
@@ -3179,6 +3169,14 @@ void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp)
void radeon_driver_lastclose(drm_device_t * dev)
{
+ if (dev->dev_private) {
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ if (dev_priv->sarea_priv &&
+ dev_priv->sarea_priv->pfCurrentPage != 0)
+ radeon_cp_dispatch_flip(dev);
+ }
+
radeon_do_release(dev);
}
diff --git a/drivers/char/drm/sis_drv.c b/drivers/char/drm/sis_drv.c
index 3d5b3218b6f..690e0af8e7c 100644
--- a/drivers/char/drm/sis_drv.c
+++ b/drivers/char/drm/sis_drv.c
@@ -71,7 +71,7 @@ static struct drm_driver driver = {
.context_dtor = NULL,
.dma_quiescent = sis_idle,
.reclaim_buffers = NULL,
- .reclaim_buffers_locked = sis_reclaim_buffers_locked,
+ .reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
.lastclose = sis_lastclose,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
diff --git a/drivers/char/drm/via_dma.c b/drivers/char/drm/via_dma.c
index c0539c6299c..13a9c5ca459 100644
--- a/drivers/char/drm/via_dma.c
+++ b/drivers/char/drm/via_dma.c
@@ -252,7 +252,7 @@ static int via_dma_init(DRM_IOCTL_ARGS)
break;
case VIA_DMA_INITIALIZED:
retcode = (dev_priv->ring.virtual_start != NULL) ?
- 0 : DRM_ERR(EFAULT);
+ 0 : DRM_ERR(EFAULT);
break;
default:
retcode = DRM_ERR(EINVAL);
@@ -432,56 +432,34 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
{
int paused, count;
volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
+ uint32_t reader, ptr;
+ paused = 0;
via_flush_write_combine();
- while (!*(via_get_dma(dev_priv) - 1)) ;
- *dev_priv->last_pause_ptr = pause_addr_lo;
+ (void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1);
+ *paused_at = pause_addr_lo;
via_flush_write_combine();
-
- /*
- * The below statement is inserted to really force the flush.
- * Not sure it is needed.
- */
-
- while (!*dev_priv->last_pause_ptr) ;
+ (void) *paused_at;
+ reader = *(dev_priv->hw_addr_ptr);
+ ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
+ dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
- while (!*dev_priv->last_pause_ptr) ;
- paused = 0;
- count = 20;
-
- while (!(paused = (VIA_READ(0x41c) & 0x80000000)) && count--) ;
- if ((count <= 8) && (count >= 0)) {
- uint32_t rgtr, ptr;
- rgtr = *(dev_priv->hw_addr_ptr);
- ptr = ((volatile char *)dev_priv->last_pause_ptr -
- dev_priv->dma_ptr) + dev_priv->dma_offset +
- (uint32_t) dev_priv->agpAddr + 4 - CMDBUF_ALIGNMENT_SIZE;
- if (rgtr <= ptr) {
- DRM_ERROR
- ("Command regulator\npaused at count %d, address %x, "
- "while current pause address is %x.\n"
- "Please mail this message to "
- "<unichrome-devel@lists.sourceforge.net>\n", count,
- rgtr, ptr);
- }
+ if ((ptr - reader) <= dev_priv->dma_diff) {
+ count = 10000000;
+ while (!(paused = (VIA_READ(0x41c) & 0x80000000)) && count--);
}
if (paused && !no_pci_fire) {
- uint32_t rgtr, ptr;
- uint32_t ptr_low;
+ reader = *(dev_priv->hw_addr_ptr);
+ if ((ptr - reader) == dev_priv->dma_diff) {
- count = 1000000;
- while ((VIA_READ(VIA_REG_STATUS) & VIA_CMD_RGTR_BUSY)
- && count--) ;
+ /*
+ * There is a concern that these writes may stall the PCI bus
+ * if the GPU is not idle. However, idling the GPU first
+ * doesn't make a difference.
+ */
- rgtr = *(dev_priv->hw_addr_ptr);
- ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
- dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
-
- ptr_low = (ptr > 3 * CMDBUF_ALIGNMENT_SIZE) ?
- ptr - 3 * CMDBUF_ALIGNMENT_SIZE : 0;
- if (rgtr <= ptr && rgtr >= ptr_low) {
VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
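
The rewritten via_hook_segment() no longer infers a stalled regulator from how long the pause bit took to appear; it compares the hardware read pointer against the bus address of the pause word it just wrote, allowing dma_diff bytes of slack (dma_diff is measured once in via_cmdbuf_start(), further down). A stand-alone model of that comparison, with made-up example addresses:

#include <stdio.h>
#include <stdint.h>

/* ptr: bus address of the pause command word we wrote (+4, as in the hunk).
 * reader: snapshot of *hw_addr_ptr, i.e. where the regulator is reading.
 * dma_diff: hardware-specific distance measured at command-buffer start. */
static int regulator_near_pause(uint32_t ptr, uint32_t reader, uint32_t dma_diff)
{
	/* Within dma_diff bytes: the regulator has reached (or will reach)
	 * the pause word, so the real code polls 0x41c for the paused bit
	 * and, if the reader sits exactly dma_diff behind, re-issues the
	 * pause over PCI. */
	return (ptr - reader) <= dma_diff;
}

int main(void)
{
	uint32_t agp_base = 0x00d00000;              /* example AGP aperture address   */
	uint32_t ptr      = agp_base + 0x1000 + 4;   /* example pause-word bus address */
	uint32_t reader   = agp_base + 0x1000;       /* example read-pointer snapshot  */

	printf("near pause point: %d\n", regulator_near_pause(ptr, reader, 8));
	return 0;
}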
@@ -494,6 +472,9 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
static int via_wait_idle(drm_via_private_t * dev_priv)
{
int count = 10000000;
+
+ while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && count--);
+
while (count-- && (VIA_READ(VIA_REG_STATUS) &
(VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
VIA_3D_ENG_BUSY))) ;
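
The loop added to via_wait_idle() reuses the bounded busy-wait idiom that appears throughout these hunks (count = 10000000; while (cond && count--);). A tiny stand-alone model of just that idiom; the register read is a fake, so the timeout path is what actually runs here:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_status = 0x2;   /* stand-in for VIA_READ(VIA_REG_STATUS) */

/* Spin while any bit in mask is set, giving up after count iterations.
 * Returns nonzero if the bits cleared before the budget ran out. */
static int spin_while_set(uint32_t mask, long count)
{
	while ((fake_status & mask) && count--)
		;
	return count >= 0;
}

int main(void)
{
	printf("cleared in time: %d\n", spin_while_set(0x2, 1000));
	return 0;
}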
@@ -537,6 +518,9 @@ static void via_cmdbuf_start(drm_via_private_t * dev_priv)
uint32_t end_addr, end_addr_lo;
uint32_t command;
uint32_t agp_base;
+ uint32_t ptr;
+ uint32_t reader;
+ int count;
dev_priv->dma_low = 0;
@@ -554,7 +538,7 @@ static void via_cmdbuf_start(drm_via_private_t * dev_priv)
&pause_addr_hi, &pause_addr_lo, 1) - 1;
via_flush_write_combine();
- while (!*dev_priv->last_pause_ptr) ;
+ (void) *(volatile uint32_t *)dev_priv->last_pause_ptr;
VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
VIA_WRITE(VIA_REG_TRANSPACE, command);
@@ -566,6 +550,24 @@ static void via_cmdbuf_start(drm_via_private_t * dev_priv)
DRM_WRITEMEMORYBARRIER();
VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
VIA_READ(VIA_REG_TRANSPACE);
+
+ dev_priv->dma_diff = 0;
+
+ count = 10000000;
+ while (!(VIA_READ(0x41c) & 0x80000000) && count--);
+
+ reader = *(dev_priv->hw_addr_ptr);
+ ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
+ dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
+
+ /*
+ * This is the difference between where we tell the
+ * command reader to pause and where it actually pauses.
+ * This differs between hw implementations, so we need to
+ * detect it.
+ */
+
+ dev_priv->dma_diff = ptr - reader;
}
static void via_pad_cache(drm_via_private_t * dev_priv, int qwords)
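
The block added to via_cmdbuf_start() calibrates dma_diff: after forcing the regulator to pause, it records how far the hardware read pointer actually stopped from the requested pause address, and via_hook_segment() (above) reuses that distance. A trivial stand-alone illustration with made-up addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ptr      = 0x00d01004;   /* example: bus address of the pause word + 4     */
	uint32_t reader   = 0x00d00ffc;   /* example: *hw_addr_ptr once the paused bit set  */
	uint32_t dma_diff = ptr - reader; /* remembered and compared against in via_hook_segment() */

	printf("dma_diff = %u bytes\n", (unsigned)dma_diff);
	return 0;
}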
@@ -592,7 +594,6 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
uint32_t pause_addr_lo, pause_addr_hi;
uint32_t jump_addr_lo, jump_addr_hi;
volatile uint32_t *last_pause_ptr;
- uint32_t dma_low_save1, dma_low_save2;
agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
@@ -619,31 +620,11 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
&pause_addr_lo, 0);
*last_pause_ptr = pause_addr_lo;
- dma_low_save1 = dev_priv->dma_low;
-
- /*
- * Now, set a trap that will pause the regulator if it tries to rerun the old
- * command buffer. (Which may happen if via_hook_segment detecs a command regulator pause
- * and reissues the jump command over PCI, while the regulator has already taken the jump
- * and actually paused at the current buffer end).
- * There appears to be no other way to detect this condition, since the hw_addr_pointer
- * does not seem to get updated immediately when a jump occurs.
- */
- last_pause_ptr =
- via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
- &pause_addr_lo, 0) - 1;
- via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
- &pause_addr_lo, 0);
- *last_pause_ptr = pause_addr_lo;
-
- dma_low_save2 = dev_priv->dma_low;
- dev_priv->dma_low = dma_low_save1;
- via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
- dev_priv->dma_low = dma_low_save2;
- via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
+ via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
}
+
static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
{
via_cmdbuf_jump(dev_priv);
diff --git a/drivers/char/drm/via_drv.c b/drivers/char/drm/via_drv.c
index bb9dde8b191..2d4957ab256 100644
--- a/drivers/char/drm/via_drv.c
+++ b/drivers/char/drm/via_drv.c
@@ -52,7 +52,8 @@ static struct drm_driver driver = {
.dma_quiescent = via_driver_dma_quiescent,
.dri_library_name = dri_library_name,
.reclaim_buffers = drm_core_reclaim_buffers,
- .reclaim_buffers_locked = via_reclaim_buffers_locked,
+ .reclaim_buffers_locked = NULL,
+ .reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
.lastclose = via_lastclose,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
diff --git a/drivers/char/drm/via_drv.h b/drivers/char/drm/via_drv.h
index 8b8778d4a42..b46ca8e6306 100644
--- a/drivers/char/drm/via_drv.h
+++ b/drivers/char/drm/via_drv.h
@@ -29,11 +29,11 @@
#define DRIVER_NAME "via"
#define DRIVER_DESC "VIA Unichrome / Pro"
-#define DRIVER_DATE "20061227"
+#define DRIVER_DATE "20070202"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 11
-#define DRIVER_PATCHLEVEL 0
+#define DRIVER_PATCHLEVEL 1
#include "via_verifier.h"
@@ -93,6 +93,7 @@ typedef struct drm_via_private {
unsigned long vram_offset;
unsigned long agp_offset;
drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
+ uint32_t dma_diff;
} drm_via_private_t;
enum via_family {
diff --git a/drivers/char/drm/via_mm.h b/drivers/char/drm/via_mm.h
deleted file mode 100644
index d57efda57c7..00000000000
--- a/drivers/char/drm/via_mm.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef _via_drm_mm_h_
-#define _via_drm_mm_h_
-
-typedef struct {
- unsigned int context;
- unsigned int size;
- unsigned long offset;
- unsigned long free;
-} drm_via_mm_t;
-
-typedef struct {
- unsigned int size;
- unsigned long handle;
- void *virtual;
-} drm_via_dma_t;
-
-#endif