author | Alan Hourihane <alanh@fairlite.demon.co.uk> | 2007-06-29 20:09:44 +0100
committer | Alan Hourihane <alanh@fairlite.demon.co.uk> | 2007-06-29 20:09:44 +0100
commit | 8a78dead291ffdb5a8774419cdca369a1e27cad9
tree | 399b8378e32036bdc7dadc10c639465a8fb21681 /shared-core
parent | e79e2a58161d44754fd55507e155b7e12a09c4d2
parent | a27af4c4a665864df09123f177ca7269e48f6171
Merge branch 'master' of git+ssh://git.freedesktop.org/git/mesa/drm into modesetting-101
Conflicts:
linux-core/drm_drv.c
linux-core/drm_fops.c
linux-core/drm_objects.h
linux-core/drm_stub.c
shared-core/i915_dma.c
Diffstat (limited to 'shared-core')
36 files changed, 2446 insertions, 827 deletions
diff --git a/shared-core/drm_pciids.txt b/shared-core/drm_pciids.txt index ad67dda2..126974d0 100644 --- a/shared-core/drm_pciids.txt +++ b/shared-core/drm_pciids.txt @@ -98,13 +98,20 @@ 0x1002 0x5653 CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 M26" 0x1002 0x5834 CHIP_RS300|RADEON_IS_IGP "ATI Radeon RS300 9100 IGP" 0x1002 0x5835 CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY "ATI Radeon RS300 Mobility IGP" +0x1002 0x5954 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI RS480 XPRESS 200G" 0x1002 0x5955 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon XPRESS 200M 5955" +0x1002 0x5974 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS482 XPRESS 200" +0x1002 0x5975 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS485 XPRESS 1100 IGP" 0x1002 0x5960 CHIP_RV280 "ATI Radeon RV280 9250" 0x1002 0x5961 CHIP_RV280 "ATI Radeon RV280 9200" 0x1002 0x5962 CHIP_RV280 "ATI Radeon RV280 9200" 0x1002 0x5964 CHIP_RV280 "ATI Radeon RV280 9200 SE" 0x1002 0x5965 CHIP_RV280 "ATI FireMV 2200 PCI" 0x1002 0x5969 CHIP_RV100 "ATI ES1000 RN50" +0x1002 0x5a41 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS400 XPRESS 200" +0x1002 0x5a42 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS400 XPRESS 200M" +0x1002 0x5a61 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RC410 XPRESS 200" +0x1002 0x5a62 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RC410 XPRESS 200M" 0x1002 0x5b60 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X300 SE" 0x1002 0x5b62 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X600 Pro" 0x1002 0x5b63 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X550" @@ -204,6 +211,8 @@ 0x1039 0x6300 0 "SiS 630" 0x1039 0x6330 SIS_CHIP_315 "SiS 661" 0x1039 0x7300 0 "SiS 730" +0x18CA 0x0040 SIS_CHIP_315 "Volari V3XT/V5/V8" +0x18CA 0x0042 SIS_CHIP_315 "Volari Unknown" [tdfx] 0x121a 0x0003 0 "3dfx Voodoo Banshee" @@ -275,11 +284,16 @@ 0x8086 0x2592 CHIP_I9XX|CHIP_I915 "Intel i915GM" 0x8086 0x2772 CHIP_I9XX|CHIP_I915 "Intel i945G" 0x8086 0x27A2 CHIP_I9XX|CHIP_I915 "Intel i945GM" +0x8086 0x27AE CHIP_I9XX|CHIP_I915 "Intel i945GME" 0x8086 0x2972 CHIP_I9XX|CHIP_I965 "Intel i946GZ" 0x8086 0x2982 CHIP_I9XX|CHIP_I965 "Intel i965G" 0x8086 0x2992 CHIP_I9XX|CHIP_I965 "Intel i965Q" 0x8086 0x29A2 CHIP_I9XX|CHIP_I965 "Intel i965G" 0x8086 0x2A02 CHIP_I9XX|CHIP_I965 "Intel i965GM" +0x8086 0x2A12 CHIP_I9XX|CHIP_I965 "Intel i965GME/GLE" +0x8086 0x29C2 CHIP_I9XX|CHIP_I915 "Intel G33" +0x8086 0x29B2 CHIP_I9XX|CHIP_I915 "Intel Q35" +0x8086 0x29D2 CHIP_I9XX|CHIP_I915 "Intel Q33" [imagine] 0x105d 0x2309 IMAGINE_128 "Imagine 128" @@ -582,6 +596,9 @@ 0x10de 0x018d NV_17 "GeForce4 448 Go" 0x10de 0x0191 NV_50 "GeForce 8800 GTX" 0x10de 0x0193 NV_50 "GeForce 8800 GTS" +0x10de 0x0194 NV_50 "GeForce 8800 Ultra" +0x10de 0x019d NV_50 "Quadro FX 5600" +0x10de 0x019e NV_50 "Quadro FX 4600" 0x10de 0x01a0 NV_11|NV_NFORCE "GeForce2 MX Integrated Graphics" 0x10de 0x01d1 NV_44 "GeForce 7300 LE" 0x10de 0x01d6 NV_44 "GeForce Go 7200" @@ -688,6 +705,11 @@ 0x10de 0x03d1 NV_44 "GeForce 6100 nForce 405" 0x10de 0x03d2 NV_44 "GeForce 6100 nForce 400" 0x10de 0x03d5 NV_44 "GeForce 6100 nForce 420" +0x10de 0x0400 NV_50 "GeForce 8600 GTS" +0x10de 0x0402 NV_50 "GeForce 8600 GT" +0x10de 0x0421 NV_50 "GeForce 8500 GT" +0x10de 0x0422 NV_50 "GeForce 8400 GS" +0x10de 0x0423 NV_50 "GeForce 8300 GS" 0x12d2 0x0008 NV_03 "NV1" 0x12d2 0x0009 NV_03 "DAC64" 0x12d2 
0x0018 NV_03 "Riva128" diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index a5e51de4..c95f1857 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -78,19 +78,15 @@ void i915_kernel_lost_context(drm_device_t * dev) dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; } -int i915_dma_cleanup(drm_device_t * dev) +static int i915_dma_cleanup(drm_device_t * dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; /* Make sure interrupts are disabled here because the uninstall ioctl * may not have been called from userspace and after dev_private * is freed, it's too late. */ - I915_WRITE(LP_RING + RING_LEN, 0); - if (dev->irq) drm_irq_uninstall(dev); - return 0; } @@ -98,6 +94,8 @@ static int i915_initialize(drm_device_t * dev, drm_i915_private_t * dev_priv, drm_i915_init_t * init) { + memset(dev_priv, 0, sizeof(drm_i915_private_t)); + dev_priv->sarea = drm_getsarea(dev); if (!dev_priv->sarea) { DRM_ERROR("can not find sarea!\n"); @@ -106,11 +104,13 @@ static int i915_initialize(drm_device_t * dev, return DRM_ERR(EINVAL); } - memset((void *)(dev_priv->ring.virtual_start), 0, dev_priv->ring.Size); - - I915_WRITE(LP_RING + RING_START, dev_priv->ring.Start); - I915_WRITE(LP_RING + RING_LEN, ((dev_priv->ring.Size - 4096) & RING_NR_PAGES) | (RING_NO_REPORT | RING_VALID)); - + dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset); + if (!dev_priv->mmio_map) { + dev->dev_private = (void *)dev_priv; + i915_dma_cleanup(dev); + DRM_ERROR("can not find mmio map!\n"); + return DRM_ERR(EINVAL); + } dev_priv->cpp = init->cpp; dev_priv->sarea_priv->pf_current_page = 0; @@ -128,24 +128,6 @@ static int i915_initialize(drm_device_t * dev, */ dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A; - /* Program Hardware Status Page */ - dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, - 0xffffffff); - - if (!dev_priv->status_page_dmah) { - dev->dev_private = (void *)dev_priv; - i915_dma_cleanup(dev); - DRM_ERROR("Can not allocate hardware status page\n"); - return DRM_ERR(ENOMEM); - } - dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; - dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; - - memset(dev_priv->hw_status_page, 0, PAGE_SIZE); - DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); - - I915_WRITE(0x02080, dev_priv->dma_status_page); - DRM_DEBUG("Enabled hardware status page\n"); dev->dev_private = (void *)dev_priv; return 0; } @@ -156,8 +138,15 @@ static int i915_dma_resume(drm_device_t * dev) DRM_DEBUG("%s\n", __FUNCTION__); - I915_WRITE(I915REG_HWS_PGA, dev_priv->dma_status_page); - DRM_DEBUG("Enabled hardware status page\n"); + if (!dev_priv->sarea) { + DRM_ERROR("can not find sarea!\n"); + return DRM_ERR(EINVAL); + } + + if (!dev_priv->mmio_map) { + DRM_ERROR("can not find mmio map!\n"); + return DRM_ERR(EINVAL); + } return 0; } @@ -353,10 +342,12 @@ void i915_emit_breadcrumb(drm_device_t *dev) drm_i915_private_t *dev_priv = dev->dev_private; RING_LOCALS; - dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter; + if (++dev_priv->counter > BREADCRUMB_MASK) { + dev_priv->counter = 1; + DRM_DEBUG("Breadcrumb counter wrapped around\n"); + } - if (dev_priv->counter > 0x7FFFFFFFUL) - dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1; + dev_priv->sarea_priv->last_enqueue = dev_priv->counter; BEGIN_LP_RING(4); OUT_RING(CMD_STORE_DWORD_IDX); @@ -391,7 +382,9 @@ int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush) static int i915_dispatch_cmdbuffer(drm_device_t * dev, 
drm_i915_cmdbuffer_t * cmd) { +#ifdef I915_HAVE_FENCE drm_i915_private_t *dev_priv = dev->dev_private; +#endif int nbox = cmd->num_cliprects; int i = 0, count, ret; @@ -776,7 +769,7 @@ static int i915_mmio(DRM_IOCTL_ARGS) return DRM_ERR(EINVAL); e = &mmio_table[mmio.reg]; - base = dev_priv->mmio_map->handle + e->offset; + base = (u8 *) dev_priv->mmio_map->handle + e->offset; switch (mmio.read_write) { case I915_MMIO_READ: @@ -802,6 +795,47 @@ static int i915_mmio(DRM_IOCTL_ARGS) return 0; } +static int i915_set_status_page(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_hws_addr_t hws; + + if (!dev_priv) { + DRM_ERROR("%s called with no initialization\n", __FUNCTION__); + return DRM_ERR(EINVAL); + } + DRM_COPY_FROM_USER_IOCTL(hws, (drm_i915_hws_addr_t __user *) data, + sizeof(hws)); + printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws.addr); + + dev_priv->status_gfx_addr = hws.addr & (0x1ffff<<12); + + dev_priv->hws_map.offset = dev->agp->agp_info.aper_base + hws.addr; + dev_priv->hws_map.size = 4*1024; + dev_priv->hws_map.type = 0; + dev_priv->hws_map.flags = 0; + dev_priv->hws_map.mtrr = 0; + + drm_core_ioremap(&dev_priv->hws_map, dev); + if (dev_priv->hws_map.handle == NULL) { + dev->dev_private = (void *)dev_priv; + i915_dma_cleanup(dev); + dev_priv->status_gfx_addr = 0; + DRM_ERROR("can not ioremap virtual address for" + " G33 hw status page\n"); + return DRM_ERR(ENOMEM); + } + dev_priv->hw_status_page = dev_priv->hws_map.handle; + + memset(dev_priv->hw_status_page, 0, PAGE_SIZE); + I915_WRITE(I915REG_HWS_PGA, dev_priv->status_gfx_addr); + DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n", + dev_priv->status_gfx_addr); + DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page); + return 0; +} + drm_ioctl_desc_t i915_ioctls[] = { [DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH}, @@ -820,6 +854,7 @@ drm_ioctl_desc_t i915_ioctls[] = { [DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] = { i915_vblank_pipe_get, DRM_AUTH }, [DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] = {i915_vblank_swap, DRM_AUTH}, [DRM_IOCTL_NR(DRM_I915_MMIO)] = {i915_mmio, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_I915_HWS_ADDR)] = {i915_set_status_page, DRM_AUTH}, }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h index 1f32313e..1c6ff4d3 100644 --- a/shared-core/i915_drm.h +++ b/shared-core/i915_drm.h @@ -159,6 +159,7 @@ typedef struct _drm_i915_sarea { #define DRM_I915_GET_VBLANK_PIPE 0x0e #define DRM_I915_VBLANK_SWAP 0x0f #define DRM_I915_MMIO 0x10 +#define DRM_I915_HWS_ADDR 0x11 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) @@ -314,4 +315,8 @@ typedef struct drm_i915_mmio { void __user *data; } drm_i915_mmio_t; +typedef struct drm_i915_hws_addr { + uint64_t addr; +} drm_i915_hws_addr_t; + #endif /* _I915_DRM_H_ */ diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index b0aa5df1..35451feb 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -103,6 +103,8 @@ typedef struct drm_i915_private { void *hw_status_page; dma_addr_t dma_status_page; uint32_t counter; + unsigned int status_gfx_addr; + drm_local_map_t hws_map; unsigned int cpp; int use_mi_batchbuffer_start; @@ -526,6 +528,9 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller); #define CMD_OP_DESTBUFFER_INFO 
((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) +#define BREADCRUMB_BITS 31 +#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1) + #define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5]) #define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) @@ -1012,12 +1017,18 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller); #define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \ (dev)->pci_device == 0x2982 || \ (dev)->pci_device == 0x2992 || \ - (dev)->pci_device == 0x29A2) + (dev)->pci_device == 0x29A2 || \ + (dev)->pci_device == 0x2A02 || \ + (dev)->pci_device == 0x2A12) #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ IS_I945GM(dev) || IS_I965G(dev)) +#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ + (dev)->pci_device == 0x29B2 || \ + (dev)->pci_device == 0x29D2) + #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ IS_I945GM(dev)) diff --git a/shared-core/i915_init.c b/shared-core/i915_init.c index 7516b2ce..83219e49 100644 --- a/shared-core/i915_init.c +++ b/shared-core/i915_init.c @@ -228,22 +228,23 @@ int i915_driver_load(drm_device_t *dev, unsigned long flags) dev_priv->allow_batchbuffer = 1; /* Program Hardware Status Page */ - dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, - 0xffffffff); - - if (!dev_priv->status_page_dmah) { - dev->dev_private = (void *)dev_priv; - i915_dma_cleanup(dev); - DRM_ERROR("Can not allocate hardware status page\n"); - return DRM_ERR(ENOMEM); + if (!IS_G33(dev)) { + dev_priv->status_page_dmah = + drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff); + + if (!dev_priv->status_page_dmah) { + dev->dev_private = (void *)dev_priv; + i915_dma_cleanup(dev); + DRM_ERROR("Can not allocate hardware status page\n"); + return DRM_ERR(ENOMEM); + } + dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; + dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; + + memset(dev_priv->hw_status_page, 0, PAGE_SIZE); + + I915_WRITE(I915REG_HWS_PGA, dev_priv->dma_status_page); } - dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; - dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; - - memset(dev_priv->hw_status_page, 0, PAGE_SIZE); - DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); - - I915_WRITE(I915REG_HWS_PGA, dev_priv->dma_status_page); DRM_DEBUG("Enabled hardware status page\n"); intel_modeset_init(dev); diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index 2d4df76e..98f51b2b 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -381,13 +381,6 @@ void i915_user_irq_off(drm_i915_private_t *dev_priv) spin_unlock(&dev_priv->user_irq_lock); } -static int wait_compare(struct drm_device *dev, void *priv) -{ - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - int irq_nr = (u64)priv; - - return (READ_BREADCRUMB(dev_priv) >= irq_nr); -} static int i915_wait_irq(drm_device_t * dev, int irq_nr) { @@ -403,8 +396,8 @@ static int i915_wait_irq(drm_device_t * dev, int irq_nr) dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; i915_user_irq_on(dev_priv); - ret = drm_wait_on(dev, &dev_priv->irq_queue, 3 * DRM_HZ, wait_compare, - (void *)(u64)irq_nr); + DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, + READ_BREADCRUMB(dev_priv) >= irq_nr); i915_user_irq_off(dev_priv); if (ret == DRM_ERR(EBUSY)) { @@ -735,11 +728,7 @@ void i915_driver_irq_postinstall(drm_device_t * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 
- dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED; - INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); - dev_priv->swaps_pending = 0; - - dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED; + spin_lock_init(&dev_priv->swaps_lock); INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); dev_priv->swaps_pending = 0; diff --git a/shared-core/mga_dma.c b/shared-core/mga_dma.c index 78448282..d48313c7 100644 --- a/shared-core/mga_dma.c +++ b/shared-core/mga_dma.c @@ -551,7 +551,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev, drm_map_list_t *_entry; unsigned long agp_token = 0; - list_for_each_entry(_entry, &dev->maplist->head, head) { + list_for_each_entry(_entry, &dev->maplist, head) { if (_entry->map == dev->agp_buffer_map) agp_token = _entry->user_token; } diff --git a/shared-core/nouveau_drm.h b/shared-core/nouveau_drm.h index 1e7322e0..0758991a 100644 --- a/shared-core/nouveau_drm.h +++ b/shared-core/nouveau_drm.h @@ -25,9 +25,12 @@ #ifndef __NOUVEAU_DRM_H__ #define __NOUVEAU_DRM_H__ -#define NOUVEAU_DRM_HEADER_PATCHLEVEL 6 +#define NOUVEAU_DRM_HEADER_PATCHLEVEL 7 typedef struct drm_nouveau_fifo_alloc { + uint32_t fb_ctxdma_handle; + uint32_t tt_ctxdma_handle; + int channel; uint32_t put_base; /* FIFO control regs */ @@ -36,29 +39,30 @@ typedef struct drm_nouveau_fifo_alloc { /* DMA command buffer */ drm_handle_t cmdbuf; int cmdbuf_size; + /* Notifier memory */ + drm_handle_t notifier; + int notifier_size; } drm_nouveau_fifo_alloc_t; -typedef struct drm_nouveau_object_init { +typedef struct drm_nouveau_grobj_alloc { int channel; uint32_t handle; int class; } -drm_nouveau_object_init_t; +drm_nouveau_grobj_alloc_t; #define NOUVEAU_MEM_ACCESS_RO 1 #define NOUVEAU_MEM_ACCESS_WO 2 #define NOUVEAU_MEM_ACCESS_RW 3 -typedef struct drm_nouveau_dma_object_init { +typedef struct drm_nouveau_notifier_alloc { int channel; uint32_t handle; - int class; - int access; - int target; + int count; + uint32_t offset; - int size; } -drm_nouveau_dma_object_init_t; +drm_nouveau_notifier_alloc_t; #define NOUVEAU_MEM_FB 0x00000001 #define NOUVEAU_MEM_AGP 0x00000002 @@ -68,7 +72,7 @@ drm_nouveau_dma_object_init_t; #define NOUVEAU_MEM_USER_BACKED 0x00000020 #define NOUVEAU_MEM_MAPPED 0x00000040 #define NOUVEAU_MEM_INSTANCE 0x00000080 /* internal */ - +#define NOUVEAU_MEM_NOTIFIER 0x00000100 /* internal */ typedef struct drm_nouveau_mem_alloc { int flags; int alignment; @@ -141,8 +145,8 @@ typedef struct drm_nouveau_sarea { drm_nouveau_sarea_t; #define DRM_NOUVEAU_FIFO_ALLOC 0x00 -#define DRM_NOUVEAU_OBJECT_INIT 0x01 -#define DRM_NOUVEAU_DMA_OBJECT_INIT 0x02 +#define DRM_NOUVEAU_GROBJ_ALLOC 0x01 +#define DRM_NOUVEAU_NOTIFIER_ALLOC 0x02 #define DRM_NOUVEAU_MEM_ALLOC 0x03 #define DRM_NOUVEAU_MEM_FREE 0x04 #define DRM_NOUVEAU_GETPARAM 0x05 diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index debee8e4..3cca07fc 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -34,7 +34,7 @@ #define DRIVER_MAJOR 0 #define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 6 +#define DRIVER_PATCHLEVEL 7 #define NOUVEAU_FAMILY 0x0000FFFF #define NOUVEAU_FLAGS 0xFFFF0000 @@ -64,7 +64,6 @@ struct nouveau_object int channel; struct mem_block *instance; - uint32_t ht_loc; uint32_t handle; int class; @@ -83,6 +82,11 @@ struct nouveau_fifo /* dma object for the command buffer itself */ struct mem_block *cmdbuf_mem; struct nouveau_object *cmdbuf_obj; + uint32_t pushbuf_base; + /* notifier memory */ + struct mem_block *notifier_block; + struct mem_block *notifier_heap; + drm_local_map_t *notifier_map; /* PGRAPH context, for cards that 
keep it in RAMIN */ struct mem_block *ramin_grctx; /* objects belonging to this fifo */ @@ -99,32 +103,42 @@ struct nouveau_config { } cmdbuf; }; -struct nouveau_engine_func { +typedef struct nouveau_engine_func { struct { - int (*Init)(drm_device_t *dev); - void (*Takedown)(drm_device_t *dev); - } Mc; + int (*init)(drm_device_t *dev); + void (*takedown)(drm_device_t *dev); + } mc; struct { - int (*Init)(drm_device_t *dev); - void (*Takedown)(drm_device_t *dev); - } Timer; + int (*init)(drm_device_t *dev); + void (*takedown)(drm_device_t *dev); + } timer; struct { - int (*Init)(drm_device_t *dev); - void (*Takedown)(drm_device_t *dev); - } Fb; + int (*init)(drm_device_t *dev); + void (*takedown)(drm_device_t *dev); + } fb; struct { - int (*Init)(drm_device_t *dev); - void (*Takedown)(drm_device_t *dev); - } Graph; + int (*init)(drm_device_t *); + void (*takedown)(drm_device_t *); + + int (*create_context)(drm_device_t *, int channel); + void (*destroy_context)(drm_device_t *, int channel); + int (*load_context)(drm_device_t *, int channel); + int (*save_context)(drm_device_t *, int channel); + } graph; struct { - int (*Init)(drm_device_t *dev); - void (*Takedown)(drm_device_t *dev); - } Fifo; -}; + int (*init)(drm_device_t *); + void (*takedown)(drm_device_t *); + + int (*create_context)(drm_device_t *, int channel); + void (*destroy_context)(drm_device_t *, int channel); + int (*load_context)(drm_device_t *, int channel); + int (*save_context)(drm_device_t *, int channel); + } fifo; +} nouveau_engine_func_t; typedef struct drm_nouveau_private { /* the card type, takes NV_* as values */ @@ -186,6 +200,12 @@ extern void nouveau_wait_for_idle(struct drm_device *dev); extern int nouveau_ioctl_card_init(DRM_IOCTL_ARGS); /* nouveau_mem.c */ +extern int nouveau_mem_init_heap(struct mem_block **, + uint64_t start, uint64_t size); +extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *, + uint64_t size, int align2, + DRMFILE); +extern void nouveau_mem_free_block(struct mem_block *); extern uint64_t nouveau_mem_fb_amount(struct drm_device *dev); extern void nouveau_mem_release(DRMFILE filp, struct mem_block *heap); extern int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS); @@ -199,11 +219,13 @@ extern struct mem_block* nouveau_instmem_alloc(struct drm_device *dev, uint32_t size, uint32_t align); extern void nouveau_instmem_free(struct drm_device *dev, struct mem_block *block); -extern uint32_t nouveau_instmem_r32(drm_nouveau_private_t *dev_priv, - struct mem_block *mem, int index); -extern void nouveau_instmem_w32(drm_nouveau_private_t *dev_priv, - struct mem_block *mem, int index, - uint32_t val); + +/* nouveau_notifier.c */ +extern int nouveau_notifier_init_channel(drm_device_t *, int channel, DRMFILE); +extern void nouveau_notifier_takedown_channel(drm_device_t *, int channel); +extern int nouveau_notifier_alloc(drm_device_t *, int channel, + uint32_t handle, int cout, uint32_t *offset); +extern int nouveau_ioctl_notifier_alloc(DRM_IOCTL_ARGS); /* nouveau_fifo.c */ extern int nouveau_fifo_init(drm_device_t *dev); @@ -214,7 +236,13 @@ extern int nouveau_fifo_owner(drm_device_t *dev, DRMFILE filp, int channel); extern void nouveau_fifo_free(drm_device_t *dev, int channel); /* nouveau_object.c */ +extern int nouveau_object_init_channel(drm_device_t *, int channel, + uint32_t vram_handle, + uint32_t tt_handle); +extern void nouveau_object_takedown_channel(drm_device_t *dev, int channel); extern void nouveau_object_cleanup(drm_device_t *dev, int channel); +extern int 
nouveau_ramht_insert(drm_device_t *, int channel, + uint32_t handle, struct nouveau_object *); extern struct nouveau_object * nouveau_object_gr_create(drm_device_t *dev, int channel, int class); extern struct nouveau_object * @@ -222,8 +250,7 @@ nouveau_object_dma_create(drm_device_t *dev, int channel, int class, uint32_t offset, uint32_t size, int access, int target); extern void nouveau_object_free(drm_device_t *dev, struct nouveau_object *obj); -extern int nouveau_ioctl_object_init(DRM_IOCTL_ARGS); -extern int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS); +extern int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS); extern uint32_t nouveau_chip_instance_get(drm_device_t *dev, struct mem_block *mem); /* nouveau_irq.c */ @@ -244,35 +271,82 @@ extern void nv10_fb_takedown(drm_device_t *dev); extern int nv40_fb_init(drm_device_t *dev); extern void nv40_fb_takedown(drm_device_t *dev); +/* nv04_fifo.c */ +extern int nv04_fifo_create_context(drm_device_t *dev, int channel); +extern void nv04_fifo_destroy_context(drm_device_t *dev, int channel); +extern int nv04_fifo_load_context(drm_device_t *dev, int channel); +extern int nv04_fifo_save_context(drm_device_t *dev, int channel); + +/* nv10_fifo.c */ +extern int nv10_fifo_create_context(drm_device_t *dev, int channel); +extern void nv10_fifo_destroy_context(drm_device_t *dev, int channel); +extern int nv10_fifo_load_context(drm_device_t *dev, int channel); +extern int nv10_fifo_save_context(drm_device_t *dev, int channel); + +/* nv40_fifo.c */ +extern int nv40_fifo_create_context(drm_device_t *, int channel); +extern void nv40_fifo_destroy_context(drm_device_t *, int channel); +extern int nv40_fifo_load_context(drm_device_t *, int channel); +extern int nv40_fifo_save_context(drm_device_t *, int channel); + +/* nv50_fifo.c */ +extern int nv50_fifo_init(drm_device_t *); +extern void nv50_fifo_takedown(drm_device_t *); +extern int nv50_fifo_create_context(drm_device_t *, int channel); +extern void nv50_fifo_destroy_context(drm_device_t *, int channel); +extern int nv50_fifo_load_context(drm_device_t *, int channel); +extern int nv50_fifo_save_context(drm_device_t *, int channel); + /* nv04_graph.c */ extern void nouveau_nv04_context_switch(drm_device_t *dev); -extern int nv04_graph_init(drm_device_t *dev); +extern int nv04_graph_init(drm_device_t *dev); extern void nv04_graph_takedown(drm_device_t *dev); -extern int nv04_graph_context_create(drm_device_t *dev, int channel); +extern int nv04_graph_create_context(drm_device_t *dev, int channel); +extern void nv04_graph_destroy_context(drm_device_t *dev, int channel); +extern int nv04_graph_load_context(drm_device_t *dev, int channel); +extern int nv04_graph_save_context(drm_device_t *dev, int channel); /* nv10_graph.c */ extern void nouveau_nv10_context_switch(drm_device_t *dev); -extern int nv10_graph_init(drm_device_t *dev); +extern int nv10_graph_init(drm_device_t *dev); extern void nv10_graph_takedown(drm_device_t *dev); -extern int nv10_graph_context_create(drm_device_t *dev, int channel); +extern int nv10_graph_create_context(drm_device_t *dev, int channel); +extern void nv10_graph_destroy_context(drm_device_t *dev, int channel); +extern int nv10_graph_load_context(drm_device_t *dev, int channel); +extern int nv10_graph_save_context(drm_device_t *dev, int channel); /* nv20_graph.c */ extern void nouveau_nv20_context_switch(drm_device_t *dev); -extern int nv20_graph_init(drm_device_t *dev); +extern int nv20_graph_init(drm_device_t *dev); extern void nv20_graph_takedown(drm_device_t *dev); -extern int 
nv20_graph_context_create(drm_device_t *dev, int channel); +extern int nv20_graph_create_context(drm_device_t *dev, int channel); +extern void nv20_graph_destroy_context(drm_device_t *dev, int channel); +extern int nv20_graph_load_context(drm_device_t *dev, int channel); +extern int nv20_graph_save_context(drm_device_t *dev, int channel); /* nv30_graph.c */ -extern int nv30_graph_init(drm_device_t *dev); +extern int nv30_graph_init(drm_device_t *dev); extern void nv30_graph_takedown(drm_device_t *dev); -extern int nv30_graph_context_create(drm_device_t *dev, int channel); +extern int nv30_graph_create_context(drm_device_t *, int channel); +extern void nv30_graph_destroy_context(drm_device_t *, int channel); +extern int nv30_graph_load_context(drm_device_t *, int channel); +extern int nv30_graph_save_context(drm_device_t *, int channel); /* nv40_graph.c */ -extern int nv40_graph_init(drm_device_t *dev); -extern void nv40_graph_takedown(drm_device_t *dev); -extern int nv40_graph_context_create(drm_device_t *dev, int channel); -extern void nv40_graph_context_save_current(drm_device_t *dev); -extern void nv40_graph_context_restore(drm_device_t *dev, int channel); +extern int nv40_graph_init(drm_device_t *); +extern void nv40_graph_takedown(drm_device_t *); +extern int nv40_graph_create_context(drm_device_t *, int channel); +extern void nv40_graph_destroy_context(drm_device_t *, int channel); +extern int nv40_graph_load_context(drm_device_t *, int channel); +extern int nv40_graph_save_context(drm_device_t *, int channel); + +/* nv50_graph.c */ +extern int nv50_graph_init(drm_device_t *); +extern void nv50_graph_takedown(drm_device_t *); +extern int nv50_graph_create_context(drm_device_t *, int channel); +extern void nv50_graph_destroy_context(drm_device_t *, int channel); +extern int nv50_graph_load_context(drm_device_t *, int channel); +extern int nv50_graph_save_context(drm_device_t *, int channel); /* nv04_mc.c */ extern int nv04_mc_init(drm_device_t *dev); @@ -282,6 +356,10 @@ extern void nv04_mc_takedown(drm_device_t *dev); extern int nv40_mc_init(drm_device_t *dev); extern void nv40_mc_takedown(drm_device_t *dev); +/* nv50_mc.c */ +extern int nv50_mc_init(drm_device_t *dev); +extern void nv50_mc_takedown(drm_device_t *dev); + /* nv04_timer.c */ extern int nv04_timer_init(drm_device_t *dev); extern void nv04_timer_takedown(drm_device_t *dev); @@ -297,8 +375,17 @@ extern long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, #define NV_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) ) #endif -#define INSTANCE_WR(mem,ofs,val) nouveau_instmem_w32(dev_priv,(mem),(ofs),(val)) -#define INSTANCE_RD(mem,ofs) nouveau_instmem_r32(dev_priv,(mem),(ofs)) +/* PRAMIN access */ +#if defined(__powerpc__) +#define NV_RI32(o) in_be32((void __iomem *)(dev_priv->ramin)->handle+(o)) +#define NV_WI32(o,v) out_be32((void __iomem*)(dev_priv->ramin)->handle+(o), (v)) +#else +#define NV_RI32(o) DRM_READ32(dev_priv->ramin, (o)) +#define NV_WI32(o,v) DRM_WRITE32(dev_priv->ramin, (o), (v)) +#endif + +#define INSTANCE_RD(o,i) NV_RI32((o)->start + ((i)<<2)) +#define INSTANCE_WR(o,i,v) NV_WI32((o)->start + ((i)<<2), (v)) #endif /* __NOUVEAU_DRV_H__ */ diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 8e66ca2e..81dbfcda 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -83,6 +83,8 @@ static int nouveau_fifo_instmem_configure(drm_device_t *dev) case NV_50: case NV_40: NV_WRITE(NV40_PFIFO_RAMFC, 0x30002); + if((dev_priv->chipset == 0x49) || 
(dev_priv->chipset == 0x4b)) + NV_WRITE(0x2230,0x00000001); break; case NV_44: NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) | @@ -232,235 +234,20 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) return DRM_ERR(ENOMEM); } + dev_priv->fifos[channel].pushbuf_base = 0; dev_priv->fifos[channel].cmdbuf_mem = cb; dev_priv->fifos[channel].cmdbuf_obj = cb_dma; return 0; } -#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV04_RAMFC_##offset, (val)) -static void nouveau_nv04_context_init(drm_device_t *dev, int channel) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_object *cb_obj; - uint32_t fifoctx, ctx_size = 32; - int i; - - cb_obj = dev_priv->fifos[channel].cmdbuf_obj; - - fifoctx=NV_RAMIN+dev_priv->ramfc_offset+channel*ctx_size; - - // clear the fifo context - for(i=0;i<ctx_size/4;i++) - NV_WRITE(fifoctx+4*i,0x0); - - RAMFC_WR(DMA_INSTANCE , nouveau_chip_instance_get(dev, cb_obj->instance)); - - RAMFC_WR(DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 | -#ifdef __BIG_ENDIAN - NV_PFIFO_CACHE1_BIG_ENDIAN | -#endif - 0x00000000); -} -#undef RAMFC_WR - -#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV10_RAMFC_##offset, (val)) -static void nouveau_nv10_context_init(drm_device_t *dev, int channel) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_object *cb_obj; - uint32_t fifoctx; - int ctx_size = nouveau_fifo_ctx_size(dev); - int i; - cb_obj = dev_priv->fifos[channel].cmdbuf_obj; - fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*ctx_size; - - for (i=0;i<ctx_size;i+=4) - NV_WRITE(fifoctx + i, 0); - - /* Fill entries that are seen filled in dumps of nvidia driver just - * after channel's is put into DMA mode - */ - - RAMFC_WR(DMA_INSTANCE , nouveau_chip_instance_get(dev, - cb_obj->instance)); - - RAMFC_WR(DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 | -#ifdef __BIG_ENDIAN - NV_PFIFO_CACHE1_BIG_ENDIAN | -#endif - 0x00000000); -} - -static void nouveau_nv30_context_init(drm_device_t *dev, int channel) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - struct nouveau_object *cb_obj; - uint32_t fifoctx, grctx_inst, cb_inst, ctx_size = 64; - int i; - - cb_obj = dev_priv->fifos[channel].cmdbuf_obj; - cb_inst = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance); - grctx_inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); - fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel * ctx_size; - - for (i = 0; i < ctx_size; i += 4) - NV_WRITE(fifoctx + i, 0); - - RAMFC_WR(REF_CNT, NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); - RAMFC_WR(DMA_INSTANCE, cb_inst); - RAMFC_WR(DMA_STATE, NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); - RAMFC_WR(DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | -#ifdef __BIG_ENDIAN - NV_PFIFO_CACHE1_BIG_ENDIAN | -#endif - 0x00000000); - - RAMFC_WR(ENGINE, NV_READ(NV04_PFIFO_CACHE1_ENGINE)); - RAMFC_WR(PULL1_ENGINE, NV_READ(NV04_PFIFO_CACHE1_PULL1)); - RAMFC_WR(ACQUIRE_VALUE, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); - RAMFC_WR(ACQUIRE_TIMESTAMP, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP)); - RAMFC_WR(ACQUIRE_TIMEOUT, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); - RAMFC_WR(SEMAPHORE, NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE)); -} - -#if 0 
-static void nouveau_nv10_context_save(drm_device_t *dev) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - uint32_t fifoctx; - int channel; - - channel = NV_READ(NV03_PFIFO_CACHE1_PUSH1) & (nouveau_fifo_number(dev)-1); - fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*64; - - RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); - RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); - RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); - RAMFC_WR(DMA_INSTANCE , NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE)); - RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); - RAMFC_WR(DMA_FETCH , NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH)); - RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE)); - RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1)); - RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); - RAMFC_WR(ACQUIRE_TIMESTAMP, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP)); - RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); - RAMFC_WR(SEMAPHORE , NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE)); - RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV10_PFIFO_CACHE1_DMA_SUBROUTINE)); -} -#endif -#undef RAMFC_WR - -#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV40_RAMFC_##offset, (val)) -static void nouveau_nv40_context_init(drm_device_t *dev, int channel) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - uint32_t fifoctx, cb_inst, grctx_inst; - int i; - - cb_inst = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance); - grctx_inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); - fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*128; - for (i=0;i<128;i+=4) - NV_WRITE(fifoctx + i, 0); - - /* Fill entries that are seen filled in dumps of nvidia driver just - * after channel's is put into DMA mode - */ - RAMFC_WR(DMA_INSTANCE , cb_inst); - RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | -#ifdef __BIG_ENDIAN - NV_PFIFO_CACHE1_BIG_ENDIAN | -#endif - 0x30000000 /* no idea.. 
*/); - RAMFC_WR(GRCTX_INSTANCE, grctx_inst); - RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF); -} - -static void nouveau_nv40_context_save(drm_device_t *dev) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - uint32_t fifoctx; - int channel; - - channel = NV_READ(NV03_PFIFO_CACHE1_PUSH1) & (nouveau_fifo_number(dev)-1); - fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*128; - - RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); - RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); - RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); - RAMFC_WR(DMA_INSTANCE , NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE)); - RAMFC_WR(DMA_DCOUNT , NV_READ(NV10_PFIFO_CACHE1_DMA_DCOUNT)); - RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); - RAMFC_WR(DMA_FETCH , NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH)); - RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE)); - RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1)); - RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); - RAMFC_WR(ACQUIRE_TIMESTAMP, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP)); - RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); - RAMFC_WR(SEMAPHORE , NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE)); - RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); - RAMFC_WR(GRCTX_INSTANCE , NV_READ(NV40_PFIFO_GRCTX_INSTANCE)); - RAMFC_WR(DMA_TIMESLICE , NV_READ(NV04_PFIFO_DMA_TIMESLICE) & 0x1FFFF); - RAMFC_WR(UNK_40 , NV_READ(NV40_PFIFO_UNK32E4)); -} -#undef RAMFC_WR - -/* This function should load values from RAMFC into PFIFO, but for now - * it just clobbers PFIFO with what nouveau_fifo_alloc used to setup - * unconditionally. - */ -static void -nouveau_fifo_context_restore(drm_device_t *dev, int channel) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - uint32_t cb_inst; - - cb_inst = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance); - - // FIXME check if we need to refill the time quota with something like NV_WRITE(0x204C, 0x0003FFFF); - - if (dev_priv->card_type >= NV_40) - NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 0x00010000|channel); - else - NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 0x00000100|channel); - - NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, 0 /*RAMFC_DMA_PUT*/); - NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, 0 /*RAMFC_DMA_GET*/); - NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, cb_inst); - NV_WRITE(NV04_PFIFO_SIZE , 0x0000FFFF); - NV_WRITE(NV04_PFIFO_CACHE1_HASH, 0x0000FFFF); - - NV_WRITE(NV04_PFIFO_CACHE0_PULL1, 0x00000001); - NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, 0x00000000); - NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000); - NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, 0x00000000); - - NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 | -#ifdef __BIG_ENDIAN - NV_PFIFO_CACHE1_BIG_ENDIAN | -#endif - 0x00000000); -} - /* allocates and initializes a fifo for user space consumption */ -static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) +int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp, + uint32_t vram_handle, uint32_t tt_handle) { int ret; drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_object *cb_obj; + nouveau_engine_func_t *engine = &dev_priv->Engine; + struct nouveau_fifo *chan; int channel; /* @@ -478,14 +265,16 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) if (channel==nouveau_fifo_number(dev)) return DRM_ERR(EINVAL); (*chan_ret) = 
channel; + chan = &dev_priv->fifos[channel]; DRM_INFO("Allocating FIFO number %d\n", channel); /* that fifo is used */ - dev_priv->fifos[channel].used = 1; - dev_priv->fifos[channel].filp = filp; + chan->used = 1; + chan->filp = filp; + /* FIFO has no objects yet */ - dev_priv->fifos[channel].objs = NULL; + chan->objs = NULL; /* allocate a command buffer, and create a dma object for the gpu */ ret = nouveau_fifo_cmdbuf_alloc(dev, channel); @@ -493,7 +282,20 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) nouveau_fifo_free(dev, channel); return ret; } - cb_obj = dev_priv->fifos[channel].cmdbuf_obj; + + /* Setup channel's default objects */ + ret = nouveau_object_init_channel(dev, channel, vram_handle, tt_handle); + if (ret) { + nouveau_fifo_free(dev, channel); + return ret; + } + + /* Allocate space for per-channel fixed notifier memory */ + ret = nouveau_notifier_init_channel(dev, channel, filp); + if (ret) { + nouveau_fifo_free(dev, channel); + return ret; + } nouveau_wait_for_idle(dev); @@ -503,73 +305,54 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000); NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); + /* Create a graphics context for new channel */ + ret = engine->graph.create_context(dev, channel); + if (ret) { + nouveau_fifo_free(dev, channel); + return ret; + } + /* Construct inital RAMFC for new channel */ - switch(dev_priv->card_type) - { - case NV_04: - case NV_05: - nv04_graph_context_create(dev, channel); - nouveau_nv04_context_init(dev, channel); - break; - case NV_10: - case NV_17: - nv10_graph_context_create(dev, channel); - nouveau_nv10_context_init(dev, channel); - break; - case NV_20: - ret = nv20_graph_context_create(dev, channel); - if (ret) { - nouveau_fifo_free(dev, channel); - return ret; - } - nouveau_nv10_context_init(dev, channel); - break; - case NV_30: - ret = nv30_graph_context_create(dev, channel); - if (ret) { - nouveau_fifo_free(dev, channel); - return ret; - } - nouveau_nv30_context_init(dev, channel); - break; - case NV_40: - case NV_44: - case NV_50: - ret = nv40_graph_context_create(dev, channel); - if (ret) { - nouveau_fifo_free(dev, channel); - return ret; - } - nouveau_nv40_context_init(dev, channel); - break; + ret = engine->fifo.create_context(dev, channel); + if (ret) { + nouveau_fifo_free(dev, channel); + return ret; } /* enable the fifo dma operation */ NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<channel)); /* setup channel's default get/put values */ - NV_WRITE(NV03_FIFO_REGS_DMAPUT(channel), 0); - NV_WRITE(NV03_FIFO_REGS_DMAGET(channel), 0); + NV_WRITE(NV03_FIFO_REGS_DMAPUT(channel), chan->pushbuf_base); + NV_WRITE(NV03_FIFO_REGS_DMAGET(channel), chan->pushbuf_base); /* If this is the first channel, setup PFIFO ourselves. For any * other case, the GPU will handle this when it switches contexts. 
*/ if (dev_priv->fifo_alloc_count == 0) { - nouveau_fifo_context_restore(dev, channel); - if (dev_priv->card_type >= NV_30) { - struct nouveau_fifo *chan; - uint32_t inst; - - chan = &dev_priv->fifos[channel]; - inst = nouveau_chip_instance_get(dev, - chan->ramin_grctx); - - /* see comments in nv40_graph_context_restore() */ - NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_SIZE, inst); - if (dev_priv->card_type >= NV_40) { - NV_WRITE(0x40032C, inst | 0x01000000); - NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, inst); - } + ret = engine->fifo.load_context(dev, channel); + if (ret) { + nouveau_fifo_free(dev, channel); + return ret; + } + + ret = engine->graph.load_context(dev, channel); + if (ret) { + nouveau_fifo_free(dev, channel); + return ret; + } + + /* Temporary hack, to avoid breaking Xv on cards where the + * initial context value for 0x400710 doesn't have these bits + * set. Proper fix would be to find which object+method is + * responsible for modifying this state. + */ + if (dev_priv->chipset >= 0x10) { + uint32_t tmp; + tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00; + NV_WRITE(NV10_PGRAPH_SURFACE, tmp); + tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100; + NV_WRITE(NV10_PGRAPH_SURFACE, tmp); } } @@ -591,9 +374,8 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp) void nouveau_fifo_free(drm_device_t* dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; + nouveau_engine_func_t *engine = &dev_priv->Engine; struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - int i; - int ctx_size = nouveau_fifo_ctx_size(dev); chan->used = 0; DRM_INFO("%s: freeing fifo %d\n", __func__, channel); @@ -604,25 +386,10 @@ void nouveau_fifo_free(drm_device_t* dev, int channel) NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<channel)); // FIXME XXX needs more code - /* Clean RAMFC */ - for (i=0;i<ctx_size;i+=4) { - DRM_DEBUG("RAMFC +%02x: 0x%08x\n", i, NV_READ(NV_RAMIN + - dev_priv->ramfc_offset + - channel*ctx_size + i)); - NV_WRITE(NV_RAMIN + dev_priv->ramfc_offset + - channel*ctx_size + i, 0); - } + engine->fifo.destroy_context(dev, channel); /* Cleanup PGRAPH state */ - if (dev_priv->card_type >= NV_40) - nouveau_instmem_free(dev, chan->ramin_grctx); - else if (dev_priv->card_type >= NV_30) { - } - else if (dev_priv->card_type >= NV_20) { - /* clear ctx table */ - INSTANCE_WR(dev_priv->ctx_table, channel, 0); - nouveau_instmem_free(dev, chan->ramin_grctx); - } + engine->graph.destroy_context(dev, channel); /* reenable the fifo caches */ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); @@ -631,6 +398,8 @@ void nouveau_fifo_free(drm_device_t* dev, int channel) if (chan->cmdbuf_mem) nouveau_mem_free(dev, chan->cmdbuf_mem); + nouveau_notifier_takedown_channel(dev, channel); + /* Destroy objects belonging to the channel */ nouveau_object_cleanup(dev, channel); @@ -669,31 +438,42 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan; drm_nouveau_fifo_alloc_t init; int res; DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_fifo_alloc_t __user *) data, sizeof(init)); - res = nouveau_fifo_alloc(dev, &init.channel, filp); + res = nouveau_fifo_alloc(dev, &init.channel, filp, + init.fb_ctxdma_handle, + init.tt_ctxdma_handle); if (res) return res; + chan = &dev_priv->fifos[init.channel]; - /* this should probably disappear in the next abi break? 
*/ - init.put_base = 0; + init.put_base = chan->pushbuf_base; /* make the fifo available to user space */ /* first, the fifo control regs */ init.ctrl = dev_priv->mmio->offset + NV03_FIFO_REGS(init.channel); init.ctrl_size = NV03_FIFO_REGS_SIZE; res = drm_addmap(dev, init.ctrl, init.ctrl_size, _DRM_REGISTERS, - 0, &dev_priv->fifos[init.channel].regs); + 0, &chan->regs); if (res != 0) return res; /* pass back FIFO map info to the caller */ - init.cmdbuf = dev_priv->fifos[init.channel].cmdbuf_mem->start; - init.cmdbuf_size = dev_priv->fifos[init.channel].cmdbuf_mem->size; + init.cmdbuf = chan->cmdbuf_mem->start; + init.cmdbuf_size = chan->cmdbuf_mem->size; + + /* and the notifier block */ + init.notifier = chan->notifier_block->start; + init.notifier_size = chan->notifier_block->size; + res = drm_addmap(dev, init.notifier, init.notifier_size, _DRM_REGISTERS, + 0, &chan->notifier_map); + if (res != 0) + return res; DRM_COPY_TO_USER_IOCTL((drm_nouveau_fifo_alloc_t __user *)data, init, sizeof(init)); @@ -706,8 +486,8 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) drm_ioctl_desc_t nouveau_ioctls[] = { [DRM_IOCTL_NR(DRM_NOUVEAU_FIFO_ALLOC)] = {nouveau_ioctl_fifo_alloc, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_NOUVEAU_OBJECT_INIT)] = {nouveau_ioctl_object_init, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_NOUVEAU_DMA_OBJECT_INIT)] = {nouveau_ioctl_dma_object_init, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_NOUVEAU_GROBJ_ALLOC)] = {nouveau_ioctl_grobj_alloc, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_NOUVEAU_NOTIFIER_ALLOC)] = {nouveau_ioctl_notifier_alloc, DRM_AUTH}, [DRM_IOCTL_NR(DRM_NOUVEAU_MEM_ALLOC)] = {nouveau_ioctl_mem_alloc, DRM_AUTH}, [DRM_IOCTL_NR(DRM_NOUVEAU_MEM_FREE)] = {nouveau_ioctl_mem_free, DRM_AUTH}, [DRM_IOCTL_NR(DRM_NOUVEAU_GETPARAM)] = {nouveau_ioctl_getparam, DRM_AUTH}, diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index a5343b99..d8ae52b7 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -77,8 +77,8 @@ out: return p; } -static struct mem_block *alloc_block(struct mem_block *heap, uint64_t size, - int align2, DRMFILE filp) +struct mem_block *nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size, + int align2, DRMFILE filp) { struct mem_block *p; uint64_t mask = (1 << align2) - 1; @@ -106,7 +106,7 @@ static struct mem_block *find_block(struct mem_block *heap, uint64_t start) return NULL; } -static void free_block(struct mem_block *p) +void nouveau_mem_free_block(struct mem_block *p) { p->filp = NULL; @@ -132,7 +132,8 @@ static void free_block(struct mem_block *p) /* Initialize. How to check for an uninitialized heap? */ -static int init_heap(struct mem_block **heap, uint64_t start, uint64_t size) +int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start, + uint64_t size) { struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); @@ -331,7 +332,9 @@ int nouveau_mem_init(struct drm_device *dev) goto no_agp; } - if (init_heap(&dev_priv->agp_heap, info.aperture_base, info.aperture_size)) + if (nouveau_mem_init_heap(&dev_priv->agp_heap, + info.aperture_base, + info.aperture_size)) goto no_agp; dev_priv->agp_phys = info.aperture_base; @@ -357,12 +360,19 @@ no_agp: if (fb_size>256*1024*1024) { /* On cards with > 256Mb, you can't map everything. 
* So we create a second FB heap for that type of memory */ - if (init_heap(&dev_priv->fb_heap, drm_get_resource_start(dev,1), 256*1024*1024)) + if (nouveau_mem_init_heap(&dev_priv->fb_heap, + drm_get_resource_start(dev,1), + 256*1024*1024)) return DRM_ERR(ENOMEM); - if (init_heap(&dev_priv->fb_nomap_heap, drm_get_resource_start(dev,1)+256*1024*1024, fb_size-256*1024*1024)) + if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap, + drm_get_resource_start(dev,1) + + 256*1024*1024, + fb_size-256*1024*1024)) return DRM_ERR(ENOMEM); } else { - if (init_heap(&dev_priv->fb_heap, drm_get_resource_start(dev,1), fb_size)) + if (nouveau_mem_init_heap(&dev_priv->fb_heap, + drm_get_resource_start(dev,1), + fb_size)) return DRM_ERR(ENOMEM); dev_priv->fb_nomap_heap=NULL; } @@ -397,21 +407,25 @@ struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint6 if (flags&NOUVEAU_MEM_AGP) { type=NOUVEAU_MEM_AGP; - block = alloc_block(dev_priv->agp_heap, size, alignment, filp); + block = nouveau_mem_alloc_block(dev_priv->agp_heap, size, + alignment, filp); if (block) goto alloc_ok; } if (flags&(NOUVEAU_MEM_FB|NOUVEAU_MEM_FB_ACCEPTABLE)) { type=NOUVEAU_MEM_FB; if (!(flags&NOUVEAU_MEM_MAPPED)) { - block = alloc_block(dev_priv->fb_nomap_heap, size, alignment, filp); + block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap, + size, alignment, filp); if (block) goto alloc_ok; } - block = alloc_block(dev_priv->fb_heap, size, alignment, filp); + block = nouveau_mem_alloc_block(dev_priv->fb_heap, size, + alignment, filp); if (block) goto alloc_ok; } if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) { type=NOUVEAU_MEM_AGP; - block = alloc_block(dev_priv->agp_heap, size, alignment, filp); + block = nouveau_mem_alloc_block(dev_priv->agp_heap, size, + alignment, filp); if (block) goto alloc_ok; } @@ -432,7 +446,7 @@ alloc_ok: ret = drm_addmap(dev, block->start, block->size, _DRM_FRAME_BUFFER, 0, &block->map); if (ret) { - free_block(block); + nouveau_mem_free_block(block); return NULL; } } @@ -446,7 +460,7 @@ void nouveau_mem_free(struct drm_device* dev, struct mem_block* block) DRM_INFO("freeing 0x%llx\n", block->start); if (block->flags&NOUVEAU_MEM_MAPPED) drm_rmmap(dev, block->map); - free_block(block); + nouveau_mem_free_block(block); } static void @@ -475,13 +489,8 @@ nouveau_instmem_determine_amount(struct drm_device *dev) DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_size>>10); /* Clear all of it, except the BIOS image that's in the first 64KiB */ - if (dev_priv->ramin) { - for (i=(64*1024); i<dev_priv->ramin_size; i+=4) - DRM_WRITE32(dev_priv->ramin, i, 0x00000000); - } else { - for (i=(64*1024); i<dev_priv->ramin_size; i+=4) - DRM_WRITE32(dev_priv->mmio, NV_RAMIN + i, 0x00000000); - } + for (i=(64*1024); i<dev_priv->ramin_size; i+=4) + NV_WI32(i, 0x00000000); } static void @@ -549,8 +558,8 @@ int nouveau_instmem_init(struct drm_device *dev) * the space that was reserved for RAMHT/FC/RO. 
*/ offset = dev_priv->ramfc_offset + dev_priv->ramfc_size; - ret = init_heap(&dev_priv->ramin_heap, - offset, dev_priv->ramin_size - offset); + ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, + offset, dev_priv->ramin_size - offset); if (ret) { dev_priv->ramin_heap = NULL; DRM_ERROR("Failed to init RAMIN heap\n"); @@ -570,7 +579,8 @@ struct mem_block *nouveau_instmem_alloc(struct drm_device *dev, return NULL; } - block = alloc_block(dev_priv->ramin_heap, size, align, (DRMFILE)-2); + block = nouveau_mem_alloc_block(dev_priv->ramin_heap, size, align, + (DRMFILE)-2); if (block) { block->flags = NOUVEAU_MEM_INSTANCE; DRM_DEBUG("instance(size=%d, align=%d) alloc'd at 0x%08x\n", @@ -583,39 +593,7 @@ struct mem_block *nouveau_instmem_alloc(struct drm_device *dev, void nouveau_instmem_free(struct drm_device *dev, struct mem_block *block) { if (dev && block) { - free_block(block); - } -} - -uint32_t nouveau_instmem_r32(drm_nouveau_private_t *dev_priv, - struct mem_block *mem, int index) -{ - uint32_t ofs = (uint32_t)mem->start + (index<<2); - - if (dev_priv->ramin) { -#if defined(__powerpc__) - return in_be32((void __iomem *)(dev_priv->ramin)->handle + ofs); -#else - return DRM_READ32(dev_priv->ramin, ofs); -#endif - } else { - return NV_READ(NV_RAMIN+ofs); - } -} - -void nouveau_instmem_w32(drm_nouveau_private_t *dev_priv, - struct mem_block *mem, int index, uint32_t val) -{ - uint32_t ofs = (uint32_t)mem->start + (index<<2); - - if (dev_priv->ramin) { -#if defined(__powerpc__) - out_be32((void __iomem *)(dev_priv->ramin)->handle + ofs, val); -#else - DRM_WRITE32(dev_priv->ramin, ofs, val); -#endif - } else { - NV_WRITE(NV_RAMIN+ofs, val); + nouveau_mem_free_block(block); } } diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c new file mode 100644 index 00000000..0cfe733e --- /dev/null +++ b/shared-core/nouveau_notifier.c @@ -0,0 +1,154 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +int +nouveau_notifier_init_channel(drm_device_t *dev, int channel, DRMFILE filp) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + int flags, ret; + + /*TODO: PCI notifier blocks */ + if (dev_priv->agp_heap) + flags = NOUVEAU_MEM_AGP | NOUVEAU_MEM_FB_ACCEPTABLE; + else + flags = NOUVEAU_MEM_FB; + + chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags,filp); + if (!chan->notifier_block) + return DRM_ERR(ENOMEM); + + ret = nouveau_mem_init_heap(&chan->notifier_heap, + 0, chan->notifier_block->size); + if (ret) + return ret; + + return 0; +} + +void +nouveau_notifier_takedown_channel(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + + if (chan->notifier_block) { + nouveau_mem_free(dev, chan->notifier_block); + chan->notifier_block = NULL; + } + + /*XXX: heap destroy */ +} + +int +nouveau_notifier_alloc(drm_device_t *dev, int channel, uint32_t handle, + int count, uint32_t *b_offset) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_object *obj; + struct mem_block *mem; + uint32_t offset; + int target; + + if (!chan->notifier_heap) { + DRM_ERROR("Channel %d doesn't have a notifier heap!\n", + channel); + return DRM_ERR(EINVAL); + } + + mem = nouveau_mem_alloc_block(chan->notifier_heap, 32, 0, chan->filp); + if (!mem) { + DRM_ERROR("Channel %d notifier block full\n", channel); + return DRM_ERR(ENOMEM); + } + mem->flags = NOUVEAU_MEM_NOTIFIER; + + offset = chan->notifier_block->start + mem->start; + if (chan->notifier_block->flags & NOUVEAU_MEM_FB) { + offset -= drm_get_resource_start(dev, 1); + target = NV_DMA_TARGET_VIDMEM; + } else if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) { + offset -= dev_priv->agp_phys; + target = NV_DMA_TARGET_AGP; + } else { + DRM_ERROR("Bad DMA target, flags 0x%08x!\n", + chan->notifier_block->flags); + return DRM_ERR(EINVAL); + } + + obj = nouveau_object_dma_create(dev, channel, NV_CLASS_DMA_IN_MEMORY, + offset, mem->size, NV_DMA_ACCESS_RW, + target); + if (!obj) { + nouveau_mem_free_block(mem); + DRM_ERROR("Error creating notifier ctxdma\n"); + return DRM_ERR(ENOMEM); + } + + obj->handle = handle; + if (nouveau_ramht_insert(dev, channel, handle, obj)) { + nouveau_object_free(dev, obj); + nouveau_mem_free_block(mem); + DRM_ERROR("Error inserting notifier ctxdma into RAMHT\n"); + return DRM_ERR(ENOMEM); + } + + *b_offset = mem->start; + return 0; +} + +int +nouveau_ioctl_notifier_alloc(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_nouveau_notifier_alloc_t na; + int ret; + + DRM_COPY_FROM_USER_IOCTL(na, (drm_nouveau_notifier_alloc_t __user*)data, + sizeof(na)); + + if (!nouveau_fifo_owner(dev, filp, na.channel)) { + DRM_ERROR("pid %d doesn't own channel %d\n", + DRM_CURRENTPID, na.channel); + return DRM_ERR(EPERM); + } + + ret = nouveau_notifier_alloc(dev, na.channel, na.handle, + na.count, &na.offset); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((drm_nouveau_notifier_alloc_t __user*)data, + na, sizeof(na)); + return 0; +} + diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index ace7c2aa..dac08df4 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -139,7 +139,7 @@ nouveau_object_handle_find(drm_device_t *dev, int channel, uint32_t handle) is given as: */ static uint32_t 
-nouveau_ht_handle_hash(drm_device_t *dev, int channel, uint32_t handle) +nouveau_ramht_hash_handle(drm_device_t *dev, int channel, uint32_t handle) { drm_nouveau_private_t *dev_priv=dev->dev_private; uint32_t hash = 0; @@ -154,62 +154,89 @@ nouveau_ht_handle_hash(drm_device_t *dev, int channel, uint32_t handle) } static int -nouveau_ht_object_insert(drm_device_t* dev, int channel, uint32_t handle, - struct nouveau_object *obj) +nouveau_ramht_entry_valid(drm_device_t *dev, uint32_t ramht, uint32_t offset) { drm_nouveau_private_t *dev_priv=dev->dev_private; - int ht_base = NV_RAMIN + dev_priv->ramht_offset; - int ht_end = ht_base + dev_priv->ramht_size; - int o_ofs, ofs; - - obj->handle = handle; - o_ofs = ofs = nouveau_ht_handle_hash(dev, channel, obj->handle); - - while (NV_READ(ht_base + ofs) || NV_READ(ht_base + ofs + 4)) { - ofs += 8; - if (ofs == ht_end) ofs = ht_base; - if (ofs == o_ofs) { - DRM_ERROR("no free hash table entries\n"); - return 1; - } + uint32_t ctx = NV_RI32(ramht + offset + 4); + + if (dev_priv->card_type < NV_40) + return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0); + return (ctx != 0); +} + +int +nouveau_ramht_insert(drm_device_t* dev, int channel, uint32_t handle, + struct nouveau_object *obj) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + uint32_t ramht = dev_priv->ramht_offset; + uint32_t ctx, co, ho; + uint32_t inst; + + inst = nouveau_chip_instance_get(dev, obj->instance); + if (dev_priv->card_type < NV_40) { + ctx = NV_RAMHT_CONTEXT_VALID | inst | + (channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (obj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); + } else + if (dev_priv->card_type < NV_50) { + ctx = inst | + (channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); + } else { + ctx = inst | + (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); } - ofs += ht_base; - - DRM_DEBUG("Channel %d - Handle 0x%08x at 0x%08x\n", - channel, obj->handle, ofs); - - NV_WRITE(NV_RAMHT_HANDLE_OFFSET + ofs, obj->handle); - if (dev_priv->card_type >= NV_40) - NV_WRITE(NV_RAMHT_CONTEXT_OFFSET + ofs, - (channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | - (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT) | - nouveau_chip_instance_get(dev, obj->instance) - ); - else - NV_WRITE(NV_RAMHT_CONTEXT_OFFSET + ofs, - NV_RAMHT_CONTEXT_VALID | - (channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | - (obj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT) | - nouveau_chip_instance_get(dev, obj->instance) - ); - - obj->ht_loc = ofs; - return 0; + + co = ho = nouveau_ramht_hash_handle(dev, channel, handle); + do { + if (!nouveau_ramht_entry_valid(dev, ramht, co)) { + DRM_DEBUG("insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", + channel, co, handle, ctx); + NV_WI32(ramht + co + 0, handle); + NV_WI32(ramht + co + 4, ctx); + obj->handle = handle; + return 0; + } + DRM_DEBUG("collision ch%d 0x%08x: h=0x%08x\n", + channel, co, NV_RI32(ramht + co)); + + co += 8; + if (co == dev_priv->ramht_size) + co = 0; + } while (co != ho); + + DRM_ERROR("RAMHT space exhausted. 
ch=%d\n", channel); + return DRM_ERR(ENOMEM); } -static void nouveau_hash_table_remove(drm_device_t* dev, - struct nouveau_object *obj) +static void +nouveau_ramht_remove(drm_device_t* dev, struct nouveau_object *obj) { drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t ramht = dev_priv->ramht_offset; + uint32_t co, ho; + + co = ho = nouveau_ramht_hash_handle(dev, obj->channel, obj->handle); + do { + if (nouveau_ramht_entry_valid(dev, ramht, co) && + (obj->handle == NV_RI32(ramht + co))) { + DRM_DEBUG("remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", + obj->channel, co, obj->handle, + NV_RI32(ramht + co + 4)); + NV_WI32(ramht + co + 0, 0x00000000); + NV_WI32(ramht + co + 4, 0x00000000); + obj->handle = ~0; + return; + } - DRM_DEBUG("Remove handle 0x%08x at 0x%08x from HT\n", - obj->handle, obj->ht_loc); - if (obj->ht_loc) { - DRM_DEBUG("... HT entry was: 0x%08x/0x%08x\n", - NV_READ(obj->ht_loc), NV_READ(obj->ht_loc+4)); - NV_WRITE(obj->ht_loc , 0x00000000); - NV_WRITE(obj->ht_loc+4, 0x00000000); - } + co += 8; + if (co == dev_priv->ramht_size) + co = 0; + } while (co != ho); + + DRM_ERROR("RAMHT entry not found. ch=%d, handle=0x%08x\n", + obj->channel, obj->handle); } static struct nouveau_object * @@ -457,119 +484,74 @@ nouveau_object_free(drm_device_t *dev, struct nouveau_object *obj) { nouveau_object_instance_free(dev, obj); if (obj->handle != ~0) - nouveau_hash_table_remove(dev, obj); + nouveau_ramht_remove(dev, obj); drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER); } -void nouveau_object_cleanup(drm_device_t *dev, int channel) -{ - drm_nouveau_private_t *dev_priv=dev->dev_private; - - while (dev_priv->fifos[channel].objs) { - nouveau_object_free(dev, dev_priv->fifos[channel].objs); - } -} - -int nouveau_ioctl_object_init(DRM_IOCTL_ARGS) +int +nouveau_object_init_channel(drm_device_t *dev, int channel, + uint32_t vram_handle, + uint32_t tt_handle) { - DRM_DEVICE; - drm_nouveau_object_init_t init; - struct nouveau_object *obj; - - DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_object_init_t __user *) - data, sizeof(init)); - - if (!nouveau_fifo_owner(dev, filp, init.channel)) { - DRM_ERROR("pid %d doesn't own channel %d\n", - DRM_CURRENTPID, init.channel); - return DRM_ERR(EINVAL); + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_object *gpuobj; + int ret; + + /* VRAM ctxdma */ + gpuobj = nouveau_object_dma_create(dev, channel, NV_CLASS_DMA_IN_MEMORY, + 0, dev_priv->fb_available_size, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_VIDMEM); + if (!gpuobj) { + DRM_ERROR("Error creating VRAM ctxdma: %d\n", DRM_ERR(ENOMEM)); + return DRM_ERR(ENOMEM); } - //FIXME: check args, only allow trusted objects to be created - - if (nouveau_object_handle_find(dev, init.channel, init.handle)) { - DRM_ERROR("Channel %d: handle 0x%08x already exists\n", - init.channel, init.handle); - return DRM_ERR(EINVAL); + ret = nouveau_ramht_insert(dev, channel, vram_handle, gpuobj); + if (ret) { + DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret); + return ret; } - obj = nouveau_object_gr_create(dev, init.channel, init.class); - if (!obj) + /* non-AGP unimplemented */ + if (dev_priv->agp_heap == NULL) + return 0; + + /* GART ctxdma */ + gpuobj = nouveau_object_dma_create(dev, channel, NV_CLASS_DMA_IN_MEMORY, + 0, dev_priv->agp_available_size, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_AGP); + if (!gpuobj) { + DRM_ERROR("Error creating TT ctxdma: %d\n", DRM_ERR(ENOMEM)); return DRM_ERR(ENOMEM); + } - if (nouveau_ht_object_insert(dev, init.channel, init.handle, obj)) { - 
nouveau_object_free(dev, obj); - return DRM_ERR(ENOMEM); + ret = nouveau_ramht_insert(dev, channel, tt_handle, gpuobj); + if (ret) { + DRM_ERROR("Error referencing TT ctxdma: %d\n", ret); + return ret; } return 0; } -static int -nouveau_dma_object_check_access(drm_device_t *dev, - drm_nouveau_dma_object_init_t *init) +void nouveau_object_cleanup(drm_device_t *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; - uint64_t limit; - - /* Check for known DMA object classes */ - switch (init->class) { - case NV_CLASS_DMA_IN_MEMORY: - case NV_CLASS_DMA_FROM_MEMORY: - case NV_CLASS_DMA_TO_MEMORY: - break; - default: - DRM_ERROR("invalid class = 0x%x\n", init->class); - return DRM_ERR(EPERM); - } - - /* Check access mode, and translate to NV_DMA_ACCESS_* */ - switch (init->access) { - case NOUVEAU_MEM_ACCESS_RO: - init->access = NV_DMA_ACCESS_RO; - break; - case NOUVEAU_MEM_ACCESS_WO: - init->access = NV_DMA_ACCESS_WO; - break; - case NOUVEAU_MEM_ACCESS_RW: - init->access = NV_DMA_ACCESS_RW; - break; - default: - DRM_ERROR("invalid access mode = %d\n", init->access); - return DRM_ERR(EPERM); - } - - /* Check that request is within the allowed limits of "target" */ - switch (init->target) { - case NOUVEAU_MEM_FB: - limit = dev_priv->fb_available_size; - init->target = NV_DMA_TARGET_VIDMEM; - break; - case NOUVEAU_MEM_AGP: - limit = dev_priv->agp_available_size; - init->target = NV_DMA_TARGET_AGP; - break; - default: - DRM_ERROR("invalid target = 0x%x\n", init->target); - return DRM_ERR(EPERM); - } + drm_nouveau_private_t *dev_priv=dev->dev_private; - if ((init->offset > limit) || (init->offset + init->size) > limit) { - DRM_ERROR("access out of allowed range (%d,0x%08x,0x%08x)\n", - init->target, init->offset, init->size); - return DRM_ERR(EPERM); + while (dev_priv->fifos[channel].objs) { + nouveau_object_free(dev, dev_priv->fifos[channel].objs); } - - return 0; } -int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS) +int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_nouveau_dma_object_init_t init; + drm_nouveau_grobj_alloc_t init; struct nouveau_object *obj; - DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_dma_object_init_t __user *) + DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_grobj_alloc_t __user *) data, sizeof(init)); if (!nouveau_fifo_owner(dev, filp, init.channel)) { @@ -578,8 +560,7 @@ int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS) return DRM_ERR(EINVAL); } - if (nouveau_dma_object_check_access(dev, &init)) - return DRM_ERR(EPERM); + //FIXME: check args, only allow trusted objects to be created if (nouveau_object_handle_find(dev, init.channel, init.handle)) { DRM_ERROR("Channel %d: handle 0x%08x already exists\n", @@ -587,14 +568,11 @@ int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS) return DRM_ERR(EINVAL); } - obj = nouveau_object_dma_create(dev, init.channel, init.class, - init.offset, init.size, - init.access, init.target); + obj = nouveau_object_gr_create(dev, init.channel, init.class); if (!obj) return DRM_ERR(ENOMEM); - obj->handle = init.handle; - if (nouveau_ht_object_insert(dev, init.channel, init.handle, obj)) { + if (nouveau_ramht_insert(dev, init.channel, init.handle, obj)) { nouveau_object_free(dev, obj); return DRM_ERR(ENOMEM); } diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index ea4a2f6b..4c013c53 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -135,6 +135,17 @@ #define NV10_PGRAPH_CTX_CACHE4 0x004001C0 #define NV04_PGRAPH_CTX_CACHE4 0x004001E0 #define NV10_PGRAPH_CTX_CACHE5 0x004001E0 
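The nouveau_object.c changes above turn the old hash-table walk into nouveau_ramht_insert()/nouveau_ramht_remove(): hash the (channel, handle) pair, then probe RAMHT entries, each a handle word plus a context word, in 8-byte steps with wrap-around until a free slot (or, on removal, the matching handle) is found. The toy below models just that open-addressing walk in stand-alone C; the hash is a placeholder (the real nouveau_ramht_hash_handle folds the handle together with the channel in a chipset-dependent way), and the entry layout is simplified.

/* Toy model of the RAMHT insert/remove walk from the hunks above: each entry
 * is a {handle, context} pair, probing advances one entry at a time and wraps,
 * and a zero context word marks a free slot (as on >=NV40 in the patch).
 * The hash below is a placeholder, not the hardware's. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RAMHT_ENTRIES 64

struct ramht_entry { uint32_t handle, ctx; };
static struct ramht_entry ramht[RAMHT_ENTRIES];

static uint32_t ramht_hash(int channel, uint32_t handle)
{
	uint32_t hash = 0;
	/* placeholder: fold the handle bytes, then mix in the channel */
	while (handle) {
		hash ^= handle & 0xff;
		handle >>= 8;
	}
	hash ^= (uint32_t)channel << 3;
	return hash % RAMHT_ENTRIES;
}

static int ramht_insert(int channel, uint32_t handle, uint32_t ctx)
{
	uint32_t co, ho;

	co = ho = ramht_hash(channel, handle);
	do {
		if (ramht[co].ctx == 0) {		/* free slot */
			ramht[co].handle = handle;
			ramht[co].ctx = ctx;
			return 0;
		}
		co = (co + 1) % RAMHT_ENTRIES;		/* collision: next entry, wrap */
	} while (co != ho);

	return -1;					/* table exhausted */
}

static void ramht_remove(int channel, uint32_t handle)
{
	uint32_t co, ho;

	co = ho = ramht_hash(channel, handle);
	do {
		if (ramht[co].ctx && ramht[co].handle == handle) {
			memset(&ramht[co], 0, sizeof(ramht[co]));
			return;
		}
		co = (co + 1) % RAMHT_ENTRIES;
	} while (co != ho);
}

int main(void)
{
	ramht_insert(0, 0xbeef0201, 0x00100000);
	ramht_insert(0, 0xbeef0202, 0x00100001);
	ramht_remove(0, 0xbeef0201);
	printf("slot for 0xbeef0202: ctx=0x%08x\n",
	       ramht[ramht_hash(0, 0xbeef0202)].ctx);
	return 0;
}

As in the patch, removal simply zeroes the entry rather than re-packing the table; the context word doubles as the "occupied" marker.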
+#define NV40_PGRAPH_CTXCTL_0304 0x00400304 +#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001 +#define NV40_PGRAPH_CTXCTL_0310 0x00400310 +#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020 +#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040 +#define NV40_PGRAPH_CTXCTL_030C 0x0040030c +#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324 +#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328 +#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c +#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000 +#define NV40_PGRAPH_CTXCTL_CUR_INST_MASK 0x000FFFFF #define NV03_PGRAPH_ABS_X_RAM 0x00400400 #define NV03_PGRAPH_ABS_Y_RAM 0x00400480 #define NV03_PGRAPH_X_MISC 0x00400500 @@ -230,7 +241,11 @@ #define NV10_PGRAPH_SCALED_FORMAT 0x00400778 #define NV10_PGRAPH_CHANNEL_CTX_TABLE 0x00400780 #define NV10_PGRAPH_CHANNEL_CTX_SIZE 0x00400784 +#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784 #define NV10_PGRAPH_CHANNEL_CTX_POINTER 0x00400788 +#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788 +#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001 +#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002 #define NV04_PGRAPH_PATT_COLOR0 0x00400800 #define NV04_PGRAPH_PATT_COLOR1 0x00400804 #define NV04_PGRAPH_PATTERN 0x00400808 @@ -404,7 +419,7 @@ #define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C #define NV03_PFIFO_CACHE1_GET 0x00003270 #define NV04_PFIFO_CACHE1_ENGINE 0x00003280 -#define NV10_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0 +#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0 #define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0 #define NV40_PFIFO_UNK32E4 0x000032E4 #define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8)) @@ -427,7 +442,10 @@ #define NV04_RAMFC_DMA_PUT 0x00 #define NV04_RAMFC_DMA_GET 0x04 #define NV04_RAMFC_DMA_INSTANCE 0x08 +#define NV04_RAMFC_DMA_STATE 0x0C #define NV04_RAMFC_DMA_FETCH 0x10 +#define NV04_RAMFC_ENGINE 0x14 +#define NV04_RAMFC_PULL1_ENGINE 0x18 #define NV10_RAMFC_DMA_PUT 0x00 #define NV10_RAMFC_DMA_GET 0x04 @@ -462,6 +480,6 @@ #define NV40_RAMFC_UNK_40 0x40 #define NV40_RAMFC_UNK_44 0x44 #define NV40_RAMFC_UNK_48 0x48 -#define NV40_RAMFC_2088 0x4C -#define NV40_RAMFC_3300 0x50 +#define NV40_RAMFC_UNK_4C 0x4C +#define NV40_RAMFC_UNK_50 0x50 diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index e7930b9e..fa773d28 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -51,6 +51,7 @@ static int nouveau_init_card_mappings(drm_device_t *dev) DRM_DEBUG("regs mapped ok at 0x%lx\n", dev_priv->mmio->offset); /* map larger RAMIN aperture on NV40 cards */ + dev_priv->ramin = NULL; if (dev_priv->card_type >= NV_40) { int ramin_resource = 2; if (drm_get_resource_len(dev, ramin_resource) == 0) @@ -66,12 +67,26 @@ static int nouveau_init_card_mappings(drm_device_t *dev) "limited instance memory available\n"); dev_priv->ramin = NULL; } - } else - dev_priv->ramin = NULL; + } + + /* On older cards (or if the above failed), create a map covering + * the BAR0 PRAMIN aperture */ + if (!dev_priv->ramin) { + ret = drm_addmap(dev, + drm_get_resource_start(dev, 0) + NV_RAMIN, + (1*1024*1024), + _DRM_REGISTERS, _DRM_READ_ONLY, + &dev_priv->ramin); + if (ret) { + DRM_ERROR("Failed to map BAR0 PRAMIN: %d\n", ret); + return ret; + } + } return 0; } +static int nouveau_stub_init(drm_device_t *dev) { return 0; } static void nouveau_stub_takedown(drm_device_t *dev) {} static int nouveau_init_engine_ptrs(drm_device_t *dev) { @@ -80,66 +95,126 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev) switch (dev_priv->chipset & 0xf0) { case 0x00: - engine->Mc.Init = nv04_mc_init; - 
engine->Mc.Takedown = nv04_mc_takedown; - engine->Timer.Init = nv04_timer_init; - engine->Timer.Takedown = nv04_timer_takedown; - engine->Fb.Init = nv04_fb_init; - engine->Fb.Takedown = nv04_fb_takedown; - engine->Graph.Init = nv04_graph_init; - engine->Graph.Takedown = nv04_graph_takedown; - engine->Fifo.Init = nouveau_fifo_init; - engine->Fifo.Takedown = nouveau_stub_takedown; + engine->mc.init = nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; + engine->timer.init = nv04_timer_init; + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv04_fb_init; + engine->fb.takedown = nv04_fb_takedown; + engine->graph.init = nv04_graph_init; + engine->graph.takedown = nv04_graph_takedown; + engine->graph.create_context = nv04_graph_create_context; + engine->graph.destroy_context = nv04_graph_destroy_context; + engine->graph.load_context = nv04_graph_load_context; + engine->graph.save_context = nv04_graph_save_context; + engine->fifo.init = nouveau_fifo_init; + engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.create_context = nv04_fifo_create_context; + engine->fifo.destroy_context = nv04_fifo_destroy_context; + engine->fifo.load_context = nv04_fifo_load_context; + engine->fifo.save_context = nv04_fifo_save_context; break; case 0x10: - engine->Mc.Init = nv04_mc_init; - engine->Mc.Takedown = nv04_mc_takedown; - engine->Timer.Init = nv04_timer_init; - engine->Timer.Takedown = nv04_timer_takedown; - engine->Fb.Init = nv10_fb_init; - engine->Fb.Takedown = nv10_fb_takedown; - engine->Graph.Init = nv10_graph_init; - engine->Graph.Takedown = nv10_graph_takedown; - engine->Fifo.Init = nouveau_fifo_init; - engine->Fifo.Takedown = nouveau_stub_takedown; + engine->mc.init = nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; + engine->timer.init = nv04_timer_init; + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv10_fb_init; + engine->fb.takedown = nv10_fb_takedown; + engine->graph.init = nv10_graph_init; + engine->graph.takedown = nv10_graph_takedown; + engine->graph.create_context = nv10_graph_create_context; + engine->graph.destroy_context = nv10_graph_destroy_context; + engine->graph.load_context = nv10_graph_load_context; + engine->graph.save_context = nv10_graph_save_context; + engine->fifo.init = nouveau_fifo_init; + engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.create_context = nv10_fifo_create_context; + engine->fifo.destroy_context = nv10_fifo_destroy_context; + engine->fifo.load_context = nv10_fifo_load_context; + engine->fifo.save_context = nv10_fifo_save_context; break; case 0x20: - engine->Mc.Init = nv04_mc_init; - engine->Mc.Takedown = nv04_mc_takedown; - engine->Timer.Init = nv04_timer_init; - engine->Timer.Takedown = nv04_timer_takedown; - engine->Fb.Init = nv10_fb_init; - engine->Fb.Takedown = nv10_fb_takedown; - engine->Graph.Init = nv20_graph_init; - engine->Graph.Takedown = nv20_graph_takedown; - engine->Fifo.Init = nouveau_fifo_init; - engine->Fifo.Takedown = nouveau_stub_takedown; + engine->mc.init = nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; + engine->timer.init = nv04_timer_init; + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv10_fb_init; + engine->fb.takedown = nv10_fb_takedown; + engine->graph.init = nv20_graph_init; + engine->graph.takedown = nv20_graph_takedown; + engine->graph.create_context = nv20_graph_create_context; + engine->graph.destroy_context = nv20_graph_destroy_context; + engine->graph.load_context = nv20_graph_load_context; + engine->graph.save_context = 
nv20_graph_save_context; + engine->fifo.init = nouveau_fifo_init; + engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.create_context = nv10_fifo_create_context; + engine->fifo.destroy_context = nv10_fifo_destroy_context; + engine->fifo.load_context = nv10_fifo_load_context; + engine->fifo.save_context = nv10_fifo_save_context; break; case 0x30: - engine->Mc.Init = nv04_mc_init; - engine->Mc.Takedown = nv04_mc_takedown; - engine->Timer.Init = nv04_timer_init; - engine->Timer.Takedown = nv04_timer_takedown; - engine->Fb.Init = nv10_fb_init; - engine->Fb.Takedown = nv10_fb_takedown; - engine->Graph.Init = nv30_graph_init; - engine->Graph.Takedown = nv30_graph_takedown; - engine->Fifo.Init = nouveau_fifo_init; - engine->Fifo.Takedown = nouveau_stub_takedown; + engine->mc.init = nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; + engine->timer.init = nv04_timer_init; + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv10_fb_init; + engine->fb.takedown = nv10_fb_takedown; + engine->graph.init = nv30_graph_init; + engine->graph.takedown = nv30_graph_takedown; + engine->graph.create_context = nv30_graph_create_context; + engine->graph.destroy_context = nv30_graph_destroy_context; + engine->graph.load_context = nv30_graph_load_context; + engine->graph.save_context = nv30_graph_save_context; + engine->fifo.init = nouveau_fifo_init; + engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.create_context = nv10_fifo_create_context; + engine->fifo.destroy_context = nv10_fifo_destroy_context; + engine->fifo.load_context = nv10_fifo_load_context; + engine->fifo.save_context = nv10_fifo_save_context; break; case 0x40: - engine->Mc.Init = nv40_mc_init; - engine->Mc.Takedown = nv40_mc_takedown; - engine->Timer.Init = nv04_timer_init; - engine->Timer.Takedown = nv04_timer_takedown; - engine->Fb.Init = nv40_fb_init; - engine->Fb.Takedown = nv40_fb_takedown; - engine->Graph.Init = nv40_graph_init; - engine->Graph.Takedown = nv40_graph_takedown; - engine->Fifo.Init = nouveau_fifo_init; - engine->Fifo.Takedown = nouveau_stub_takedown; + engine->mc.init = nv40_mc_init; + engine->mc.takedown = nv40_mc_takedown; + engine->timer.init = nv04_timer_init; + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv40_fb_init; + engine->fb.takedown = nv40_fb_takedown; + engine->graph.init = nv40_graph_init; + engine->graph.takedown = nv40_graph_takedown; + engine->graph.create_context = nv40_graph_create_context; + engine->graph.destroy_context = nv40_graph_destroy_context; + engine->graph.load_context = nv40_graph_load_context; + engine->graph.save_context = nv40_graph_save_context; + engine->fifo.init = nouveau_fifo_init; + engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.create_context = nv40_fifo_create_context; + engine->fifo.destroy_context = nv40_fifo_destroy_context; + engine->fifo.load_context = nv40_fifo_load_context; + engine->fifo.save_context = nv40_fifo_save_context; break; case 0x50: + case 0x80: /* gotta love NVIDIA's consistency.. 
*/ + engine->mc.init = nv50_mc_init; + engine->mc.takedown = nv50_mc_takedown; + engine->timer.init = nouveau_stub_init; + engine->timer.takedown = nouveau_stub_takedown; + engine->fb.init = nouveau_stub_init; + engine->fb.takedown = nouveau_stub_takedown; + engine->graph.init = nv50_graph_init; + engine->graph.takedown = nv50_graph_takedown; + engine->graph.create_context = nv50_graph_create_context; + engine->graph.destroy_context = nv50_graph_destroy_context; + engine->graph.load_context = nv50_graph_load_context; + engine->graph.save_context = nv50_graph_save_context; + engine->fifo.init = nv50_fifo_init; + engine->fifo.takedown = nv50_fifo_takedown; + engine->fifo.create_context = nv50_fifo_create_context; + engine->fifo.destroy_context = nv50_fifo_destroy_context; + engine->fifo.load_context = nv50_fifo_load_context; + engine->fifo.save_context = nv50_fifo_save_context; + break; default: DRM_ERROR("NV%02x unsupported\n", dev_priv->chipset); return 1; @@ -184,23 +259,23 @@ static int nouveau_card_init(drm_device_t *dev) /* Parse BIOS tables / Run init tables? */ /* PMC */ - ret = engine->Mc.Init(dev); + ret = engine->mc.init(dev); if (ret) return ret; /* PTIMER */ - ret = engine->Timer.Init(dev); + ret = engine->timer.init(dev); if (ret) return ret; /* PFB */ - ret = engine->Fb.Init(dev); + ret = engine->fb.init(dev); if (ret) return ret; /* PGRAPH */ - ret = engine->Graph.Init(dev); + ret = engine->graph.init(dev); if (ret) return ret; /* PFIFO */ - ret = engine->Fifo.Init(dev); + ret = engine->fifo.init(dev); if (ret) return ret; /* what about PVIDEO/PCRTC/PRAMDAC etc? */ @@ -213,9 +288,9 @@ void nouveau_preclose(drm_device_t * dev, DRMFILE filp) { drm_nouveau_private_t *dev_priv = dev->dev_private; + nouveau_fifo_cleanup(dev, filp); nouveau_mem_release(filp,dev_priv->fb_heap); nouveau_mem_release(filp,dev_priv->agp_heap); - nouveau_fifo_cleanup(dev, filp); } /* first module load, setup the mmio/fb mapping */ @@ -235,7 +310,6 @@ int nouveau_firstopen(struct drm_device *dev) int nouveau_load(struct drm_device *dev, unsigned long flags) { drm_nouveau_private_t *dev_priv; - int ret; if (flags==NV_UNKNOWN) return DRM_ERR(EINVAL); diff --git a/shared-core/nv04_fifo.c b/shared-core/nv04_fifo.c new file mode 100644 index 00000000..bfae432e --- /dev/null +++ b/shared-core/nv04_fifo.c @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
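The nouveau_state.c hunk above replaces the old Mc/Timer/Fb/Graph/Fifo Init/Takedown pairs with a lower-case engine table that also carries per-chipset create/destroy/load/save context hooks, and nouveau_card_init() then brings the blocks up in a fixed order: PMC, PTIMER, PFB, PGRAPH, PFIFO. The fragment below is a stand-alone sketch of that vtable-by-chipset pattern, with dummy functions and only one chipset family filled in; it mirrors the structure, not the driver's code.

/* Stand-alone sketch of the engine-vtable pattern used by
 * nouveau_init_engine_ptrs()/nouveau_card_init() above: each chipset family
 * fills a table of function pointers, and card init walks them in order.
 * All functions here are dummies; only the shape mirrors the driver. */
#include <stdio.h>

struct subsys { int (*init)(void); void (*takedown)(void); };
struct engine { struct subsys mc, timer, fb, graph, fifo; };

static void stub_takedown(void) { }

static int nv04_mc_init(void)    { puts("PMC up (nv04 path)");    return 0; }
static int nv04_timer_init(void) { puts("PTIMER up (nv04 path)"); return 0; }
static int nv10_fb_init(void)    { puts("PFB up (nv10 path)");    return 0; }
static int nv20_graph_init(void) { puts("PGRAPH up (nv20 path)"); return 0; }
static int fifo_init(void)       { puts("PFIFO up");              return 0; }

static int init_engine_ptrs(struct engine *e, int chipset)
{
	switch (chipset & 0xf0) {
	case 0x20:	/* one family shown; the driver has a case per family */
		e->mc    = (struct subsys){ nv04_mc_init,    stub_takedown };
		e->timer = (struct subsys){ nv04_timer_init, stub_takedown };
		e->fb    = (struct subsys){ nv10_fb_init,    stub_takedown };
		e->graph = (struct subsys){ nv20_graph_init, stub_takedown };
		e->fifo  = (struct subsys){ fifo_init,       stub_takedown };
		return 0;
	default:
		fprintf(stderr, "NV%02x unsupported\n", chipset);
		return 1;
	}
}

static int card_init(struct engine *e)
{
	/* same bring-up order as nouveau_card_init(): PMC, PTIMER, PFB, PGRAPH, PFIFO */
	if (e->mc.init())    return 1;
	if (e->timer.init()) return 1;
	if (e->fb.init())    return 1;
	if (e->graph.init()) return 1;
	if (e->fifo.init())  return 1;
	return 0;
}

int main(void)
{
	struct engine e;

	if (init_engine_ptrs(&e, 0x25))
		return 1;
	return card_init(&e);
}

The payoff of the table is visible in the rest of this merge: the per-chipset FIFO and PGRAPH context code added below plugs into the same five hooks instead of being special-cased at each call site.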
+ * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +#define RAMFC_WR(offset, val) NV_WI32(fifoctx + NV04_RAMFC_##offset, (val)) +#define RAMFC_RD(offset) NV_RI32(fifoctx + NV04_RAMFC_##offset) +#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE)) +#define NV04_RAMFC__SIZE 32 + +int +nv04_fifo_create_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_object *pb = chan->cmdbuf_obj; + uint32_t fifoctx = NV04_RAMFC(channel); + int i; + + if (!pb || !pb->instance) + return DRM_ERR(EINVAL); + + /* Clear RAMFC */ + for (i=0; i<NV04_RAMFC__SIZE; i+=4) + NV_WI32(fifoctx + i, 0); + + /* Setup initial state */ + RAMFC_WR(DMA_PUT, chan->pushbuf_base); + RAMFC_WR(DMA_GET, chan->pushbuf_base); + RAMFC_WR(DMA_INSTANCE, nouveau_chip_instance_get(dev, pb->instance)); + RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | +#ifdef __BIG_ENDIAN + NV_PFIFO_CACHE1_BIG_ENDIAN | +#endif + 0)); + return 0; +} + +void +nv04_fifo_destroy_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx = NV04_RAMFC(channel); + int i; + + for (i=0; i<NV04_RAMFC__SIZE; i+=4) + NV_WI32(fifoctx + i, 0); +} + +int +nv04_fifo_load_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx = NV04_RAMFC(channel); + uint32_t tmp; + + NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, (1<<8) | channel); + + NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, RAMFC_RD(DMA_GET)); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT)); + + tmp = RAMFC_RD(DMA_INSTANCE); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16); + + NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, RAMFC_RD(DMA_STATE)); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, RAMFC_RD(DMA_FETCH)); + NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, RAMFC_RD(ENGINE)); + NV_WRITE(NV04_PFIFO_CACHE1_PULL1, RAMFC_RD(PULL1_ENGINE)); + + /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */ + tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp); + + return 0; +} + +int +nv04_fifo_save_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx = NV04_RAMFC(channel); + uint32_t tmp; + + RAMFC_WR(DMA_PUT, NV04_PFIFO_CACHE1_DMA_PUT); + RAMFC_WR(DMA_GET, NV04_PFIFO_CACHE1_DMA_GET); + + tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16; + tmp |= NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE); + RAMFC_WR(DMA_INSTANCE, tmp); + + RAMFC_WR(DMA_STATE, NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); + RAMFC_WR(DMA_FETCH, NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH)); + RAMFC_WR(ENGINE, NV_READ(NV04_PFIFO_CACHE1_ENGINE)); + RAMFC_WR(PULL1_ENGINE, NV_READ(NV04_PFIFO_CACHE1_PULL1)); + + return 0; +} + diff --git a/shared-core/nv04_graph.c b/shared-core/nv04_graph.c index 0cd4d3b8..1aaae33c 100644 --- a/shared-core/nv04_graph.c +++ b/shared-core/nv04_graph.c @@ -336,7 +336,7 @@ void nouveau_nv04_context_switch(drm_device_t *dev) NV_WRITE(NV04_PGRAPH_FIFO,0x1); } -int nv04_graph_context_create(drm_device_t *dev, int channel) { +int nv04_graph_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; DRM_DEBUG("nv04_graph_context_create %d\n", channel); @@ -351,6 +351,21 @@ int 
nv04_graph_context_create(drm_device_t *dev, int channel) { return 0; } +void nv04_graph_destroy_context(drm_device_t *dev, int channel) +{ +} + +int nv04_graph_load_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} + +int nv04_graph_save_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} int nv04_graph_init(drm_device_t *dev) { drm_nouveau_private_t *dev_priv = dev->dev_private; diff --git a/shared-core/nv10_fifo.c b/shared-core/nv10_fifo.c new file mode 100644 index 00000000..b84971de --- /dev/null +++ b/shared-core/nv10_fifo.c @@ -0,0 +1,154 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +#define RAMFC_WR(offset, val) NV_WI32(fifoctx + NV10_RAMFC_##offset, (val)) +#define RAMFC_RD(offset) NV_RI32(fifoctx + NV10_RAMFC_##offset) +#define NV10_RAMFC(c) (dev_priv->ramfc_offset + NV10_RAMFC__SIZE) +#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 
64 : 32) + +int +nv10_fifo_create_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + uint32_t fifoctx = NV10_RAMFC(channel), pushbuf; + int i; + + pushbuf = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance); + + for (i=0; i<NV10_RAMFC__SIZE; i+=4) + NV_WI32(fifoctx + i, 0); + + /* Fill entries that are seen filled in dumps of nvidia driver just + * after channel's is put into DMA mode + */ + RAMFC_WR(DMA_PUT , chan->pushbuf_base); + RAMFC_WR(DMA_GET , chan->pushbuf_base); + RAMFC_WR(DMA_INSTANCE , pushbuf); + RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | +#ifdef __BIG_ENDIAN + NV_PFIFO_CACHE1_BIG_ENDIAN | +#endif + 0); + + return 0; +} + +void +nv10_fifo_destroy_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx = NV10_RAMFC(channel); + int i; + + for (i=0; i<NV10_RAMFC__SIZE; i+=4) + NV_WI32(fifoctx + i, 0); +} + +int +nv10_fifo_load_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx = NV10_RAMFC(channel); + uint32_t tmp; + + NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00000100 | channel); + + NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET)); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT)); + NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT , RAMFC_RD(REF_CNT)); + + tmp = RAMFC_RD(DMA_INSTANCE); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE , tmp & 0xFFFF); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT , tmp >> 16); + + NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE , RAMFC_RD(DMA_STATE)); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH , RAMFC_RD(DMA_FETCH)); + NV_WRITE(NV04_PFIFO_CACHE1_ENGINE , RAMFC_RD(ENGINE)); + NV_WRITE(NV04_PFIFO_CACHE1_PULL1 , RAMFC_RD(PULL1_ENGINE)); + + if (dev_priv->chipset >= 0x17) { + NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE, + RAMFC_RD(ACQUIRE_VALUE)); + NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, + RAMFC_RD(ACQUIRE_TIMESTAMP)); + NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, + RAMFC_RD(ACQUIRE_TIMEOUT)); + NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE, + RAMFC_RD(SEMAPHORE)); + NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE, + RAMFC_RD(DMA_SUBROUTINE)); + } + + /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */ + tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp); + + return 0; +} + +int +nv10_fifo_save_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx = NV10_RAMFC(channel); + uint32_t tmp; + + RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); + RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); + RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); + + tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF; + tmp |= (NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16); + RAMFC_WR(DMA_INSTANCE , tmp); + + RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); + RAMFC_WR(DMA_FETCH , NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH)); + RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE)); + RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1)); + + if (dev_priv->chipset >= 0x17) { + RAMFC_WR(ACQUIRE_VALUE, + NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); + RAMFC_WR(ACQUIRE_TIMESTAMP, + NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP)); + RAMFC_WR(ACQUIRE_TIMEOUT, + NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); + RAMFC_WR(SEMAPHORE, + 
NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE)); + RAMFC_WR(DMA_SUBROUTINE, + NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); + } + + return 0; +} + diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c index fb189709..d1fe0a54 100644 --- a/shared-core/nv10_graph.c +++ b/shared-core/nv10_graph.c @@ -611,7 +611,7 @@ void nouveau_nv10_context_switch(drm_device_t *dev) if (offset > 0) \ fifo->pgraph_ctx[offset] = val; \ } while (0) -int nv10_graph_context_create(drm_device_t *dev, int channel) { +int nv10_graph_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; struct nouveau_fifo *fifo = &dev_priv->fifos[channel]; uint32_t tmp, vramsz; @@ -663,6 +663,21 @@ int nv10_graph_context_create(drm_device_t *dev, int channel) { return 0; } +void nv10_graph_destroy_context(drm_device_t *dev, int channel) +{ +} + +int nv10_graph_load_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} + +int nv10_graph_save_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} int nv10_graph_init(drm_device_t *dev) { drm_nouveau_private_t *dev_priv = dev->dev_private; diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 7190fc84..1b8a6727 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -29,7 +29,7 @@ #define NV20_GRCTX_SIZE (3529*4) -int nv20_graph_context_create(drm_device_t *dev, int channel) { +int nv20_graph_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; struct nouveau_fifo *chan = &dev_priv->fifos[channel]; @@ -47,10 +47,21 @@ int nv20_graph_context_create(drm_device_t *dev, int channel) { INSTANCE_WR(chan->ramin_grctx, 10, channel << 24); /* CTX_USER */ INSTANCE_WR(dev_priv->ctx_table, channel, nouveau_chip_instance_get(dev, chan->ramin_grctx)); - return 0; } +void nv20_graph_destroy_context(drm_device_t *dev, int channel) { + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + + if (chan->ramin_grctx) { + nouveau_instmem_free(dev, chan->ramin_grctx); + chan->ramin_grctx = NULL; + } + + INSTANCE_WR(dev_priv->ctx_table, channel, 0); +} + static void nv20_graph_rdi(drm_device_t *dev) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; @@ -65,40 +76,42 @@ static void nv20_graph_rdi(drm_device_t *dev) { /* Save current context (from PGRAPH) into the channel's context */ -static void nv20_graph_context_save_current(drm_device_t *dev, int channel) { +int nv20_graph_save_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; uint32_t instance; instance = INSTANCE_RD(dev_priv->ctx_table, channel); if (!instance) { - return; + return DRM_ERR(EINVAL); } if (instance != nouveau_chip_instance_get(dev, dev_priv->fifos[channel].ramin_grctx)) - DRM_ERROR("nv20_graph_context_save_current : bad instance\n"); + DRM_ERROR("nv20_graph_save_context : bad instance\n"); NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_SIZE, instance); NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_POINTER, 2 /* save ctx */); + return 0; } /* Restore the context for a specific channel into PGRAPH */ -static void nv20_graph_context_restore(drm_device_t *dev, int channel) { +int nv20_graph_load_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; uint32_t instance; instance = INSTANCE_RD(dev_priv->ctx_table, channel); if (!instance) { - return; + return 
DRM_ERR(EINVAL); } if (instance != nouveau_chip_instance_get(dev, dev_priv->fifos[channel].ramin_grctx)) - DRM_ERROR("nv20_graph_context_restore_current : bad instance\n"); + DRM_ERROR("nv20_graph_load_context_current : bad instance\n"); NV_WRITE(NV10_PGRAPH_CTX_USER, channel << 24); NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_SIZE, instance); NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_POINTER, 1 /* restore ctx */); + return 0; } void nouveau_nv20_context_switch(drm_device_t *dev) @@ -113,13 +126,13 @@ void nouveau_nv20_context_switch(drm_device_t *dev) NV_WRITE(NV04_PGRAPH_FIFO,0x0); - nv20_graph_context_save_current(dev, channel_old); + nv20_graph_save_context(dev, channel_old); nouveau_wait_for_idle(dev); NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000); - nv20_graph_context_restore(dev, channel); + nv20_graph_load_context(dev, channel); nouveau_wait_for_idle(dev); diff --git a/shared-core/nv30_graph.c b/shared-core/nv30_graph.c index f4faadd8..7a87990a 100644 --- a/shared-core/nv30_graph.c +++ b/shared-core/nv30_graph.c @@ -100,7 +100,7 @@ static void nv30_graph_context_init(drm_device_t *dev, struct mem_block *ctx) } -int nv30_graph_context_create(drm_device_t *dev, int channel) +int nv30_graph_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; @@ -132,6 +132,72 @@ int nv30_graph_context_create(drm_device_t *dev, int channel) return 0; } +void nv30_graph_destroy_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = + (drm_nouveau_private_t *)dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + + if (chan->ramin_grctx) { + nouveau_instmem_free(dev, chan->ramin_grctx); + chan->ramin_grctx = NULL; + } + + INSTANCE_WR(dev_priv->ctx_table, channel, 0); +} + +static int +nouveau_graph_wait_idle(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int tv = 1000; + + while (tv--) { + if (NV_READ(0x400700) == 0) + break; + } + + if (NV_READ(0x400700)) { + DRM_ERROR("timeout!\n"); + return DRM_ERR(EBUSY); + } + return 0; +} + +int nv30_graph_load_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + uint32_t inst; + + if (!chan->ramin_grctx) + return DRM_ERR(EINVAL); + inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); + + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER, + NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD); + + return nouveau_graph_wait_idle(dev); +} + +int nv30_graph_save_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + uint32_t inst; + + if (!chan->ramin_grctx) + return DRM_ERR(EINVAL); + inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); + + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER, + NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE); + + return nouveau_graph_wait_idle(dev); +} + int nv30_graph_init(drm_device_t *dev) { drm_nouveau_private_t *dev_priv = diff --git a/shared-core/nv40_fifo.c b/shared-core/nv40_fifo.c new file mode 100644 index 00000000..6f25349c --- /dev/null +++ b/shared-core/nv40_fifo.c @@ -0,0 +1,189 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. 
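The new nvXX_fifo.c files in this merge all share one shape: a per-channel RAMFC slot at ramfc_offset plus channel times RAMFC__SIZE, RAMFC_WR/RAMFC_RD macros over named offsets inside that slot, create/destroy hooks that zero the slot, and load/save hooks that shuttle the PFIFO CACHE1 registers in and out of it. The stand-alone toy below models that pattern with an array standing in for instance memory and a two-field struct standing in for the live FIFO registers; the offsets and register set are trimmed-down stand-ins, not the hardware layout.

/* Toy model of the RAMFC save/load pattern used by nv04/nv10/nv40_fifo.c:
 * each channel owns a fixed-size slot of "instance memory", and a context
 * switch copies a handful of FIFO registers to/from that slot.
 * Offsets and the register set are illustrative only. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RAMFC__SIZE   32			/* bytes per channel slot */
#define RAMFC(c)      ((c) * RAMFC__SIZE)	/* slot base for channel c */
#define RAMFC_DMA_PUT 0x00			/* offsets within a slot */
#define RAMFC_DMA_GET 0x04

static uint8_t pramin[16 * RAMFC__SIZE];	/* stands in for RAMIN/RAMFC */

struct pfifo { uint32_t dma_put, dma_get; };	/* stands in for CACHE1 regs */

static void wi32(uint32_t ofs, uint32_t val) { memcpy(pramin + ofs, &val, 4); }
static uint32_t ri32(uint32_t ofs) { uint32_t v; memcpy(&v, pramin + ofs, 4); return v; }

static void fifo_create_context(int chan, uint32_t pushbuf_base)
{
	for (int i = 0; i < RAMFC__SIZE; i += 4)	/* clear the slot first */
		wi32(RAMFC(chan) + i, 0);
	wi32(RAMFC(chan) + RAMFC_DMA_PUT, pushbuf_base);
	wi32(RAMFC(chan) + RAMFC_DMA_GET, pushbuf_base);
}

static void fifo_save_context(int chan, const struct pfifo *hw)
{
	wi32(RAMFC(chan) + RAMFC_DMA_PUT, hw->dma_put);
	wi32(RAMFC(chan) + RAMFC_DMA_GET, hw->dma_get);
}

static void fifo_load_context(int chan, struct pfifo *hw)
{
	hw->dma_put = ri32(RAMFC(chan) + RAMFC_DMA_PUT);
	hw->dma_get = ri32(RAMFC(chan) + RAMFC_DMA_GET);
}

int main(void)
{
	struct pfifo hw = { 0 };

	fifo_create_context(1, 0x2000);		/* channel 1 starts at its pushbuf */
	fifo_load_context(1, &hw);
	hw.dma_put += 0x40;			/* pretend some commands were queued */
	fifo_save_context(1, &hw);
	printf("chan1 PUT=0x%08x GET=0x%08x\n",
	       ri32(RAMFC(1) + RAMFC_DMA_PUT), ri32(RAMFC(1) + RAMFC_DMA_GET));
	return 0;
}

Running it shows channel 1's slot holding the updated PUT pointer after a save, which is all a later load needs to resume the channel where it left off; the real files differ mainly in how many registers each generation keeps in the slot.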
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + +#define RAMFC_WR(offset, val) NV_WI32(fifoctx + NV40_RAMFC_##offset, (val)) +#define RAMFC_RD(offset) NV_RI32(fifoctx + NV40_RAMFC_##offset) +#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c)*NV40_RAMFC__SIZE)) +#define NV40_RAMFC__SIZE 128 + +int +nv40_fifo_create_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + uint32_t fifoctx = NV40_RAMFC(channel), grctx, pushbuf; + int i; + + for (i = 0; i < NV40_RAMFC__SIZE; i+=4) + NV_WI32(fifoctx + i, 0); + + grctx = nouveau_chip_instance_get(dev, chan->ramin_grctx); + pushbuf = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance); + + /* Fill entries that are seen filled in dumps of nvidia driver just + * after channel's is put into DMA mode + */ + RAMFC_WR(DMA_PUT , chan->pushbuf_base); + RAMFC_WR(DMA_GET , chan->pushbuf_base); + RAMFC_WR(DMA_INSTANCE , pushbuf); + RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | +#ifdef __BIG_ENDIAN + NV_PFIFO_CACHE1_BIG_ENDIAN | +#endif + 0x30000000 /* no idea.. */); + RAMFC_WR(DMA_SUBROUTINE, 0); + RAMFC_WR(GRCTX_INSTANCE, grctx); + RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF); + + return 0; +} + +void +nv40_fifo_destroy_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx = NV40_RAMFC(channel); + int i; + + for (i = 0; i < NV40_RAMFC__SIZE; i+=4) + NV_WI32(fifoctx + i, 0); +} + +int +nv40_fifo_load_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx = NV40_RAMFC(channel); + uint32_t tmp, tmp2; + + NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET)); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT)); + NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT , RAMFC_RD(REF_CNT)); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE , RAMFC_RD(DMA_INSTANCE)); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT , RAMFC_RD(DMA_DCOUNT)); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE , RAMFC_RD(DMA_STATE)); + + /* No idea what 0x2058 is.. 
*/ + tmp = RAMFC_RD(DMA_FETCH); + tmp2 = NV_READ(0x2058) & 0xFFF; + tmp2 |= (tmp & 0x30000000); + NV_WRITE(0x2058, tmp2); + tmp &= ~0x30000000; + NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH , tmp); + + NV_WRITE(NV04_PFIFO_CACHE1_ENGINE , RAMFC_RD(ENGINE)); + NV_WRITE(NV04_PFIFO_CACHE1_PULL1 , RAMFC_RD(PULL1_ENGINE)); + NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE , RAMFC_RD(ACQUIRE_VALUE)); + NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, RAMFC_RD(ACQUIRE_TIMESTAMP)); + NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT , RAMFC_RD(ACQUIRE_TIMEOUT)); + NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE , RAMFC_RD(SEMAPHORE)); + NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE , RAMFC_RD(DMA_SUBROUTINE)); + NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE , RAMFC_RD(GRCTX_INSTANCE)); + NV_WRITE(0x32e4, RAMFC_RD(UNK_40)); + /* NVIDIA does this next line twice... */ + NV_WRITE(0x32e8, RAMFC_RD(UNK_44)); + NV_WRITE(0x2088, RAMFC_RD(UNK_4C)); + NV_WRITE(0x3300, RAMFC_RD(UNK_50)); + + /* not sure what part is PUT, and which is GET.. never seen a non-zero + * value appear in a mmio-trace yet.. + */ +#if 0 + tmp = NV_READ(UNK_84); + NV_WRITE(NV_PFIFO_CACHE1_GET, tmp ???); + NV_WRITE(NV_PFIFO_CACHE1_PUT, tmp ???); +#endif + + /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */ + tmp = NV_READ(NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF; + tmp |= RAMFC_RD(DMA_TIMESLICE) & 0x1FFFF; + NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, tmp); + + /* Set channel active, and in DMA mode */ + NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00010000 | channel); + /* Reset DMA_CTL_AT_INFO to INVALID */ + tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31); + NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp); + + return 0; +} + +int +nv40_fifo_save_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx = NV40_RAMFC(channel); + uint32_t tmp; + + RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); + RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); + RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); + RAMFC_WR(DMA_INSTANCE , NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE)); + RAMFC_WR(DMA_DCOUNT , NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT)); + RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); + + tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH); + tmp |= NV_READ(0x2058) & 0x30000000; + RAMFC_WR(DMA_FETCH , tmp); + + RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE)); + RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1)); + RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); + tmp = NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP); + RAMFC_WR(ACQUIRE_TIMESTAMP, tmp); + RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); + RAMFC_WR(SEMAPHORE , NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE)); + + /* NVIDIA read 0x3228 first, then write DMA_GET here.. maybe something + * more involved depending on the value of 0x3228? + */ + RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); + + RAMFC_WR(GRCTX_INSTANCE , NV_READ(NV40_PFIFO_GRCTX_INSTANCE)); + + /* No idea what the below is for exactly, ripped from a mmio-trace */ + RAMFC_WR(UNK_40 , NV_READ(NV40_PFIFO_UNK32E4)); + + /* NVIDIA do this next line twice.. bug? */ + RAMFC_WR(UNK_44 , NV_READ(0x32e8)); + RAMFC_WR(UNK_4C , NV_READ(0x2088)); + RAMFC_WR(UNK_50 , NV_READ(0x3300)); + +#if 0 /* no real idea which is PUT/GET in UNK_48.. 
*/ + tmp = NV_READ(NV04_PFIFO_CACHE1_GET); + tmp |= (NV_READ(NV04_PFIFO_CACHE1_PUT) << 16); + RAMFC_WR(UNK_48 , tmp); +#endif + + return 0; +} + diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 792734ed..6fb575db 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -1,7 +1,32 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" -#include "nouveau_drm.h" /* The sizes are taken from the difference between the start of two * grctx addresses while running the nvidia driver. Probably slightly @@ -11,7 +36,9 @@ #define NV40_GRCTX_SIZE (175*1024) #define NV43_GRCTX_SIZE (70*1024) #define NV46_GRCTX_SIZE (70*1024) /* probably ~64KiB */ +#define NV49_GRCTX_SIZE (164640) #define NV4A_GRCTX_SIZE (64*1024) +#define NV4B_GRCTX_SIZE (164640) #define NV4C_GRCTX_SIZE (25*1024) #define NV4E_GRCTX_SIZE (25*1024) @@ -19,7 +46,8 @@ * contexts are taken from dumps just after the 3D object is * created. 
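nv40_fifo_load_context() above restores DMA_FETCH in two pieces: bits 0x30000000 are merged into the low-12-bit remainder of register 0x2058 while the rest goes to CACHE1_DMA_FETCH, and the DMA_TIMESLICE restore replaces only the low 17 bits of the live register. Both are instances of the same read-modify-write idiom, sketched below on plain variables with the same masks; the register values themselves are invented for the example.

/* Read-modify-write with field preservation, as in nv40_fifo_load_context():
 * only the masked field of the destination is replaced, the remaining bits of
 * the live register are kept. Plain variables stand in for the registers. */
#include <stdint.h>
#include <stdio.h>

/* Replace the bits selected by `mask` in `reg` with the same bits of `val`. */
static uint32_t merge_field(uint32_t reg, uint32_t val, uint32_t mask)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t saved_fetch = 0x8013000c;	/* value read back from RAMFC (made up) */
	uint32_t reg_2058    = 0x00000abc;	/* pretend contents of register 0x2058 */
	uint32_t timeslice   = 0xfff2ffff;	/* pretend PFIFO_DMA_TIMESLICE */
	uint32_t saved_slice = 0x0001ffff;	/* timeslice value saved in RAMFC */

	/* bits 0x30000000 of DMA_FETCH live in 0x2058; keep only its low 12 bits */
	reg_2058 = merge_field(reg_2058 & 0xfff, saved_fetch, 0x30000000);
	uint32_t dma_fetch = saved_fetch & ~0x30000000;

	/* restore only the low 17 bits of the timeslice register */
	timeslice = merge_field(timeslice, saved_slice, 0x1ffff);

	printf("0x2058=0x%08x DMA_FETCH=0x%08x TIMESLICE=0x%08x\n",
	       reg_2058, dma_fetch, timeslice);
	return 0;
}

The timeslice case is the one the patch comments on explicitly: clobbering the bits above the mask would drop the TIMEOUT_ENABLED flag when a channel's context is reloaded.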
*/ -static void nv40_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +static void +nv40_graph_context_init(drm_device_t *dev, struct mem_block *ctx) { drm_nouveau_private_t *dev_priv = dev->dev_private; int i; @@ -275,7 +303,8 @@ nv43_graph_context_init(drm_device_t *dev, struct mem_block *ctx) INSTANCE_WR(ctx, i/4, 0x3f800000); }; -static void nv46_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +static void +nv46_graph_context_init(drm_device_t *dev, struct mem_block *ctx) { drm_nouveau_private_t *dev_priv = dev->dev_private; int i; @@ -425,7 +454,231 @@ static void nv46_graph_context_init(drm_device_t *dev, struct mem_block *ctx) INSTANCE_WR(ctx, i/4, 0x3f800000); } -static void nv4a_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +static void +nv49_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int i; + + INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + INSTANCE_WR(ctx, 0x00004/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00008/4, 0x0000c040); + INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00010/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00014/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00018/4, 0x0000c040); + INSTANCE_WR(ctx, 0x0001c/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00020/4, 0x0000c040); + INSTANCE_WR(ctx, 0x000c4/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x000c8/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x000d0/4, 0x00000001); + INSTANCE_WR(ctx, 0x001bc/4, 0x20010001); + INSTANCE_WR(ctx, 0x001c0/4, 0x0f73ef00); + INSTANCE_WR(ctx, 0x001c8/4, 0x02008821); + INSTANCE_WR(ctx, 0x00218/4, 0x00000040); + INSTANCE_WR(ctx, 0x0021c/4, 0x00000040); + INSTANCE_WR(ctx, 0x00220/4, 0x00000040); + INSTANCE_WR(ctx, 0x00228/4, 0x00000040); + INSTANCE_WR(ctx, 0x00234/4, 0x80000000); + INSTANCE_WR(ctx, 0x00238/4, 0x80000000); + INSTANCE_WR(ctx, 0x0023c/4, 0x80000000); + INSTANCE_WR(ctx, 0x00240/4, 0x80000000); + INSTANCE_WR(ctx, 0x00244/4, 0x80000000); + INSTANCE_WR(ctx, 0x00248/4, 0x80000000); + INSTANCE_WR(ctx, 0x0024c/4, 0x80000000); + INSTANCE_WR(ctx, 0x00250/4, 0x80000000); + INSTANCE_WR(ctx, 0x00270/4, 0x0b0b0b0c); + INSTANCE_WR(ctx, 0x003e0/4, 0x00040000); + INSTANCE_WR(ctx, 0x003f0/4, 0x55555555); + INSTANCE_WR(ctx, 0x003f4/4, 0x55555555); + INSTANCE_WR(ctx, 0x003f8/4, 0x55555555); + INSTANCE_WR(ctx, 0x003fc/4, 0x55555555); + INSTANCE_WR(ctx, 0x00428/4, 0x00000008); + INSTANCE_WR(ctx, 0x0043c/4, 0x00001010); + INSTANCE_WR(ctx, 0x00460/4, 0x00000111); + INSTANCE_WR(ctx, 0x00464/4, 0x00000111); + INSTANCE_WR(ctx, 0x00468/4, 0x00000111); + INSTANCE_WR(ctx, 0x0046c/4, 0x00000111); + INSTANCE_WR(ctx, 0x00470/4, 0x00000111); + INSTANCE_WR(ctx, 0x00474/4, 0x00000111); + INSTANCE_WR(ctx, 0x00478/4, 0x00000111); + INSTANCE_WR(ctx, 0x0047c/4, 0x00000111); + INSTANCE_WR(ctx, 0x00480/4, 0x00000111); + INSTANCE_WR(ctx, 0x00484/4, 0x00000111); + INSTANCE_WR(ctx, 0x00488/4, 0x00000111); + INSTANCE_WR(ctx, 0x0048c/4, 0x00000111); + INSTANCE_WR(ctx, 0x00490/4, 0x00000111); + INSTANCE_WR(ctx, 0x00494/4, 0x00000111); + INSTANCE_WR(ctx, 0x00498/4, 0x00000111); + INSTANCE_WR(ctx, 0x0049c/4, 0x00000111); + INSTANCE_WR(ctx, 0x004f4/4, 0x00000111); + INSTANCE_WR(ctx, 0x004f8/4, 0x00080060); + INSTANCE_WR(ctx, 0x00514/4, 0x00000080); + INSTANCE_WR(ctx, 0x00518/4, 0xffff0000); + INSTANCE_WR(ctx, 0x0051c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00530/4, 0x46400000); + INSTANCE_WR(ctx, 0x00540/4, 0xffff0000); + INSTANCE_WR(ctx, 0x00544/4, 0x88888888); + INSTANCE_WR(ctx, 0x00548/4, 0x88888888); + INSTANCE_WR(ctx, 
0x0054c/4, 0x88888888); + INSTANCE_WR(ctx, 0x00550/4, 0x88888888); + INSTANCE_WR(ctx, 0x00554/4, 0x88888888); + INSTANCE_WR(ctx, 0x00558/4, 0x88888888); + INSTANCE_WR(ctx, 0x0055c/4, 0x88888888); + INSTANCE_WR(ctx, 0x00560/4, 0x88888888); + INSTANCE_WR(ctx, 0x00564/4, 0x88888888); + INSTANCE_WR(ctx, 0x00568/4, 0x88888888); + INSTANCE_WR(ctx, 0x0056c/4, 0x88888888); + INSTANCE_WR(ctx, 0x00570/4, 0x88888888); + INSTANCE_WR(ctx, 0x00574/4, 0x88888888); + INSTANCE_WR(ctx, 0x00578/4, 0x88888888); + INSTANCE_WR(ctx, 0x0057c/4, 0x88888888); + INSTANCE_WR(ctx, 0x00580/4, 0x88888888); + INSTANCE_WR(ctx, 0x00594/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00598/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x005a0/4, 0x00011100); + INSTANCE_WR(ctx, 0x005bc/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005c0/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005c4/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005c8/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005cc/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005d0/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005d4/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005d8/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005dc/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005e0/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005e4/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005e8/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005ec/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005f0/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005f4/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005f8/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x00604/4, 0x4b7fffff); + INSTANCE_WR(ctx, 0x0062c/4, 0x30201000); + INSTANCE_WR(ctx, 0x00630/4, 0x70605040); + INSTANCE_WR(ctx, 0x00634/4, 0xb8a89888); + INSTANCE_WR(ctx, 0x00638/4, 0xf8e8d8c8); + INSTANCE_WR(ctx, 0x0064c/4, 0x40100000); + INSTANCE_WR(ctx, 0x00668/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x0069c/4, 0x435185d6); + INSTANCE_WR(ctx, 0x006a0/4, 0x2155b699); + INSTANCE_WR(ctx, 0x006a4/4, 0xfedcba98); + INSTANCE_WR(ctx, 0x006a8/4, 0x00000098); + INSTANCE_WR(ctx, 0x006b8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x006bc/4, 0x00ff7000); + INSTANCE_WR(ctx, 0x006c0/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x006d0/4, 0x00ff0000); + INSTANCE_WR(ctx, 0x0070c/4, 0x00ffff00); + for (i=0x00750; i<=0x0078c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00018488); + for (i=0x00790; i<=0x007cc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00028202); + for (i=0x00810; i<=0x0084c; i+=4) + INSTANCE_WR(ctx, i/4, 0x0000aae4); + for (i=0x00850; i<=0x0088c; i+=4) + INSTANCE_WR(ctx, i/4, 0x01012000); + for (i=0x00890; i<=0x008cc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + for (i=0x00910; i<=0x0094c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00100008); + for (i=0x009a0; i<=0x009ac; i+=4) + INSTANCE_WR(ctx, i/4, 0x0001bc80); + for (i=0x009b0; i<=0x009bc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000202); + for (i=0x009d0; i<=0x009dc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000008); + for (i=0x009f0; i<=0x009fc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + INSTANCE_WR(ctx, 0x00a10/4, 0x00000002); + INSTANCE_WR(ctx, 0x00a44/4, 0x00000421); + INSTANCE_WR(ctx, 0x00a48/4, 0x030c30c3); + INSTANCE_WR(ctx, 0x00a54/4, 0x3e020200); + INSTANCE_WR(ctx, 0x00a58/4, 0x00ffffff); + INSTANCE_WR(ctx, 0x00a5c/4, 0x20103f00); + INSTANCE_WR(ctx, 0x00a68/4, 0x00040000); + INSTANCE_WR(ctx, 0x00aa0/4, 0x00008100); + INSTANCE_WR(ctx, 0x00b2c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00b70/4, 0x00001001); + INSTANCE_WR(ctx, 0x00b7c/4, 0x00000003); + INSTANCE_WR(ctx, 0x00b80/4, 0x00888001); + INSTANCE_WR(ctx, 0x00bb0/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bb4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bb8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bbc/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bc0/4, 0xffffffff); + 
INSTANCE_WR(ctx, 0x00bc4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bc8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bcc/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bd0/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bd4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bd8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bdc/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00be0/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00be4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00be8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bec/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bf0/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bf4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bf8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bfc/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c00/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c04/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c08/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c0c/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c10/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c14/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c18/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c1c/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c20/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c24/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c28/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c2c/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c54/4, 0x00000005); + INSTANCE_WR(ctx, 0x00c60/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00c7c/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c80/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c84/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c88/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c8c/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c90/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c94/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c98/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c9c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00cd4/4, 0x00000001); + INSTANCE_WR(ctx, 0x00cd8/4, 0x08e00001); + INSTANCE_WR(ctx, 0x00cdc/4, 0x000e3000); + for(i=0x030a0; i<=0x03118; i+=8) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x098a0; i<=0x0ba90; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x0baa0; i<=0x0be90; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x0e2e0; i<=0x0fff0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x10008; i<=0x104d0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x104e0; i<=0x108d0; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x12d20; i<=0x14f10; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x14f20; i<=0x15310; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x17760; i<=0x19950; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x19960; i<=0x19d50; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x1c1a0; i<=0x1e390; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x1e3a0; i<=0x1e790; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x20be0; i<=0x22dd0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x22de0; i<=0x231d0; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); +} + +static void +nv4a_graph_context_init(drm_device_t *dev, struct mem_block *ctx) { drm_nouveau_private_t *dev_priv = dev->dev_private; int i; @@ -541,8 +794,223 @@ static void nv4a_graph_context_init(drm_device_t *dev, struct mem_block *ctx) INSTANCE_WR(ctx, i/4, 0x3f800000); } +static void +nv4b_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int i; + + INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + INSTANCE_WR(ctx, 0x00004/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00008/4, 0x0000c040); + INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00010/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00014/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00018/4, 0x0000c040); + 
INSTANCE_WR(ctx, 0x0001c/4, 0x0000c040); + INSTANCE_WR(ctx, 0x00020/4, 0x0000c040); + INSTANCE_WR(ctx, 0x000c4/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x000c8/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x000d0/4, 0x00000001); + INSTANCE_WR(ctx, 0x001bc/4, 0x20010001); + INSTANCE_WR(ctx, 0x001c0/4, 0x0f73ef00); + INSTANCE_WR(ctx, 0x001c8/4, 0x02008821); + INSTANCE_WR(ctx, 0x00218/4, 0x00000040); + INSTANCE_WR(ctx, 0x0021c/4, 0x00000040); + INSTANCE_WR(ctx, 0x00220/4, 0x00000040); + INSTANCE_WR(ctx, 0x00228/4, 0x00000040); + INSTANCE_WR(ctx, 0x00234/4, 0x80000000); + INSTANCE_WR(ctx, 0x00238/4, 0x80000000); + INSTANCE_WR(ctx, 0x0023c/4, 0x80000000); + INSTANCE_WR(ctx, 0x00240/4, 0x80000000); + INSTANCE_WR(ctx, 0x00244/4, 0x80000000); + INSTANCE_WR(ctx, 0x00248/4, 0x80000000); + INSTANCE_WR(ctx, 0x0024c/4, 0x80000000); + INSTANCE_WR(ctx, 0x00250/4, 0x80000000); + INSTANCE_WR(ctx, 0x00270/4, 0x0b0b0b0c); + INSTANCE_WR(ctx, 0x003e0/4, 0x00040000); + INSTANCE_WR(ctx, 0x003f0/4, 0x55555555); + INSTANCE_WR(ctx, 0x003f4/4, 0x55555555); + INSTANCE_WR(ctx, 0x003f8/4, 0x55555555); + INSTANCE_WR(ctx, 0x003fc/4, 0x55555555); + INSTANCE_WR(ctx, 0x00428/4, 0x00000008); + INSTANCE_WR(ctx, 0x0043c/4, 0x00001010); + INSTANCE_WR(ctx, 0x00460/4, 0x00000111); + INSTANCE_WR(ctx, 0x00464/4, 0x00000111); + INSTANCE_WR(ctx, 0x00468/4, 0x00000111); + INSTANCE_WR(ctx, 0x0046c/4, 0x00000111); + INSTANCE_WR(ctx, 0x00470/4, 0x00000111); + INSTANCE_WR(ctx, 0x00474/4, 0x00000111); + INSTANCE_WR(ctx, 0x00478/4, 0x00000111); + INSTANCE_WR(ctx, 0x0047c/4, 0x00000111); + INSTANCE_WR(ctx, 0x00480/4, 0x00000111); + INSTANCE_WR(ctx, 0x00484/4, 0x00000111); + INSTANCE_WR(ctx, 0x00488/4, 0x00000111); + INSTANCE_WR(ctx, 0x0048c/4, 0x00000111); + INSTANCE_WR(ctx, 0x00490/4, 0x00000111); + INSTANCE_WR(ctx, 0x00494/4, 0x00000111); + INSTANCE_WR(ctx, 0x00498/4, 0x00000111); + INSTANCE_WR(ctx, 0x0049c/4, 0x00000111); + INSTANCE_WR(ctx, 0x004f4/4, 0x00000111); + INSTANCE_WR(ctx, 0x004f8/4, 0x00080060); + INSTANCE_WR(ctx, 0x00514/4, 0x00000080); + INSTANCE_WR(ctx, 0x00518/4, 0xffff0000); + INSTANCE_WR(ctx, 0x0051c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00530/4, 0x46400000); + INSTANCE_WR(ctx, 0x00540/4, 0xffff0000); + INSTANCE_WR(ctx, 0x00544/4, 0x88888888); + INSTANCE_WR(ctx, 0x00548/4, 0x88888888); + INSTANCE_WR(ctx, 0x0054c/4, 0x88888888); + INSTANCE_WR(ctx, 0x00550/4, 0x88888888); + INSTANCE_WR(ctx, 0x00554/4, 0x88888888); + INSTANCE_WR(ctx, 0x00558/4, 0x88888888); + INSTANCE_WR(ctx, 0x0055c/4, 0x88888888); + INSTANCE_WR(ctx, 0x00560/4, 0x88888888); + INSTANCE_WR(ctx, 0x00564/4, 0x88888888); + INSTANCE_WR(ctx, 0x00568/4, 0x88888888); + INSTANCE_WR(ctx, 0x0056c/4, 0x88888888); + INSTANCE_WR(ctx, 0x00570/4, 0x88888888); + INSTANCE_WR(ctx, 0x00574/4, 0x88888888); + INSTANCE_WR(ctx, 0x00578/4, 0x88888888); + INSTANCE_WR(ctx, 0x0057c/4, 0x88888888); + INSTANCE_WR(ctx, 0x00580/4, 0x88888888); + INSTANCE_WR(ctx, 0x00594/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00598/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x005a0/4, 0x00011100); + INSTANCE_WR(ctx, 0x005bc/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005c0/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005c4/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005c8/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005cc/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005d0/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005d4/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005d8/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005dc/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005e0/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005e4/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005e8/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005ec/4, 
0x07ff0000); + INSTANCE_WR(ctx, 0x005f0/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005f4/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x005f8/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x00604/4, 0x4b7fffff); + INSTANCE_WR(ctx, 0x0062c/4, 0x30201000); + INSTANCE_WR(ctx, 0x00630/4, 0x70605040); + INSTANCE_WR(ctx, 0x00634/4, 0xb8a89888); + INSTANCE_WR(ctx, 0x00638/4, 0xf8e8d8c8); + INSTANCE_WR(ctx, 0x0064c/4, 0x40100000); + INSTANCE_WR(ctx, 0x00668/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x0069c/4, 0x435185d6); + INSTANCE_WR(ctx, 0x006a0/4, 0x2155b699); + INSTANCE_WR(ctx, 0x006a4/4, 0xfedcba98); + INSTANCE_WR(ctx, 0x006a8/4, 0x00000098); + INSTANCE_WR(ctx, 0x006b8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x006bc/4, 0x00ff7000); + INSTANCE_WR(ctx, 0x006c0/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x006d0/4, 0x00ff0000); + INSTANCE_WR(ctx, 0x0070c/4, 0x00ffff00); + for (i=0x00750; i<=0x0078c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00018488); + for (i=0x00790; i<=0x007cc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00028202); + for (i=0x00810; i<=0x0084c; i+=4) + INSTANCE_WR(ctx, i/4, 0x0000aae4); + for (i=0x00850; i<=0x0088c; i+=4) + INSTANCE_WR(ctx, i/4, 0x01012000); + for (i=0x00890; i<=0x008cc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + for (i=0x00910; i<=0x0094c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00100008); + for (i=0x009a0; i<=0x009ac; i+=4) + INSTANCE_WR(ctx, i/4, 0x0001bc80); + for (i=0x009b0; i<=0x009bc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000202); + for (i=0x009d0; i<=0x009dc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000008); + for (i=0x009f0; i<=0x009fc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + INSTANCE_WR(ctx, 0x00a10/4, 0x00000002); + INSTANCE_WR(ctx, 0x00a44/4, 0x00000421); + INSTANCE_WR(ctx, 0x00a48/4, 0x030c30c3); + INSTANCE_WR(ctx, 0x00a54/4, 0x3e020200); + INSTANCE_WR(ctx, 0x00a58/4, 0x00ffffff); + INSTANCE_WR(ctx, 0x00a5c/4, 0x20103f00); + INSTANCE_WR(ctx, 0x00a68/4, 0x00040000); + INSTANCE_WR(ctx, 0x00aa0/4, 0x00008100); + INSTANCE_WR(ctx, 0x00b2c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00b70/4, 0x00001001); + INSTANCE_WR(ctx, 0x00b7c/4, 0x00000003); + INSTANCE_WR(ctx, 0x00b80/4, 0x00888001); + INSTANCE_WR(ctx, 0x00bb0/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bb4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bb8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bbc/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bc0/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bc4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bc8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bcc/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bd0/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bd4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bd8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bdc/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00be0/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00be4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00be8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bec/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bf0/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bf4/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bf8/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00bfc/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c00/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c04/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c08/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c0c/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c10/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c14/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c18/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c1c/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c20/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c24/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c28/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c2c/4, 0xffffffff); + INSTANCE_WR(ctx, 0x00c54/4, 0x00000005); + INSTANCE_WR(ctx, 0x00c60/4, 0x0000ffff); + 
INSTANCE_WR(ctx, 0x00c7c/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c80/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c84/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c88/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c8c/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c90/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c94/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c98/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c9c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00cd4/4, 0x00000001); + INSTANCE_WR(ctx, 0x00cd8/4, 0x08e00001); + INSTANCE_WR(ctx, 0x00cdc/4, 0x000e3000); + for(i=0x030a0; i<=0x03118; i+=8) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x098a0; i<=0x0ba90; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x0baa0; i<=0x0be90; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x0e2e0; i<=0x0fff0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x10008; i<=0x104d0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x104e0; i<=0x108d0; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x12d20; i<=0x14f10; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x14f20; i<=0x15310; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for(i=0x17760; i<=0x19950; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for(i=0x19960; i<=0x19d50; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); +} -static void nv4c_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +static void +nv4c_graph_context_init(drm_device_t *dev, struct mem_block *ctx) { drm_nouveau_private_t *dev_priv = dev->dev_private; int i; @@ -648,7 +1116,8 @@ static void nv4c_graph_context_init(drm_device_t *dev, struct mem_block *ctx) INSTANCE_WR(ctx, i/4, 0x3f800000); } -static void nv4e_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +static void +nv4e_graph_context_init(drm_device_t *dev, struct mem_block *ctx) { drm_nouveau_private_t *dev_priv = dev->dev_private; int i; @@ -755,7 +1224,7 @@ static void nv4e_graph_context_init(drm_device_t *dev, struct mem_block *ctx) } int -nv40_graph_context_create(drm_device_t *dev, int channel) +nv40_graph_create_context(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; @@ -777,10 +1246,18 @@ nv40_graph_context_create(drm_device_t *dev, int channel) ctx_size = NV46_GRCTX_SIZE; ctx_init = nv46_graph_context_init; break; + case 0x49: + ctx_size = NV49_GRCTX_SIZE; + ctx_init = nv49_graph_context_init; + break; case 0x4a: ctx_size = NV4A_GRCTX_SIZE; ctx_init = nv4a_graph_context_init; break; + case 0x4b: + ctx_size = NV4B_GRCTX_SIZE; + ctx_init = nv4b_graph_context_init; + break; case 0x4c: ctx_size = NV4C_GRCTX_SIZE; ctx_init = nv4c_graph_context_init; @@ -808,89 +1285,99 @@ nv40_graph_context_create(drm_device_t *dev, int channel) return 0; } -/* Save current context (from PGRAPH) into the channel's context - *XXX: fails sometimes, not sure why.. 
- */ void -nv40_graph_context_save_current(drm_device_t *dev) +nv40_graph_destroy_context(drm_device_t *dev, int channel) { - drm_nouveau_private_t *dev_priv = - (drm_nouveau_private_t *)dev->dev_private; - uint32_t instance; - int i; - - NV_WRITE(NV04_PGRAPH_FIFO, 0); + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - instance = NV_READ(0x40032C) & 0xFFFFF; - if (!instance) { - NV_WRITE(NV04_PGRAPH_FIFO, 1); - return; + if (chan->ramin_grctx) { + nouveau_instmem_free(dev, chan->ramin_grctx); + chan->ramin_grctx = NULL; } +} + +static int +nv40_graph_transfer_context(drm_device_t *dev, uint32_t inst, int save) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t old_cp, tv = 1000; + int i; - NV_WRITE(0x400784, instance); - NV_WRITE(0x400310, NV_READ(0x400310) | 0x20); - NV_WRITE(0x400304, 1); - /* just in case, we don't want to spin in-kernel forever */ - for (i=0; i<1000; i++) { - if (NV_READ(0x40030C) == 0) + old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER); + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); + NV_WRITE(NV40_PGRAPH_CTXCTL_0310, + save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE : + NV40_PGRAPH_CTXCTL_0310_XFER_LOAD); + NV_WRITE(NV40_PGRAPH_CTXCTL_0304, NV40_PGRAPH_CTXCTL_0304_XFER_CTX); + + for (i = 0; i < tv; i++) { + if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0) break; } - if (i==1000) { - DRM_ERROR("failed to save current grctx to ramin\n"); - DRM_ERROR("instance = 0x%08x\n", NV_READ(0x40032C)); - DRM_ERROR("0x40030C = 0x%08x\n", NV_READ(0x40030C)); - NV_WRITE(NV04_PGRAPH_FIFO, 1); - return; + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp); + + if (i == tv) { + DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save); + DRM_ERROR("0x40030C = 0x%08x\n", + NV_READ(NV40_PGRAPH_CTXCTL_030C)); + return DRM_ERR(EBUSY); } - NV_WRITE(NV04_PGRAPH_FIFO, 1); + return 0; +} + +/* Save current context (from PGRAPH) into the channel's context + *XXX: fails sometimes, not sure why.. + */ +int +nv40_graph_save_context(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + uint32_t inst; + + if (!chan->ramin_grctx) + return DRM_ERR(EINVAL); + inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); + + return nv40_graph_transfer_context(dev, inst, 1); } /* Restore the context for a specific channel into PGRAPH * XXX: fails sometimes.. 
not sure why */ -void -nv40_graph_context_restore(drm_device_t *dev, int channel) +int +nv40_graph_load_context(drm_device_t *dev, int channel) { - drm_nouveau_private_t *dev_priv = - (drm_nouveau_private_t *)dev->dev_private; + drm_nouveau_private_t *dev_priv = dev->dev_private; struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - uint32_t instance; - int i; + uint32_t inst; + int ret; - instance = nouveau_chip_instance_get(dev, chan->ramin_grctx); - - NV_WRITE(NV04_PGRAPH_FIFO, 0); - NV_WRITE(0x400784, instance); - NV_WRITE(0x400310, NV_READ(0x400310) | 0x40); - NV_WRITE(0x400304, 1); - /* just in case, we don't want to spin in-kernel forever */ - for (i=0; i<1000; i++) { - if (NV_READ(0x40030C) == 0) - break; - } - if (i==1000) { - DRM_ERROR("failed to restore grctx for ch%d to PGRAPH\n", - channel); - DRM_ERROR("instance = 0x%08x\n", instance); - DRM_ERROR("0x40030C = 0x%08x\n", NV_READ(0x40030C)); - NV_WRITE(NV04_PGRAPH_FIFO, 1); - return; - } + if (!chan->ramin_grctx) + return DRM_ERR(EINVAL); + inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); + ret = nv40_graph_transfer_context(dev, inst, 0); + if (ret) + return ret; /* 0x40032C, no idea of it's exact function. Could simply be a * record of the currently active PGRAPH context. It's currently * unknown as to what bit 24 does. The nv ddx has it set, so we will * set it here too. */ - NV_WRITE(0x40032C, instance | 0x01000000); + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); + NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, + (inst & NV40_PGRAPH_CTXCTL_CUR_INST_MASK) | + NV40_PGRAPH_CTXCTL_CUR_LOADED); /* 0x32E0 records the instance address of the active FIFO's PGRAPH * context. If at any time this doesn't match 0x40032C, you will * recieve PGRAPH_INTR_CONTEXT_SWITCH */ - NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, instance); - NV_WRITE(NV04_PGRAPH_FIFO, 1); + NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, inst); + return 0; } /* Some voodoo that makes context switching work without the binary driver @@ -1007,6 +1494,39 @@ static uint32_t nv46_ctx_voodoo[] = { 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 }; +//this is used for nv49 and nv4b +static uint32_t nv49_4b_ctx_voodoo[] ={ + 0x00400564, 0x00400505, 0x00408165, 0x00408206, 0x00409e68, 0x00200020, + 0x0060000a, 0x00700080, 0x00104042, 0x00200020, 0x0060000a, 0x00700000, + 0x001040c5, 0x00400f26, 0x00401068, 0x0060000d, 0x0070008f, 0x0070000e, + 0x00408d68, 0x004015e6, 0x007000a0, 0x00700080, 0x0040180f, 0x00700000, + 0x00200029, 0x0060000a, 0x0011814d, 0x00110158, 0x00105401, 0x0020003a, + 0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9, 0x0010c1dc, 0x00150210, + 0x0012c225, 0x00108238, 0x0010823e, 0x001242c0, 0x00200040, 0x00100280, + 0x00128100, 0x00128120, 0x00128143, 0x0011415f, 0x0010815c, 0x0010c140, + 0x00104029, 0x00110400, 0x00104d12, 0x00500060, 0x004071e6, 0x00200118, + 0x0060000a, 0x00200020, 0x00100620, 0x00154650, 0x00104668, 0x0017466d, + 0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, + 0x001146c6, 0x00200022, 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, + 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200290, 0x0060000a, 0x00104800, + 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00144a00, + 0x00104a19, 0x0010ca1c, 0x00110b00, 0x00200028, 0x00100b08, 0x00134c2e, + 0x0010cd00, 0x0010cd04, 0x00120d08, 0x00104d80, 0x00104e00, 0x0012d600, + 0x00105c00, 0x00104f06, 0x00105406, 0x00105709, 0x00200340, 0x0060000a, + 0x00300000, 0x00200680, 0x00406a0f, 0x00200684, 0x00800001, 0x00200b88, + 0x0060000a, 0x00209540, 0x0040708a, 0x00201350, 0x00800041, 
0x00407c0f, + 0x00600006, 0x00407ce6, 0x00700080, 0x002000a2, 0x0060000a, 0x00104280, + 0x00200340, 0x0060000a, 0x00200004, 0x00800001, 0x0070008e, 0x00408d68, + 0x0040020f, 0x00600006, 0x00409e68, 0x00600007, 0x0070000f, 0x0070000e, + 0x00408d68, 0x0091a880, 0x00901ffe, 0x10940000, 0x00200020, 0x0060000b, + 0x00500069, 0x0060000c, 0x00401568, 0x00700000, 0x00200001, 0x0040910e, + 0x00200021, 0x0060000a, 0x00409b0d, 0x00104a40, 0x00104a50, 0x00104a60, + 0x00104a70, 0x00104a80, 0x00104a90, 0x00104aa0, 0x00104ab0, 0x00407e0e, + 0x0040130f, 0x00408568, 0x0040a006, 0x0040a105, 0x00600009, 0x00700005, + 0x00700006, 0x0060000e, ~0 +}; + + static uint32_t nv4a_ctx_voodoo[] = { 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409965, 0x00409e06, @@ -1100,7 +1620,9 @@ nv40_graph_init(drm_device_t *dev) case 0x40: ctx_voodoo = nv40_ctx_voodoo; break; case 0x43: ctx_voodoo = nv43_ctx_voodoo; break; case 0x46: ctx_voodoo = nv46_ctx_voodoo; break; + case 0x49: ctx_voodoo = nv49_4b_ctx_voodoo; break; case 0x4a: ctx_voodoo = nv4a_ctx_voodoo; break; + case 0x4b: ctx_voodoo = nv49_4b_ctx_voodoo; break; case 0x4e: ctx_voodoo = nv4e_ctx_voodoo; break; default: DRM_ERROR("Unknown ctx_voodoo for chipset 0x%02x\n", @@ -1114,15 +1636,15 @@ nv40_graph_init(drm_device_t *dev) DRM_DEBUG("Loading context-switch voodoo\n"); i = 0; - NV_WRITE(0x400324, 0); + NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); while (ctx_voodoo[i] != ~0) { - NV_WRITE(0x400328, ctx_voodoo[i]); + NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, ctx_voodoo[i]); i++; } } /* No context present currently */ - NV_WRITE(0x40032C, 0x00000000); + NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0x00000000); NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000); NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); diff --git a/shared-core/nv50_fifo.c b/shared-core/nv50_fifo.c new file mode 100644 index 00000000..e5d37949 --- /dev/null +++ b/shared-core/nv50_fifo.c @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
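A note on the context-transfer rework earlier in this hunk: nv40_graph_transfer_context() now centralises what the old save and restore paths open-coded. It points the PGRAPH context pointer at the channel's instance, requests a save or load through the CTXCTL registers, and polls NV40_PGRAPH_CTXCTL_030C with a bounded loop, returning EBUSY on timeout so callers can propagate the failure. A stand-alone sketch of that kick-and-poll shape only; the register offsets and accessors below are dummies, not the hardware's:

#include <stdint.h>
#include <stdio.h>
#include <errno.h>

static uint32_t regs[0x100];			/* fake register file */

static uint32_t reg_read(uint32_t r)		{ return regs[r]; }
static void reg_write(uint32_t r, uint32_t v)	{ regs[r] = v; }

#define CTX_POINTER	0x00			/* illustrative offsets */
#define CTX_KICK	0x01
#define CTX_STATUS	0x02

static int transfer_context(uint32_t inst, int save)
{
	const int timeout = 1000;
	int i;

	reg_write(CTX_POINTER, inst);		/* which context image */
	reg_write(CTX_KICK, save ? 1 : 2);	/* request save or load */

	for (i = 0; i < timeout; i++)		/* bounded busy-wait */
		if (reg_read(CTX_STATUS) == 0)
			return 0;

	fprintf(stderr, "xfer failed: inst=0x%08x save=%d\n", inst, save);
	return -EBUSY;
}

int main(void)
{
	return transfer_context(0x1234, 1) ? 1 : 0;
}

Funnelling both nv40_graph_save_context() and nv40_graph_load_context() through one helper keeps the timeout and the error reporting in a single place.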
+ * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +static void +nv50_fifo_init_reset(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t pmc_e; + + pmc_e = NV_READ(NV03_PMC_ENABLE); + NV_WRITE(NV03_PMC_ENABLE, pmc_e & ~NV_PMC_ENABLE_PFIFO); + pmc_e = NV_READ(NV03_PMC_ENABLE); + NV_WRITE(NV03_PMC_ENABLE, pmc_e | NV_PMC_ENABLE_PFIFO); +} + +int +nv50_fifo_init(drm_device_t *dev) +{ + nv50_fifo_init_reset(dev); + + DRM_ERROR("stub!\n"); + return 0; +} + +void +nv50_fifo_takedown(drm_device_t *dev) +{ + DRM_ERROR("stub!\n"); +} + +int +nv50_fifo_create_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} + +void +nv50_fifo_destroy_context(drm_device_t *dev, int channel) +{ +} + +int +nv50_fifo_load_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} + +int +nv50_fifo_save_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} + diff --git a/shared-core/nv50_graph.c b/shared-core/nv50_graph.c new file mode 100644 index 00000000..8c3e2b9b --- /dev/null +++ b/shared-core/nv50_graph.c @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +static void +nv50_graph_init_reset(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t pmc_e; + + pmc_e = NV_READ(NV03_PMC_ENABLE); + NV_WRITE(NV03_PMC_ENABLE, pmc_e & ~NV_PMC_ENABLE_PGRAPH); + pmc_e = NV_READ(NV03_PMC_ENABLE); + NV_WRITE(NV03_PMC_ENABLE, pmc_e | NV_PMC_ENABLE_PGRAPH); +} + +int +nv50_graph_init(drm_device_t *dev) +{ + nv50_graph_init_reset(dev); + + DRM_ERROR("stub!\n"); + return 0; +} + +void +nv50_graph_takedown(drm_device_t *dev) +{ + DRM_ERROR("stub!\n"); +} + +int +nv50_graph_create_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} + +void +nv50_graph_destroy_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); +} + +int +nv50_graph_load_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} + +int +nv50_graph_save_context(drm_device_t *dev, int channel) +{ + DRM_ERROR("stub!\n"); + return 0; +} + diff --git a/shared-core/nv50_mc.c b/shared-core/nv50_mc.c new file mode 100644 index 00000000..7f7537f0 --- /dev/null +++ b/shared-core/nv50_mc.c @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +int +nv50_mc_init(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF); + return 0; +} + +void nv50_mc_takedown(drm_device_t *dev) +{ +} diff --git a/shared-core/r300_cmdbuf.c b/shared-core/r300_cmdbuf.c index d3c52d43..0cd5d7e2 100644 --- a/shared-core/r300_cmdbuf.c +++ b/shared-core/r300_cmdbuf.c @@ -155,7 +155,8 @@ void r300_init_reg_flags(void) ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2); ADD_RANGE(0x21DC, 1); ADD_RANGE(R300_VAP_UNKNOWN_221C, 1); - ADD_RANGE(0x2220, 4); + ADD_RANGE(R300_VAP_CLIP_X_0, 4); + ADD_RANGE(R300_VAP_PVS_WAITIDLE, 1); ADD_RANGE(R300_VAP_UNKNOWN_2288, 1); ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2); ADD_RANGE(R300_VAP_PVS_CNTL_1, 3); diff --git a/shared-core/r300_reg.h b/shared-core/r300_reg.h index 0a31f0b9..e59919be 100644 --- a/shared-core/r300_reg.h +++ b/shared-core/r300_reg.h @@ -116,6 +116,8 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. 
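The nv50_fifo_init_reset() and nv50_graph_init_reset() helpers above reset their engines by pulsing the engine's bit in the NV03_PMC_ENABLE master-enable register: read it, write it back with the bit cleared, then write it back with the bit set again. A tiny stand-alone model of that read-modify-write pulse; the storage and bit position are invented for the sketch:

#include <stdint.h>
#include <stdio.h>

static uint32_t pmc_enable = 0xffffffff;	/* fake PMC_ENABLE register */

#define ENABLE_PFIFO (1u << 8)			/* illustrative bit only */

static void engine_reset(uint32_t bit)
{
	uint32_t e;

	e = pmc_enable;
	pmc_enable = e & ~bit;			/* hold the engine in reset */
	e = pmc_enable;				/* re-read, as the driver does */
	pmc_enable = e | bit;			/* and release it */
}

int main(void)
{
	engine_reset(ENABLE_PFIFO);
	printf("PMC_ENABLE = 0x%08x\n", pmc_enable);
	return 0;
}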
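In the r300_cmdbuf.c hunk above, the anonymous ADD_RANGE(0x2220, 4) becomes the named clip-plane range plus R300_VAP_PVS_WAITIDLE. These ranges build the command-stream checker's table of registers that user space is allowed to program. A simplified stand-alone model of such a range whitelist; the structure, table contents and helper below are made up for illustration:

#include <stdint.h>
#include <stdio.h>

struct reg_range {
	uint32_t start;			/* first register offset */
	uint32_t count;			/* number of 32-bit registers */
};

static const struct reg_range allowed[] = {
	{ 0x2220, 4 },			/* R300_VAP_CLIP_X_0 and friends */
	{ 0x22D0, 3 },			/* R300_VAP_PVS_CNTL_1..3 */
};

static int reg_allowed(uint32_t off)
{
	unsigned int i;

	for (i = 0; i < sizeof(allowed) / sizeof(allowed[0]); i++)
		if (off >= allowed[i].start &&
		    off < allowed[i].start + allowed[i].count * 4)
			return 1;
	return 0;			/* unknown register: reject */
}

int main(void)
{
	printf("0x2228 allowed: %d\n", reg_allowed(0x2228));	/* 1 */
	printf("0x2300 allowed: %d\n", reg_allowed(0x2300));	/* 0 */
	return 0;
}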
# define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */ #define R300_VAP_OUTPUT_VTX_FMT_1 0x2094 + /* each of the following is 3 bits wide, specifies number + of components */ # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0 # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3 # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6 @@ -299,6 +301,18 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. # define R300_221C_NORMAL 0x00000000 # define R300_221C_CLEAR 0x0001C000 +/* These seem to be per-pixel and per-vertex X and Y clipping planes. The first + * plane is per-pixel and the second plane is per-vertex. + * + * This was determined by experimentation alone but I believe it is correct. + * + * These registers are called X_QUAD0_1_FL to X_QUAD0_4_FL by glxtest. + */ +#define R300_VAP_CLIP_X_0 0x2220 +#define R300_VAP_CLIP_X_1 0x2224 +#define R300_VAP_CLIP_Y_0 0x2228 +#define R300_VAP_CLIP_Y_1 0x2230 + /* gap */ /* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between @@ -322,13 +336,15 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. * The meaning of the two UNKNOWN fields is obviously not known. However, * experiments so far have shown that both *must* point to an instruction * inside the vertex program, otherwise the GPU locks up. + * * fglrx usually sets CNTL_3_UNKNOWN to the end of the program and - * CNTL_1_UNKNOWN points to instruction where last write to position takes - * place. + * R300_PVS_CNTL_1_POS_END_SHIFT points to instruction where last write to + * position takes place. + * * Most likely this is used to ignore rest of the program in cases * where group of verts arent visible. For some reason this "section" * is sometimes accepted other instruction that have no relationship with - *position calculations. + * position calculations. */ #define R300_VAP_PVS_CNTL_1 0x22D0 # define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0 @@ -967,7 +983,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. * first node is stored in NODE_2, the second node is stored in NODE_3. * * Offsets are relative to the master offset from PFS_CNTL_2. - * LAST_NODE is set for the last node, and only for the last node. */ #define R300_PFS_NODE_0 0x4610 #define R300_PFS_NODE_1 0x4614 @@ -981,7 +996,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. # define R300_PFS_NODE_TEX_OFFSET_MASK (31 << 12) # define R300_PFS_NODE_TEX_END_SHIFT 17 # define R300_PFS_NODE_TEX_END_MASK (31 << 17) -/*# define R300_PFS_NODE_LAST_NODE (1 << 22) */ # define R300_PFS_NODE_OUTPUT_COLOR (1 << 22) # define R300_PFS_NODE_OUTPUT_DEPTH (1 << 23) @@ -1591,6 +1605,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. 
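The new comment on R300_VAP_OUTPUT_VTX_FMT_1 above says each TEX_n_COMP_CNT field is 3 bits wide and holds a component count. A quick worked example of packing a value from the shifts shown; only the field arithmetic is demonstrated, no register write:

#include <stdint.h>
#include <stdio.h>

#define TEX_0_COMP_CNT_SHIFT	0
#define TEX_1_COMP_CNT_SHIFT	3
#define TEX_2_COMP_CNT_SHIFT	6

int main(void)
{
	/* 2 components on texcoord 0, 3 on texcoord 1, 4 on texcoord 2 */
	uint32_t fmt = (2u << TEX_0_COMP_CNT_SHIFT) |
		       (3u << TEX_1_COMP_CNT_SHIFT) |
		       (4u << TEX_2_COMP_CNT_SHIFT);

	printf("VTX_FMT_1 = 0x%08x\n", fmt);	/* prints 0x0000011a */
	return 0;
}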
# define R300_EB_UNK1_SHIFT 24 # define R300_EB_UNK1 (0x80<<24) # define R300_EB_UNK2 0x0810 +#define R300_PACKET3_3D_DRAW_VBUF_2 0x00003400 #define R300_PACKET3_3D_DRAW_INDX_2 0x00003600 /* END: Packet 3 commands */ diff --git a/shared-core/radeon_cp.c b/shared-core/radeon_cp.c index bde98b30..ec2e688b 100644 --- a/shared-core/radeon_cp.c +++ b/shared-core/radeon_cp.c @@ -1190,9 +1190,15 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev, /* Set ring buffer size */ #ifdef __BIG_ENDIAN RADEON_WRITE(RADEON_CP_RB_CNTL, - dev_priv->ring.size_l2qw | RADEON_BUF_SWAP_32BIT); + RADEON_BUF_SWAP_32BIT | + (dev_priv->ring.fetch_size_l2ow << 18) | + (dev_priv->ring.rptr_update_l2qw << 8) | + dev_priv->ring.size_l2qw); #else - RADEON_WRITE(RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw); + RADEON_WRITE(RADEON_CP_RB_CNTL, + (dev_priv->ring.fetch_size_l2ow << 18) | + (dev_priv->ring.rptr_update_l2qw << 8) | + dev_priv->ring.size_l2qw); #endif /* Start with assuming that writeback doesn't work */ @@ -1391,8 +1397,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) DRM_DEBUG("\n"); /* if we require new memory map but we don't have it fail */ - if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) - { + if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) { DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n"); radeon_do_cleanup_cp(dev); return DRM_ERR(EINVAL); @@ -1424,6 +1429,10 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) return DRM_ERR(EINVAL); } + /* Enable vblank on CRTC1 for older X servers + */ + dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1; + switch(init->func) { case RADEON_INIT_R200_CP: dev_priv->microcode_version = UCODE_R200; @@ -1663,6 +1672,12 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) dev_priv->ring.size = init->ring_size; dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); + dev_priv->ring.rptr_update = /* init->rptr_update */ 4096; + dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8); + + dev_priv->ring.fetch_size = /* init->fetch_size */ 32; + dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16); + dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; diff --git a/shared-core/radeon_drm.h b/shared-core/radeon_drm.h index e96e7851..6a57b804 100644 --- a/shared-core/radeon_drm.h +++ b/shared-core/radeon_drm.h @@ -655,6 +655,7 @@ typedef struct drm_radeon_indirect { #define RADEON_PARAM_GART_TEX_HANDLE 10 #define RADEON_PARAM_SCRATCH_OFFSET 11 #define RADEON_PARAM_CARD_TYPE 12 +#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */ typedef struct drm_radeon_getparam { int param; @@ -709,7 +710,7 @@ typedef struct drm_radeon_setparam { #define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */ #define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */ - +#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */ /* 1.14: Clients can allocate/free a surface */ typedef struct drm_radeon_surface_alloc { @@ -722,5 +723,7 @@ typedef struct drm_radeon_surface_free { unsigned int address; } drm_radeon_surface_free_t; +#define DRM_RADEON_VBLANK_CRTC1 1 +#define DRM_RADEON_VBLANK_CRTC2 2 #endif diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h index 4a36ea70..92a9b65e 100644 --- a/shared-core/radeon_drv.h +++ b/shared-core/radeon_drv.h @@ -97,10 +97,11 @@ * new packet type) * 1.26- 
Add support for variable size PCI(E) gart aperture * 1.27- Add support for IGP GART + * 1.28- Add support for VBL on CRTC2 */ #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 27 +#define DRIVER_MINOR 28 #define DRIVER_PATCHLEVEL 0 /* @@ -163,8 +164,14 @@ typedef struct drm_radeon_freelist { typedef struct drm_radeon_ring_buffer { u32 *start; u32 *end; - int size; - int size_l2qw; + int size; /* Double Words */ + int size_l2qw; /* log2 Quad Words */ + + int rptr_update; /* Double Words */ + int rptr_update_l2qw; /* log2 Quad Words */ + + int fetch_size; /* Double Words */ + int fetch_size_l2ow; /* log2 Oct Words */ u32 tail; u32 tail_mask; @@ -279,6 +286,9 @@ typedef struct drm_radeon_private { /* SW interrupt */ wait_queue_head_t swi_queue; atomic_t swi_emitted; + int vblank_crtc; + uint32_t irq_enable_reg; + int irq_enabled; struct radeon_surface surfaces[RADEON_MAX_SURFACES]; struct radeon_virt_surface virt_surfaces[2*RADEON_MAX_SURFACES]; @@ -355,10 +365,14 @@ extern int radeon_irq_wait(DRM_IOCTL_ARGS); extern void radeon_do_release(drm_device_t * dev); extern int radeon_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence); +extern int radeon_driver_vblank_wait2(drm_device_t * dev, + unsigned int *sequence); extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS); extern void radeon_driver_irq_preinstall(drm_device_t * dev); extern void radeon_driver_irq_postinstall(drm_device_t * dev); extern void radeon_driver_irq_uninstall(drm_device_t * dev); +extern int radeon_vblank_crtc_get(drm_device_t *dev); +extern int radeon_vblank_crtc_set(drm_device_t *dev, int64_t value); extern int radeon_driver_load(struct drm_device *dev, unsigned long flags); extern int radeon_driver_unload(struct drm_device *dev); @@ -495,12 +509,15 @@ extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp, #define RADEON_GEN_INT_CNTL 0x0040 # define RADEON_CRTC_VBLANK_MASK (1 << 0) +# define RADEON_CRTC2_VBLANK_MASK (1 << 9) # define RADEON_GUI_IDLE_INT_ENABLE (1 << 19) # define RADEON_SW_INT_ENABLE (1 << 25) #define RADEON_GEN_INT_STATUS 0x0044 # define RADEON_CRTC_VBLANK_STAT (1 << 0) # define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) +# define RADEON_CRTC2_VBLANK_STAT (1 << 9) +# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) # define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19) # define RADEON_SW_INT_TEST (1 << 25) # define RADEON_SW_INT_TEST_ACK (1 << 25) @@ -601,9 +618,51 @@ extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp, # define RADEON_SOFT_RESET_E2 (1 << 5) # define RADEON_SOFT_RESET_RB (1 << 6) # define RADEON_SOFT_RESET_HDP (1 << 7) +/* + * 6:0 Available slots in the FIFO + * 8 Host Interface active + * 9 CP request active + * 10 FIFO request active + * 11 Host Interface retry active + * 12 CP retry active + * 13 FIFO retry active + * 14 FIFO pipeline busy + * 15 Event engine busy + * 16 CP command stream busy + * 17 2D engine busy + * 18 2D portion of render backend busy + * 20 3D setup engine busy + * 26 GA engine busy + * 27 CBA 2D engine busy + * 31 2D engine busy or 3D engine busy or FIFO not empty or CP busy or + * command stream queue not empty or Ring Buffer not empty + */ #define RADEON_RBBM_STATUS 0x0e40 +/* Same as the previous RADEON_RBBM_STATUS; this is a mirror of that register. 
*/ +/* #define RADEON_RBBM_STATUS 0x1740 */ +/* bits 6:0 are dword slots available in the cmd fifo */ # define RADEON_RBBM_FIFOCNT_MASK 0x007f -# define RADEON_RBBM_ACTIVE (1 << 31) +# define RADEON_HIRQ_ON_RBB (1 << 8) +# define RADEON_CPRQ_ON_RBB (1 << 9) +# define RADEON_CFRQ_ON_RBB (1 << 10) +# define RADEON_HIRQ_IN_RTBUF (1 << 11) +# define RADEON_CPRQ_IN_RTBUF (1 << 12) +# define RADEON_CFRQ_IN_RTBUF (1 << 13) +# define RADEON_PIPE_BUSY (1 << 14) +# define RADEON_ENG_EV_BUSY (1 << 15) +# define RADEON_CP_CMDSTRM_BUSY (1 << 16) +# define RADEON_E2_BUSY (1 << 17) +# define RADEON_RB2D_BUSY (1 << 18) +# define RADEON_RB3D_BUSY (1 << 19) /* not used on r300 */ +# define RADEON_VAP_BUSY (1 << 20) +# define RADEON_RE_BUSY (1 << 21) /* not used on r300 */ +# define RADEON_TAM_BUSY (1 << 22) /* not used on r300 */ +# define RADEON_TDM_BUSY (1 << 23) /* not used on r300 */ +# define RADEON_PB_BUSY (1 << 24) /* not used on r300 */ +# define RADEON_TIM_BUSY (1 << 25) /* not used on r300 */ +# define RADEON_GA_BUSY (1 << 26) +# define RADEON_CBA2D_BUSY (1 << 27) +# define RADEON_RBBM_ACTIVE (1 << 31) #define RADEON_RE_LINE_PATTERN 0x1cd0 #define RADEON_RE_MISC 0x26c4 #define RADEON_RE_TOP_LEFT 0x26c0 diff --git a/shared-core/radeon_irq.c b/shared-core/radeon_irq.c index 3ff0baa2..a4be86e3 100644 --- a/shared-core/radeon_irq.c +++ b/shared-core/radeon_irq.c @@ -73,18 +73,35 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS) * outside the DRM */ stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK | - RADEON_CRTC_VBLANK_STAT)); + RADEON_CRTC_VBLANK_STAT | + RADEON_CRTC2_VBLANK_STAT)); if (!stat) return IRQ_NONE; + stat &= dev_priv->irq_enable_reg; + /* SW interrupt */ if (stat & RADEON_SW_INT_TEST) { DRM_WAKEUP(&dev_priv->swi_queue); } /* VBLANK interrupt */ - if (stat & RADEON_CRTC_VBLANK_STAT) { - atomic_inc(&dev->vbl_received); + if (stat & (RADEON_CRTC_VBLANK_STAT|RADEON_CRTC2_VBLANK_STAT)) { + int vblank_crtc = dev_priv->vblank_crtc; + + if ((vblank_crtc & + (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) == + (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) { + if (stat & RADEON_CRTC_VBLANK_STAT) + atomic_inc(&dev->vbl_received); + if (stat & RADEON_CRTC2_VBLANK_STAT) + atomic_inc(&dev->vbl_received2); + } else if (((stat & RADEON_CRTC_VBLANK_STAT) && + (vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) || + ((stat & RADEON_CRTC2_VBLANK_STAT) && + (vblank_crtc & DRM_RADEON_VBLANK_CRTC2))) + atomic_inc(&dev->vbl_received); + DRM_WAKEUP(&dev->vbl_queue); drm_vbl_send_signals(dev); } @@ -127,19 +144,30 @@ static int radeon_wait_irq(drm_device_t * dev, int swi_nr) return ret; } -int radeon_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence) +int radeon_driver_vblank_do_wait(drm_device_t * dev, unsigned int *sequence, + int crtc) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; unsigned int cur_vblank; int ret = 0; - + int ack = 0; + atomic_t *counter; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); return DRM_ERR(EINVAL); } - radeon_acknowledge_irqs(dev_priv, RADEON_CRTC_VBLANK_STAT); + if (crtc == DRM_RADEON_VBLANK_CRTC1) { + counter = &dev->vbl_received; + ack |= RADEON_CRTC_VBLANK_STAT; + } else if (crtc == DRM_RADEON_VBLANK_CRTC2) { + counter = &dev->vbl_received2; + ack |= RADEON_CRTC2_VBLANK_STAT; + } else + return DRM_ERR(EINVAL); + + radeon_acknowledge_irqs(dev_priv, ack); dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; @@ -148,7 +176,7 @@ int radeon_driver_vblank_wait(drm_device_t * dev, unsigned int 
*sequence) * using vertical blanks... */ DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, - (((cur_vblank = atomic_read(&dev->vbl_received)) + (((cur_vblank = atomic_read(counter)) - *sequence) <= (1 << 23))); *sequence = cur_vblank; @@ -156,6 +184,16 @@ int radeon_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence) return ret; } +int radeon_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence) +{ + return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1); +} + +int radeon_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence) +{ + return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2); +} + /* Needs the lock as it touches the ring. */ int radeon_irq_emit(DRM_IOCTL_ARGS) @@ -204,6 +242,21 @@ int radeon_irq_wait(DRM_IOCTL_ARGS) return radeon_wait_irq(dev, irqwait.irq_seq); } +static void radeon_enable_interrupt(drm_device_t *dev) +{ + drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; + + dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE; + if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1) + dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK; + + if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2) + dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK; + + RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); + dev_priv->irq_enabled = 1; +} + /* drm_dma.h hooks */ void radeon_driver_irq_preinstall(drm_device_t * dev) @@ -216,7 +269,8 @@ void radeon_driver_irq_preinstall(drm_device_t * dev) /* Clear bits if they're already high */ radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK | - RADEON_CRTC_VBLANK_STAT)); + RADEON_CRTC_VBLANK_STAT | + RADEON_CRTC2_VBLANK_STAT)); } void radeon_driver_irq_postinstall(drm_device_t * dev) @@ -227,9 +281,7 @@ void radeon_driver_irq_postinstall(drm_device_t * dev) atomic_set(&dev_priv->swi_emitted, 0); DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); - /* Turn on SW and VBL ints */ - RADEON_WRITE(RADEON_GEN_INT_CNTL, - RADEON_CRTC_VBLANK_MASK | RADEON_SW_INT_ENABLE); + radeon_enable_interrupt(dev); } void radeon_driver_irq_uninstall(drm_device_t * dev) @@ -239,6 +291,38 @@ void radeon_driver_irq_uninstall(drm_device_t * dev) if (!dev_priv) return; + dev_priv->irq_enabled = 0; + /* Disable *all* interrupts */ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); } + + +int radeon_vblank_crtc_get(drm_device_t *dev) +{ + drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; + u32 flag; + u32 value; + + flag = RADEON_READ(RADEON_GEN_INT_CNTL); + value = 0; + + if (flag & RADEON_CRTC_VBLANK_MASK) + value |= DRM_RADEON_VBLANK_CRTC1; + + if (flag & RADEON_CRTC2_VBLANK_MASK) + value |= DRM_RADEON_VBLANK_CRTC2; + return value; +} + +int radeon_vblank_crtc_set(drm_device_t *dev, int64_t value) +{ + drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; + if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) { + DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value); + return DRM_ERR(EINVAL); + } + dev_priv->vblank_crtc = (unsigned int)value; + radeon_enable_interrupt(dev); + return 0; +} diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c index bad054d7..8ccd0981 100644 --- a/shared-core/radeon_state.c +++ b/shared-core/radeon_state.c @@ -3131,6 +3131,9 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS) else value = RADEON_CARD_PCI; break; + case RADEON_PARAM_VBLANK_CRTC: + value = radeon_vblank_crtc_get(dev); + break; default: DRM_DEBUG( "Invalid parameter %d\n", param.param ); return DRM_ERR(EINVAL); @@ 
-3192,6 +3195,9 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS) if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE) dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; break; + case RADEON_SETPARAM_VBLANK_CRTC: + return radeon_vblank_crtc_set(dev, sp.value); + break; default: DRM_DEBUG("Invalid parameter %d\n", sp.param); return DRM_ERR(EINVAL); diff --git a/shared-core/sis_drv.h b/shared-core/sis_drv.h index 006d148c..ec572ad4 100644 --- a/shared-core/sis_drv.h +++ b/shared-core/sis_drv.h @@ -33,11 +33,11 @@ #define DRIVER_AUTHOR "SIS, Tungsten Graphics" #define DRIVER_NAME "sis" -#define DRIVER_DESC "SIS 300/630/540" -#define DRIVER_DATE "20060619" +#define DRIVER_DESC "SIS 300/630/540 and XGI V3XE/V5/V8" +#define DRIVER_DATE "20070626" #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 2 -#define DRIVER_PATCHLEVEL 1 +#define DRIVER_MINOR 3 +#define DRIVER_PATCHLEVEL 0 enum sis_family { SIS_OTHER = 0, diff --git a/shared-core/via_dma.c b/shared-core/via_dma.c index c73634e4..333c4bcf 100644 --- a/shared-core/via_dma.c +++ b/shared-core/via_dma.c @@ -84,9 +84,9 @@ static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv) { uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base; - - return ((hw_addr <= dev_priv->dma_low) ? - (dev_priv->dma_low - hw_addr) : + + return ((hw_addr <= dev_priv->dma_low) ? + (dev_priv->dma_low - hw_addr) : (dev_priv->dma_wrap + dev_priv->dma_low - hw_addr)); } @@ -103,7 +103,7 @@ via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size) uint32_t count; hw_addr_ptr = dev_priv->hw_addr_ptr; cur_addr = dev_priv->dma_low; - next_addr = cur_addr + size + 512*1024; + next_addr = cur_addr + size + 512 * 1024; count = 1000000; do { hw_addr = *hw_addr_ptr - agp_base; @@ -207,8 +207,8 @@ static int via_initialize(drm_device_t * dev, dev_priv->dma_offset = init->offset; dev_priv->last_pause_ptr = NULL; dev_priv->hw_addr_ptr = - (volatile uint32_t *)((char *)dev_priv->mmio->handle + - init->reg_pause_addr); + (volatile uint32_t *)((char *)dev_priv->mmio->handle + + init->reg_pause_addr); via_cmdbuf_start(dev_priv); @@ -239,8 +239,8 @@ static int via_dma_init(DRM_IOCTL_ARGS) retcode = via_dma_cleanup(dev); break; case VIA_DMA_INITIALIZED: - retcode = (dev_priv->ring.virtual_start != NULL) ? - 0: DRM_ERR( EFAULT ); + retcode = (dev_priv->ring.virtual_start != NULL) ? 
+ 0 : DRM_ERR(EFAULT); break; default: retcode = DRM_ERR(EINVAL); @@ -268,8 +268,7 @@ static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd) if (cmd->size > VIA_PCI_BUF_SIZE) { return DRM_ERR(ENOMEM); - } - + } if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) return DRM_ERR(EFAULT); @@ -292,7 +291,7 @@ static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd) } memcpy(vb, dev_priv->pci_buf, cmd->size); - + dev_priv->dma_low += cmd->size; /* @@ -301,7 +300,7 @@ static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd) */ if (cmd->size < 0x100) - via_pad_cache(dev_priv,(0x100 - cmd->size) >> 3); + via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3); via_cmdbuf_pause(dev_priv); return 0; @@ -321,7 +320,7 @@ static int via_flush_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; - LOCK_TEST_WITH_RETURN( dev, filp ); + LOCK_TEST_WITH_RETURN(dev, filp); return via_driver_dma_quiescent(dev); } @@ -332,7 +331,7 @@ static int via_cmdbuffer(DRM_IOCTL_ARGS) drm_via_cmdbuffer_t cmdbuf; int ret; - LOCK_TEST_WITH_RETURN( dev, filp ); + LOCK_TEST_WITH_RETURN(dev, filp); DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data, sizeof(cmdbuf)); @@ -355,16 +354,16 @@ static int via_dispatch_pci_cmdbuffer(drm_device_t * dev, if (cmd->size > VIA_PCI_BUF_SIZE) { return DRM_ERR(ENOMEM); - } + } if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) return DRM_ERR(EFAULT); - - if ((ret = - via_verify_command_stream((uint32_t *)dev_priv->pci_buf, + + if ((ret = + via_verify_command_stream((uint32_t *) dev_priv->pci_buf, cmd->size, dev, 0))) { return ret; } - + ret = via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf, cmd->size); @@ -377,7 +376,7 @@ static int via_pci_cmdbuffer(DRM_IOCTL_ARGS) drm_via_cmdbuffer_t cmdbuf; int ret; - LOCK_TEST_WITH_RETURN( dev, filp ); + LOCK_TEST_WITH_RETURN(dev, filp); DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data, sizeof(cmdbuf)); @@ -393,7 +392,6 @@ static int via_pci_cmdbuffer(DRM_IOCTL_ARGS) return 0; } - static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv, uint32_t * vb, int qw_count) { @@ -403,7 +401,6 @@ static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv, return vb; } - /* * This function is used internally by ring buffer mangement code. * @@ -419,8 +416,7 @@ static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv) * modifying the pause address stored in the buffer itself. If * the regulator has already paused, restart it. 
*/ - -static int via_hook_segment(drm_via_private_t *dev_priv, +static int via_hook_segment(drm_via_private_t * dev_priv, uint32_t pause_addr_hi, uint32_t pause_addr_lo, int no_pci_fire) { @@ -479,7 +475,7 @@ static int via_wait_idle(drm_via_private_t * dev_priv) } static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type, - uint32_t addr, uint32_t *cmd_addr_hi, + uint32_t addr, uint32_t *cmd_addr_hi, uint32_t *cmd_addr_lo, int skip_wait) { uint32_t agp_base; @@ -508,9 +504,6 @@ static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type, return vb; } - - - static void via_cmdbuf_start(drm_via_private_t * dev_priv) { uint32_t pause_addr_lo, pause_addr_hi; diff --git a/shared-core/via_verifier.c b/shared-core/via_verifier.c index b5a1d33a..4b844af0 100644 --- a/shared-core/via_verifier.c +++ b/shared-core/via_verifier.c @@ -255,7 +255,6 @@ static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq, drm_device_t * dev) { #ifdef __linux__ - struct list_head *list; drm_map_list_t *r_list; #endif drm_local_map_t *map = seq->map_cache; @@ -265,8 +264,7 @@ static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq, return map; } #ifdef __linux__ - list_for_each(list, &dev->maplist->head) { - r_list = (drm_map_list_t *) list; + list_for_each_entry(r_list, &dev->maplist, head) { map = r_list->map; if (!map) continue; |
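On the radeon_cp.c change above: RADEON_CP_RB_CNTL now carries three log2-sized fields instead of just the ring size, with rptr_update_l2qw at bit 8 and fetch_size_l2ow at bit 18 (big-endian builds additionally OR in RADEON_BUF_SWAP_32BIT). With the hard-coded 4096-byte read-pointer update interval and 32-byte fetch size from the diff, the arithmetic works out as below; the helper merely stands in for the kernel's drm_order() and no register access is shown:

#include <stdint.h>
#include <stdio.h>

/* stand-in for drm_order(): smallest n with (1 << n) >= x */
static unsigned int order(unsigned long x)
{
	unsigned int n = 0;

	while ((1UL << n) < x)
		n++;
	return n;
}

int main(void)
{
	unsigned long ring_size = 1024 * 1024;			/* example: 1 MB ring */

	unsigned int size_l2qw        = order(ring_size / 8);	/* log2 quad-words */
	unsigned int rptr_update_l2qw = order(4096 / 8);	/* log2 quad-words */
	unsigned int fetch_size_l2ow  = order(32 / 16);		/* log2 oct-words */

	uint32_t rb_cntl = (fetch_size_l2ow << 18) |
			   (rptr_update_l2qw << 8) |
			   size_l2qw;

	/* 1 MB ring: 17 | (9 << 8) | (1 << 18) = 0x00040911 */
	printf("RB_CNTL = 0x%08x\n", rb_cntl);
	return 0;
}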
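On the radeon_irq.c changes above: which vblank counter gets incremented now depends on both the acknowledged status bits and the dev_priv->vblank_crtc mask, so a client that only enabled one CRTC keeps feeding the legacy dev->vbl_received counter while dual-CRTC setups get separate counts. A compact stand-alone model of just that decision, with simplified names and types:

#include <stdio.h>

#define VBLANK_CRTC1	1
#define VBLANK_CRTC2	2

struct counters { unsigned int vbl, vbl2; };

/* stat_crtc1/stat_crtc2: which vblank status bits fired this interrupt */
static void account_vblank(struct counters *c, int vblank_crtc,
			   int stat_crtc1, int stat_crtc2)
{
	if ((vblank_crtc & (VBLANK_CRTC1 | VBLANK_CRTC2)) ==
	    (VBLANK_CRTC1 | VBLANK_CRTC2)) {
		/* both CRTCs enabled: keep per-CRTC counts */
		if (stat_crtc1)
			c->vbl++;
		if (stat_crtc2)
			c->vbl2++;
	} else if ((stat_crtc1 && (vblank_crtc & VBLANK_CRTC1)) ||
		   (stat_crtc2 && (vblank_crtc & VBLANK_CRTC2))) {
		/* only one enabled: it feeds the legacy counter */
		c->vbl++;
	}
}

int main(void)
{
	struct counters c = { 0, 0 };

	account_vblank(&c, VBLANK_CRTC2, 0, 1);	/* CRTC2-only configuration */
	printf("vbl=%u vbl2=%u\n", c.vbl, c.vbl2);
	return 0;
}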
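The final via_verifier.c hunk replaces list_for_each() plus a cast with list_for_each_entry(), which walks dev->maplist and yields the containing drm_map_list_t directly via the embedded list head. A self-contained miniature of the same intrusive-list idiom; the list type and macros here are local to the sketch, not the kernel's <linux/list.h>:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next; };	/* singly linked for brevity */

struct map_entry {
	int handle;
	struct list_head head;			/* embedded link */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* walk every entry whose 'member' link hangs off 'list' */
#define for_each_entry(pos, list, type, member)				\
	for (pos = container_of((list)->next, type, member);		\
	     &pos->member != (list);					\
	     pos = container_of(pos->member.next, type, member))

int main(void)
{
	struct list_head maps = { &maps };	/* empty circular list */
	struct map_entry a = { 7, { &maps } };	/* a.head points back to maps */
	struct map_entry *r_list;

	maps.next = &a.head;			/* link the single entry in */

	for_each_entry(r_list, &maps, struct map_entry, head)
		printf("map handle %d\n", r_list->handle);
	return 0;
}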