Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c         |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c         |  25
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         |   3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         | 182
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  |   6
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c       |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c    | 161
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c         |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c       |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c       |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c         |   2
11 files changed, 246 insertions, 155 deletions
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 81f1cff56fd..2d797ffe813 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -202,7 +202,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	dev_priv->ring.map.flags = 0;
 	dev_priv->ring.map.mtrr = 0;
 
-	drm_core_ioremap(&dev_priv->ring.map, dev);
+	drm_core_ioremap_wc(&dev_priv->ring.map, dev);
 
 	if (dev_priv->ring.map.handle == NULL) {
 		i915_dma_cleanup(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index aac12ee31a4..0692622ee2b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -27,6 +27,7 @@
  *
  */
 
+#include <linux/device.h>
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
@@ -66,6 +67,12 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 
 	i915_save_state(dev);
 
+	/* If KMS is active, we do the leavevt stuff here */
+	if (drm_core_check_feature(dev, DRIVER_MODESET) && i915_gem_idle(dev)) {
+		dev_err(&dev->pdev->dev, "GEM idle failed, aborting suspend\n");
+		return -EBUSY;
+	}
+
 	intel_opregion_free(dev);
 
 	if (state.event == PM_EVENT_SUSPEND) {
@@ -79,6 +86,9 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 
 static int i915_resume(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
 	pci_set_power_state(dev->pdev, PCI_D0);
 	pci_restore_state(dev->pdev);
 	if (pci_enable_device(dev->pdev))
@@ -89,11 +99,24 @@ static int i915_resume(struct drm_device *dev)
 
 	intel_opregion_init(dev);
 
-	return 0;
+	/* KMS EnterVT equivalent */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		mutex_lock(&dev->struct_mutex);
+		dev_priv->mm.suspended = 0;
+
+		ret = i915_gem_init_ringbuffer(dev);
+		if (ret != 0)
+			ret = -1;
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	return ret;
 }
 
 static struct vm_operations_struct i915_gem_vm_ops = {
 	.fault = i915_gem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
 };
 
 static struct drm_driver driver = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7325363164f..17fa40858d2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -184,6 +184,8 @@ typedef struct drm_i915_private {
 	unsigned int lvds_dither:1;
 	unsigned int lvds_vbt:1;
 	unsigned int int_crt_support:1;
+	unsigned int lvds_use_ssc:1;
+	int lvds_ssc_freq;
 
 	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
@@ -616,6 +618,7 @@ int i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end);
+int i915_gem_idle(struct drm_device *dev);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 81857665409..25b337438ca 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,10 +34,6 @@
 
 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-				  uint32_t read_domains,
-				  uint32_t write_domain);
 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -607,8 +603,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	case -EAGAIN:
 		return VM_FAULT_OOM;
 	case -EFAULT:
-	case -EBUSY:
-		DRM_ERROR("can't insert pfn?? fault or busy...\n");
 		return VM_FAULT_SIGBUS;
 	default:
 		return VM_FAULT_NOPAGE;
@@ -684,6 +678,30 @@ out_free_list:
 	return ret;
 }
 
+static void
+i915_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map_list *list;
+
+	list = &obj->map_list;
+	drm_ht_remove_item(&mm->offset_hash, &list->hash);
+
+	if (list->file_offset_node) {
+		drm_mm_put_block(list->file_offset_node);
+		list->file_offset_node = NULL;
+	}
+
+	if (list->map) {
+		drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER);
+		list->map = NULL;
+	}
+
+	obj_priv->mmap_offset = 0;
+}
+
 /**
  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
  * @obj: object to check
@@ -758,8 +776,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
 	if (!obj_priv->mmap_offset) {
 		ret = i915_gem_create_mmap_offset(obj);
-		if (ret)
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			mutex_unlock(&dev->struct_mutex);
 			return ret;
+		}
 	}
 
 	args->offset = obj_priv->mmap_offset;
@@ -1996,30 +2017,28 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  *		drm_agp_chipset_flush
  */
 static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-				  uint32_t read_domains,
-				  uint32_t write_domain)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	uint32_t invalidate_domains = 0;
 	uint32_t flush_domains = 0;
 
-	BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
-	BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
+	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
+	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
 
 #if WATCH_BUF
 	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
		 __func__, obj,
-		 obj->read_domains, read_domains,
-		 obj->write_domain, write_domain);
+		 obj->read_domains, obj->pending_read_domains,
+		 obj->write_domain, obj->pending_write_domain);
 #endif
 	/*
 	 * If the object isn't moving to a new write domain,
 	 * let the object stay in multiple read domains
 	 */
-	if (write_domain == 0)
-		read_domains |= obj->read_domains;
+	if (obj->pending_write_domain == 0)
+		obj->pending_read_domains |= obj->read_domains;
 	else
 		obj_priv->dirty = 1;
@@ -2029,15 +2048,17 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 	 * any read domains which differ from the old
 	 * write domain
 	 */
-	if (obj->write_domain && obj->write_domain != read_domains) {
+	if (obj->write_domain &&
+	    obj->write_domain != obj->pending_read_domains) {
 		flush_domains |= obj->write_domain;
-		invalidate_domains |= read_domains & ~obj->write_domain;
+		invalidate_domains |=
+			obj->pending_read_domains & ~obj->write_domain;
 	}
 	/*
 	 * Invalidate any read caches which may have
 	 * stale data. That is, any new read domains.
 	 */
-	invalidate_domains |= read_domains & ~obj->read_domains;
+	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
 	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
 #if WATCH_BUF
 		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
@@ -2046,9 +2067,15 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 		i915_gem_clflush_object(obj);
 	}
 
-	if ((write_domain | flush_domains) != 0)
-		obj->write_domain = write_domain;
-	obj->read_domains = read_domains;
+	/* The actual obj->write_domain will be updated with
+	 * pending_write_domain after we emit the accumulated flush for all
+	 * of our domain changes in execbuffers (which clears objects'
+	 * write_domains). So if we have a current write domain that we
+	 * aren't changing, set pending_write_domain to that.
+	 */
+	if (flush_domains == 0 && obj->pending_write_domain == 0)
+		obj->pending_write_domain = obj->write_domain;
+	obj->read_domains = obj->pending_read_domains;
 
 	dev->invalidate_domains |= invalidate_domains;
 	dev->flush_domains |= flush_domains;
@@ -2251,6 +2278,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
				  (int) reloc.offset,
				  reloc.read_domains,
				  reloc.write_domain);
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
 			return -EINVAL;
 		}
@@ -2480,13 +2509,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	if (dev_priv->mm.wedged) {
 		DRM_ERROR("Execbuf while wedged\n");
 		mutex_unlock(&dev->struct_mutex);
-		return -EIO;
+		ret = -EIO;
+		goto pre_mutex_err;
 	}
 
 	if (dev_priv->mm.suspended) {
 		DRM_ERROR("Execbuf while VT-switched.\n");
 		mutex_unlock(&dev->struct_mutex);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto pre_mutex_err;
 	}
 
 	/* Look up object handles */
@@ -2554,9 +2585,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		struct drm_gem_object *obj = object_list[i];
 
 		/* Compute new gpu domains and update invalidate/flush */
-		i915_gem_object_set_to_gpu_domain(obj,
-						  obj->pending_read_domains,
-						  obj->pending_write_domain);
+		i915_gem_object_set_to_gpu_domain(obj);
 	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2575,6 +2604,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 			(void)i915_add_request(dev, dev->flush_domains);
 	}
 
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_gem_object *obj = object_list[i];
+
+		obj->write_domain = obj->pending_write_domain;
+	}
+
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
 #if WATCH_COHERENCY
@@ -2632,15 +2667,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
-	/* Copy the new buffer offsets back to the user's exec list. */
-	ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-			   (uintptr_t) args->buffers_ptr,
-			   exec_list,
-			   sizeof(*exec_list) * args->buffer_count);
-	if (ret)
-		DRM_ERROR("failed to copy %d exec entries "
-			  "back to user (%d)\n",
-			  args->buffer_count, ret);
 err:
 	for (i = 0; i < pinned; i++)
 		i915_gem_object_unpin(object_list[i]);
@@ -2650,6 +2676,18 @@ err:
 
 	mutex_unlock(&dev->struct_mutex);
 
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec_list,
+				   sizeof(*exec_list) * args->buffer_count);
+		if (ret)
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+	}
+
 pre_mutex_err:
 	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
		 DRM_MEM_DRIVER);
@@ -2753,6 +2791,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
 		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
+		drm_gem_object_unreference(obj);
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
@@ -2833,6 +2872,13 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		return -EBADF;
 	}
 
+	/* Update the active list for the hardware's current position.
+	 * Otherwise this only updates on a delayed timer or when irqs are
+	 * actually unmasked, and our working set ends up being larger than
+	 * required.
+	 */
+	i915_gem_retire_requests(dev);
+
 	obj_priv = obj->driver_private;
 	/* Don't count being on the flushing list against the object being
 	 * done. Otherwise, a buffer left on the flushing list but not getting
@@ -2885,9 +2931,6 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_map_list *list;
-	struct drm_map *map;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
 	while (obj_priv->pin_count > 0)
@@ -2898,19 +2941,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
 	i915_gem_object_unbind(obj);
 
-	list = &obj->map_list;
-	drm_ht_remove_item(&mm->offset_hash, &list->hash);
-
-	if (list->file_offset_node) {
-		drm_mm_put_block(list->file_offset_node);
-		list->file_offset_node = NULL;
-	}
-
-	map = list->map;
-	if (map) {
-		drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
-		list->map = NULL;
-	}
+	i915_gem_free_mmap_offset(obj);
 
 	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
 	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
@@ -2949,7 +2980,7 @@ i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
 	return 0;
 }
 
-static int
+int
 i915_gem_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -3095,6 +3126,7 @@ i915_gem_init_hws(struct drm_device *dev)
 	if (dev_priv->hw_status_page == NULL) {
 		DRM_ERROR("Failed to map status page.\n");
 		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+		i915_gem_object_unpin(obj);
 		drm_gem_object_unreference(obj);
 		return -EINVAL;
 	}
@@ -3107,6 +3139,31 @@ i915_gem_init_hws(struct drm_device *dev)
 	return 0;
 }
 
+static void
+i915_gem_cleanup_hws(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+
+	if (dev_priv->hws_obj == NULL)
+		return;
+
+	obj = dev_priv->hws_obj;
+	obj_priv = obj->driver_private;
+
+	kunmap(obj_priv->page_list[0]);
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(obj);
+	dev_priv->hws_obj = NULL;
+
+	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+	dev_priv->hw_status_page = NULL;
+
+	/* Write high address into HWS_PGA when disabling. */
+	I915_WRITE(HWS_PGA, 0x1ffff000);
+}
+
 int
 i915_gem_init_ringbuffer(struct drm_device *dev)
 {
@@ -3124,6 +3181,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	obj = drm_gem_object_alloc(dev, 128 * 1024);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate ringbuffer\n");
+		i915_gem_cleanup_hws(dev);
 		return -ENOMEM;
 	}
 	obj_priv = obj->driver_private;
@@ -3131,6 +3189,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	ret = i915_gem_object_pin(obj, 4096);
 	if (ret != 0) {
 		drm_gem_object_unreference(obj);
+		i915_gem_cleanup_hws(dev);
 		return ret;
 	}
@@ -3148,7 +3207,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	if (ring->map.handle == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
 		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+		i915_gem_object_unpin(obj);
 		drm_gem_object_unreference(obj);
+		i915_gem_cleanup_hws(dev);
 		return -EINVAL;
 	}
 	ring->ring_obj = obj;
@@ -3228,20 +3289,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 	dev_priv->ring.ring_obj = NULL;
 	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
 
-	if (dev_priv->hws_obj != NULL) {
-		struct drm_gem_object *obj = dev_priv->hws_obj;
-		struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
-		kunmap(obj_priv->page_list[0]);
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(obj);
-		dev_priv->hws_obj = NULL;
-		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-		dev_priv->hw_status_page = NULL;
-
-		/* Write high address into HWS_PGA when disabling. */
-		I915_WRITE(HWS_PGA, 0x1ffff000);
-	}
+	i915_gem_cleanup_hws(dev);
 }
 
 int
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index fa1685cba84..7fb4191ef93 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -299,9 +299,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	}
 	obj_priv->stride = args->stride;
 
-	mutex_unlock(&dev->struct_mutex);
-
 	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -340,9 +339,8 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
 		DRM_ERROR("unknown tiling mode\n");
 	}
 
-	mutex_unlock(&dev->struct_mutex);
-
 	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4ca82a02552..65be30dccc7 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -135,6 +135,14 @@ parse_general_features(struct drm_i915_private *dev_priv,
 	if (general) {
 		dev_priv->int_tv_support = general->int_tv_support;
 		dev_priv->int_crt_support = general->int_crt_support;
+		dev_priv->lvds_use_ssc = general->enable_ssc;
+
+		if (dev_priv->lvds_use_ssc) {
+			if (IS_I855(dev_priv->dev))
+				dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
+			else
+				dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
+		}
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index bbdd72909a1..65b635ce28c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -90,12 +90,12 @@ typedef struct {
 #define I9XX_DOT_MAX		 400000
 #define I9XX_VCO_MIN		1400000
 #define I9XX_VCO_MAX		2800000
-#define I9XX_N_MIN		      3
-#define I9XX_N_MAX		      8
+#define I9XX_N_MIN		      1
+#define I9XX_N_MAX		      6
 #define I9XX_M_MIN		     70
 #define I9XX_M_MAX		    120
 #define I9XX_M1_MIN		     10
-#define I9XX_M1_MAX		     20
+#define I9XX_M1_MAX		     22
 #define I9XX_M2_MIN		      5
 #define I9XX_M2_MAX		      9
 #define I9XX_P_SDVO_DAC_MIN	      5
@@ -189,9 +189,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
 	return limit;
 }
 
-/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
-
-static void i8xx_clock(int refclk, intel_clock_t *clock)
+static void intel_clock(int refclk, intel_clock_t *clock)
 {
 	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
 	clock->p = clock->p1 * clock->p2;
@@ -199,25 +197,6 @@ static void i8xx_clock(int refclk, intel_clock_t *clock)
 	clock->dot = clock->vco / clock->p;
 }
 
-/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
-
-static void i9xx_clock(int refclk, intel_clock_t *clock)
-{
-	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
-	clock->p = clock->p1 * clock->p2;
-	clock->vco = refclk * clock->m / (clock->n + 2);
-	clock->dot = clock->vco / clock->p;
-}
-
-static void intel_clock(struct drm_device *dev, int refclk,
-			intel_clock_t *clock)
-{
-	if (IS_I9XX(dev))
-		i9xx_clock (refclk, clock);
-	else
-		i8xx_clock (refclk, clock);
-}
-
 /**
  * Returns whether any output on the specified pipe is of the specified type
  */
@@ -238,7 +217,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
     return false;
 }
 
-#define INTELPllInvalid(s)   { /* ErrorF (s) */; return false; }
+#define INTELPllInvalid(s)   do { DRM_DEBUG(s); return false; } while (0)
 /**
  * Returns whether the given set of divisors are valid for a given refclk with
  * the given connectors.
@@ -318,7 +297,7 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
		     clock.p1 <= limit->p1.max; clock.p1++) {
 			int this_err;
 
-			intel_clock(dev, refclk, &clock);
+			intel_clock(refclk, &clock);
 
 			if (!intel_PLL_is_valid(crtc, &clock))
 				continue;
@@ -343,7 +322,7 @@ intel_wait_for_vblank(struct drm_device *dev)
 	udelay(20000);
 }
 
-static void
+static int
 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		    struct drm_framebuffer *old_fb)
 {
@@ -361,11 +340,21 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
 	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
 	u32 dspcntr, alignment;
+	int ret;
 
 	/* no fb bound */
 	if (!crtc->fb) {
 		DRM_DEBUG("No FB bound\n");
-		return;
+		return 0;
+	}
+
+	switch (pipe) {
+	case 0:
+	case 1:
+		break;
+	default:
+		DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+		return -EINVAL;
 	}
 
 	intel_fb = to_intel_framebuffer(crtc->fb);
@@ -377,28 +366,30 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		alignment = 64 * 1024;
 		break;
 	case I915_TILING_X:
-		if (IS_I9XX(dev))
-			alignment = 1024 * 1024;
-		else
-			alignment = 512 * 1024;
+		/* pin() will align the object as required by fence */
+		alignment = 0;
 		break;
 	case I915_TILING_Y:	/* FIXME: Is this true? */
 		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
-		return;
+		return -EINVAL;
 	default:
 		BUG();
 	}
 
-	if (i915_gem_object_pin(intel_fb->obj, alignment))
-		return;
-
-	i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1);
-
-	Start = obj_priv->gtt_offset;
-	Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_gem_object_pin(intel_fb->obj, alignment);
+	if (ret != 0) {
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
 
-	I915_WRITE(dspstride, crtc->fb->pitch);
+	ret = i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1);
+	if (ret != 0) {
+		i915_gem_object_unpin(intel_fb->obj);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
 
 	dspcntr = I915_READ(dspcntr_reg);
 	/* Mask out pixel format bits in case we change it */
@@ -419,11 +410,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		break;
 	default:
 		DRM_ERROR("Unknown color depth\n");
-		return;
+		i915_gem_object_unpin(intel_fb->obj);
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
 	}
 	I915_WRITE(dspcntr_reg, dspcntr);
 
+	Start = obj_priv->gtt_offset;
+	Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+
+	DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+	I915_WRITE(dspstride, crtc->fb->pitch);
 	if (IS_I965G(dev)) {
 		I915_WRITE(dspbase, Offset);
 		I915_READ(dspbase);
@@ -440,27 +437,24 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		intel_fb = to_intel_framebuffer(old_fb);
 		i915_gem_object_unpin(intel_fb->obj);
 	}
+	mutex_unlock(&dev->struct_mutex);
 
 	if (!dev->primary->master)
-		return;
+		return 0;
 
 	master_priv = dev->primary->master->driver_priv;
 	if (!master_priv->sarea_priv)
-		return;
+		return 0;
 
-	switch (pipe) {
-	case 0:
-		master_priv->sarea_priv->pipeA_x = x;
-		master_priv->sarea_priv->pipeA_y = y;
-		break;
-	case 1:
+	if (pipe) {
 		master_priv->sarea_priv->pipeB_x = x;
 		master_priv->sarea_priv->pipeB_y = y;
-		break;
-	default:
-		DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
-		break;
+	} else {
+		master_priv->sarea_priv->pipeA_x = x;
+		master_priv->sarea_priv->pipeA_y = y;
 	}
+
+	return 0;
 }
@@ -708,11 +702,11 @@ static int intel_panel_fitter_pipe (struct drm_device *dev)
 	return 1;
 }
 
-static void intel_crtc_mode_set(struct drm_crtc *crtc,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode,
-				int x, int y,
-				struct drm_framebuffer *old_fb)
+static int intel_crtc_mode_set(struct drm_crtc *crtc,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode,
+			       int x, int y,
+			       struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -732,13 +726,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
 	int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
 	int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
 	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
-	int refclk;
+	int refclk, num_outputs = 0;
 	intel_clock_t clock;
 	u32 dpll = 0, fp = 0, dspcntr, pipeconf;
 	bool ok, is_sdvo = false, is_dvo = false;
 	bool is_crt = false, is_lvds = false, is_tv = false;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_connector *connector;
+	int ret;
 
 	drm_vblank_pre_modeset(dev, pipe);
@@ -768,9 +763,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
 			is_crt = true;
 			break;
 		}
+
+		num_outputs++;
 	}
 
-	if (IS_I9XX(dev)) {
+	if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) {
+		refclk = dev_priv->lvds_ssc_freq * 1000;
+		DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000);
+	} else if (IS_I9XX(dev)) {
 		refclk = 96000;
 	} else {
 		refclk = 48000;
@@ -779,7 +779,7 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
 	ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock);
 	if (!ok) {
 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
-		return;
+		return -EINVAL;
 	}
 
 	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
@@ -829,11 +829,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
 		}
 	}
 
-	if (is_tv) {
+	if (is_sdvo && is_tv)
+		dpll |= PLL_REF_INPUT_TVCLKINBC;
+	else if (is_tv)
 		/* XXX: just matching BIOS for now */
-/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
+		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
 		dpll |= 3;
-	}
+	else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2)
+		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 	else
 		dpll |= PLL_REF_INPUT_DREFCLK;
@@ -950,9 +953,13 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
 	I915_WRITE(dspcntr_reg, dspcntr);
 
 	/* Flush the plane changes */
-	intel_pipe_set_base(crtc, x, y, old_fb);
+	ret = intel_pipe_set_base(crtc, x, y, old_fb);
+	if (ret != 0)
+		return ret;
 
 	drm_vblank_post_modeset(dev, pipe);
+
+	return 0;
 }
 
 /** Loads the palette/gamma unit for the CRTC with the prepared values */
@@ -1001,6 +1008,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 		temp = CURSOR_MODE_DISABLE;
 		addr = 0;
 		bo = NULL;
+		mutex_lock(&dev->struct_mutex);
 		goto finish;
 	}
@@ -1023,18 +1031,19 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	}
 
 	/* we only need to pin inside GTT if cursor is non-phy */
+	mutex_lock(&dev->struct_mutex);
 	if (!dev_priv->cursor_needs_physical) {
 		ret = i915_gem_object_pin(bo, PAGE_SIZE);
 		if (ret) {
 			DRM_ERROR("failed to pin cursor bo\n");
-			goto fail;
+			goto fail_locked;
 		}
 		addr = obj_priv->gtt_offset;
 	} else {
 		ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
 		if (ret) {
 			DRM_ERROR("failed to attach phys object\n");
-			goto fail;
+			goto fail_locked;
 		}
 		addr = obj_priv->phys_obj->handle->busaddr;
 	}
@@ -1054,10 +1063,9 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 			i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
 		} else
 			i915_gem_object_unpin(intel_crtc->cursor_bo);
-		mutex_lock(&dev->struct_mutex);
 		drm_gem_object_unreference(intel_crtc->cursor_bo);
-		mutex_unlock(&dev->struct_mutex);
 	}
+	mutex_unlock(&dev->struct_mutex);
 
 	intel_crtc->cursor_addr = addr;
 	intel_crtc->cursor_bo = bo;
@@ -1065,6 +1073,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	return 0;
 fail:
 	mutex_lock(&dev->struct_mutex);
+fail_locked:
 	drm_gem_object_unreference(bo);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -1292,7 +1301,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
 		}
 
 		/* XXX: Handle the 100Mhz refclk */
-		i9xx_clock(96000, &clock);
+		intel_clock(96000, &clock);
 	} else {
 		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
@@ -1304,9 +1313,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
 			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
 				/* XXX: might not be 66MHz */
-				i8xx_clock(66000, &clock);
+				intel_clock(66000, &clock);
 			} else
-				i8xx_clock(48000, &clock);
+				intel_clock(48000, &clock);
 		} else {
 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
 				clock.p1 = 2;
@@ -1319,7 +1328,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
 			else
 				clock.p2 = 2;
 
-			i8xx_clock(48000, &clock);
+			intel_clock(48000, &clock);
 		}
 	}
@@ -1598,7 +1607,9 @@ intel_user_framebuffer_create(struct drm_device *dev,
 
 	ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
 	if (ret) {
+		mutex_lock(&dev->struct_mutex);
 		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
 		return NULL;
 	}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index afd1217b8a0..b7f0ebe9f81 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -473,7 +473,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 	ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo);
 	if (ret) {
 		DRM_ERROR("failed to allocate fb.\n");
-		goto out_unref;
+		goto out_unpin;
 	}
 
 	list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
@@ -484,7 +484,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 	info = framebuffer_alloc(sizeof(struct intelfb_par), device);
 	if (!info) {
 		ret = -ENOMEM;
-		goto out_unref;
+		goto out_unpin;
 	}
 
 	par = info->par;
@@ -513,7 +513,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
				       size);
 	if (!info->screen_base) {
 		ret = -ENOSPC;
-		goto out_unref;
+		goto out_unpin;
 	}
 	info->screen_size = size;
@@ -608,6 +608,8 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 	mutex_unlock(&dev->struct_mutex);
 	return 0;
 
+out_unpin:
+	i915_gem_object_unpin(fbo);
 out_unref:
 	drm_gem_object_unreference(fbo);
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 6d4f9126535..0d211af9885 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -481,8 +481,6 @@ void intel_lvds_init(struct drm_device *dev)
 		if (dev_priv->panel_fixed_mode) {
 			dev_priv->panel_fixed_mode->type |=
 				DRM_MODE_TYPE_PREFERRED;
-			drm_mode_probed_add(connector,
-					    dev_priv->panel_fixed_mode);
 			goto out;
 		}
 	}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index a30508b639b..fbe6f3931b1 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -193,7 +193,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
 #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
 
 /** Mapping of command numbers to names, for debug output */
-const static struct _sdvo_cmd_name {
+static const struct _sdvo_cmd_name {
 	u8 cmd;
 	char *name;
 } sdvo_cmd_names[] = {
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index fbb35dc56f5..56485d67369 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -411,7 +411,7 @@ struct tv_mode {
  * These values account for -1s required.
  */
 
-const static struct tv_mode tv_modes[] = {
+static const struct tv_mode tv_modes[] = {
 	{
 		.name		= "NTSC-M",
 		.clock		= 107520,