Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/Kconfig                 |  13
-rw-r--r--  drivers/gpu/drm/drm_bufs.c              |   2
-rw-r--r--  drivers/gpu/drm/drm_crtc.c              |   3
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c       |  97
-rw-r--r--  drivers/gpu/drm/drm_edid.c              |   6
-rw-r--r--  drivers/gpu/drm/drm_fops.c              |  17
-rw-r--r--  drivers/gpu/drm/drm_gem.c               |  79
-rw-r--r--  drivers/gpu/drm/drm_irq.c               |  14
-rw-r--r--  drivers/gpu/drm/drm_lock.c              |   3
-rw-r--r--  drivers/gpu/drm/drm_stub.c              |   8
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c         |  13
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c         |  29
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         |   3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         | 187
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  |   6
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c         |   5
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c       |  14
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c    | 161
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c         |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c       |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c       |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c         |   2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c      |  21
23 files changed, 470 insertions, 225 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 4be3acbaaf9..3a22eb9be37 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -80,18 +80,17 @@ config DRM_I915 XFree86 4.4 and above. If unsure, build this and i830 as modules and the X server will load the correct one. -endchoice - config DRM_I915_KMS bool "Enable modesetting on intel by default" depends on DRM_I915 help - Choose this option if you want kernel modesetting enabled by default, - and you have a new enough userspace to support this. Running old - userspaces with this enabled will cause pain. Note that this causes - the driver to bind to PCI devices, which precludes loading things - like intelfb. + Choose this option if you want kernel modesetting enabled by default, + and you have a new enough userspace to support this. Running old + userspaces with this enabled will cause pain. Note that this causes + the driver to bind to PCI devices, which precludes loading things + like intelfb. +endchoice config DRM_MGA tristate "Matrox g200/g400" diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 72c667f9bee..12715d3c078 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -420,7 +420,7 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map) dev->sigdata.lock = NULL; master->lock.hw_lock = NULL; /* SHM removed */ master->lock.file_priv = NULL; - wake_up_interruptible(&master->lock.lock_queue); + wake_up_interruptible_all(&master->lock.lock_queue); } break; case _DRM_AGP: diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index bfce0992fef..94a76887173 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1741,9 +1741,8 @@ out: * RETURNS: * Zero on success, errno on failure. */ -void drm_fb_release(struct file *filp) +void drm_fb_release(struct drm_file *priv) { - struct drm_file *priv = filp->private_data; struct drm_device *dev = priv->minor->dev; struct drm_framebuffer *fb, *tfb; diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 964c5eb1fad..1c3a8c55714 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -452,6 +452,59 @@ static void drm_setup_crtcs(struct drm_device *dev) kfree(modes); kfree(enabled); } + +/** + * drm_encoder_crtc_ok - can a given crtc drive a given encoder? + * @encoder: encoder to test + * @crtc: crtc to test + * + * Return false if @encoder can't be driven by @crtc, true otherwise. + */ +static bool drm_encoder_crtc_ok(struct drm_encoder *encoder, + struct drm_crtc *crtc) +{ + struct drm_device *dev; + struct drm_crtc *tmp; + int crtc_mask = 1; + + WARN(!crtc, "checking null crtc?"); + + dev = crtc->dev; + + list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) { + if (tmp == crtc) + break; + crtc_mask <<= 1; + } + + if (encoder->possible_crtcs & crtc_mask) + return true; + return false; +} + +/* + * Check the CRTC we're going to map each output to vs. its current + * CRTC. If they don't match, we have to disable the output and the CRTC + * since the driver will have to re-route things. 
+ */ +static void +drm_crtc_prepare_encoders(struct drm_device *dev) +{ + struct drm_encoder_helper_funcs *encoder_funcs; + struct drm_encoder *encoder; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + encoder_funcs = encoder->helper_private; + /* Disable unused encoders */ + if (encoder->crtc == NULL) + (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); + /* Disable encoders whose CRTC is about to change */ + if (encoder_funcs->get_crtc && + encoder->crtc != (*encoder_funcs->get_crtc)(encoder)) + (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); + } +} + /** * drm_crtc_set_mode - set a mode * @crtc: CRTC to program @@ -512,8 +565,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, if (drm_mode_equal(&saved_mode, &crtc->mode)) { if (saved_x != crtc->x || saved_y != crtc->y || depth_changed || bpp_changed) { - crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, - old_fb); + ret = !crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, + old_fb); goto done; } } @@ -547,12 +600,16 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, encoder_funcs->prepare(encoder); } + drm_crtc_prepare_encoders(dev); + crtc_funcs->prepare(crtc); /* Set up the DPLL and any encoders state that needs to adjust or depend * on the DPLL. */ - crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb); + ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb); + if (!ret) + goto done; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { @@ -615,7 +672,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) struct drm_device *dev; struct drm_crtc **save_crtcs, *new_crtc; struct drm_encoder **save_encoders, *new_encoder; - struct drm_framebuffer *old_fb; + struct drm_framebuffer *old_fb = NULL; bool save_enabled; bool mode_changed = false; bool fb_changed = false; @@ -666,9 +723,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) * and then just flip_or_move it */ if (set->crtc->fb != set->fb) { /* If we have no fb then treat it as a full mode set */ - if (set->crtc->fb == NULL) + if (set->crtc->fb == NULL) { + DRM_DEBUG("crtc has no fb, full mode set\n"); mode_changed = true; - else if ((set->fb->bits_per_pixel != + } else if ((set->fb->bits_per_pixel != set->crtc->fb->bits_per_pixel) || set->fb->depth != set->crtc->fb->depth) fb_changed = true; @@ -680,7 +738,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) fb_changed = true; if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { - DRM_DEBUG("modes are different\n"); + DRM_DEBUG("modes are different, full mode set\n"); drm_mode_debug_printmodeline(&set->crtc->mode); drm_mode_debug_printmodeline(set->mode); mode_changed = true; @@ -706,6 +764,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) } if (new_encoder != connector->encoder) { + DRM_DEBUG("encoder changed, full mode switch\n"); mode_changed = true; connector->encoder = new_encoder; } @@ -732,10 +791,20 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) if (set->connectors[ro] == connector) new_crtc = set->crtc; } + + /* Make sure the new CRTC will work with the encoder */ + if (new_crtc && + !drm_encoder_crtc_ok(connector->encoder, new_crtc)) { + ret = -EINVAL; + goto fail_set_mode; + } if (new_crtc != connector->encoder->crtc) { + DRM_DEBUG("crtc changed, full mode switch\n"); mode_changed = true; connector->encoder->crtc = new_crtc; } + DRM_DEBUG("setting connector %d crtc to %p\n", + connector->base.id, new_crtc); } /* mode_set_base is not a required function */ @@ -752,6 +821,8 @@ int 
drm_crtc_helper_set_config(struct drm_mode_set *set) if (!drm_crtc_helper_set_mode(set->crtc, set->mode, set->x, set->y, old_fb)) { + DRM_ERROR("failed to set mode on crtc %p\n", + set->crtc); ret = -EINVAL; goto fail_set_mode; } @@ -765,7 +836,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) old_fb = set->crtc->fb; if (set->crtc->fb != set->fb) set->crtc->fb = set->fb; - crtc_funcs->mode_set_base(set->crtc, set->x, set->y, old_fb); + ret = crtc_funcs->mode_set_base(set->crtc, + set->x, set->y, old_fb); + if (ret != 0) + goto fail_set_mode; } kfree(save_encoders); @@ -774,9 +848,14 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) fail_set_mode: set->crtc->enabled = save_enabled; + set->crtc->fb = old_fb; count = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (!connector->encoder) + continue; + connector->encoder->crtc = save_crtcs[count++]; + } fail_no_encoder: kfree(save_crtcs); count = 0; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 5a4d3244758..a839a28d8ee 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -125,7 +125,7 @@ static bool edid_is_valid(struct edid *edid) DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); goto bad; } - if (edid->revision <= 0 || edid->revision > 3) { + if (edid->revision > 3) { DRM_ERROR("EDID has minor version %d, which is not between 0-3\n", edid->revision); goto bad; } @@ -320,10 +320,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo); mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo; - mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 8) | + mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 4) | pt->vsync_offset_lo); mode->vsync_end = mode->vsync_start + - ((pt->vsync_pulse_width_hi << 8) | + ((pt->vsync_pulse_width_hi << 4) | pt->vsync_pulse_width_lo); mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo); diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index b06a5371585..f52663ebe01 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -457,6 +457,9 @@ int drm_release(struct inode *inode, struct file *filp) if (dev->driver->driver_features & DRIVER_GEM) drm_gem_release(dev, file_priv); + if (dev->driver->driver_features & DRIVER_MODESET) + drm_fb_release(file_priv); + mutex_lock(&dev->ctxlist_mutex); if (!list_empty(&dev->ctxlist)) { struct drm_ctx_list *pos, *n; @@ -481,6 +484,7 @@ int drm_release(struct inode *inode, struct file *filp) mutex_lock(&dev->struct_mutex); if (file_priv->is_master) { + struct drm_master *master = file_priv->master; struct drm_file *temp; list_for_each_entry(temp, &dev->filelist, lhead) { if ((temp->master == file_priv->master) && @@ -488,6 +492,19 @@ int drm_release(struct inode *inode, struct file *filp) temp->authenticated = 0; } + /** + * Since the master is disappearing, so is the + * possibility to lock. 
+ */ + + if (master->lock.hw_lock) { + if (dev->sigdata.lock == master->lock.hw_lock) + dev->sigdata.lock = NULL; + master->lock.hw_lock = NULL; + master->lock.file_priv = NULL; + wake_up_interruptible_all(&master->lock.lock_queue); + } + if (file_priv->minor->master == file_priv->master) { /* drop the reference held my the minor */ drm_master_put(&file_priv->minor->master); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 6915fb82d0b..88d3368ffdd 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -104,8 +104,8 @@ drm_gem_init(struct drm_device *dev) if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START, DRM_FILE_PAGE_OFFSET_SIZE)) { - drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); drm_ht_remove(&mm->offset_hash); + drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); return -ENOMEM; } @@ -295,35 +295,37 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data, return -EBADF; again: - if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) - return -ENOMEM; + if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) { + ret = -ENOMEM; + goto err; + } spin_lock(&dev->object_name_lock); - if (obj->name) { - args->name = obj->name; + if (!obj->name) { + ret = idr_get_new_above(&dev->object_name_idr, obj, 1, + &obj->name); + args->name = (uint64_t) obj->name; spin_unlock(&dev->object_name_lock); - return 0; - } - ret = idr_get_new_above(&dev->object_name_idr, obj, 1, - &obj->name); - spin_unlock(&dev->object_name_lock); - if (ret == -EAGAIN) - goto again; - if (ret != 0) { - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return ret; - } + if (ret == -EAGAIN) + goto again; - /* - * Leave the reference from the lookup around as the - * name table now holds one - */ - args->name = (uint64_t) obj->name; + if (ret != 0) + goto err; - return 0; + /* Allocate a reference for the name table. */ + drm_gem_object_reference(obj); + } else { + args->name = (uint64_t) obj->name; + spin_unlock(&dev->object_name_lock); + ret = 0; + } + +err: + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return ret; } /** @@ -448,6 +450,7 @@ drm_gem_object_handle_free(struct kref *kref) spin_lock(&dev->object_name_lock); if (obj->name) { idr_remove(&dev->object_name_idr, obj->name); + obj->name = 0; spin_unlock(&dev->object_name_lock); /* * The object name held a reference to this object, drop @@ -460,6 +463,26 @@ drm_gem_object_handle_free(struct kref *kref) } EXPORT_SYMBOL(drm_gem_object_handle_free); +void drm_gem_vm_open(struct vm_area_struct *vma) +{ + struct drm_gem_object *obj = vma->vm_private_data; + + drm_gem_object_reference(obj); +} +EXPORT_SYMBOL(drm_gem_vm_open); + +void drm_gem_vm_close(struct vm_area_struct *vma) +{ + struct drm_gem_object *obj = vma->vm_private_data; + struct drm_device *dev = obj->dev; + + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); +} +EXPORT_SYMBOL(drm_gem_vm_close); + + /** * drm_gem_mmap - memory map routine for GEM objects * @filp: DRM file pointer @@ -521,6 +544,14 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) #endif vma->vm_page_prot = __pgprot(prot); + /* Take a ref for this mapping of the object, so that the fault + * handler can dereference the mmap offset's pointer to the object. 
+ * This reference is cleaned up by the corresponding vm_close + * (which should happen whether the vma was created by this call, or + * by a vm_open due to mremap or partial unmap or whatever). + */ + drm_gem_object_reference(obj); + vma->vm_file = filp; /* Needed for drm_vm_open() */ drm_vm_open_locked(vma); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 3795dbc0f50..93e677a481f 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -435,6 +435,8 @@ EXPORT_SYMBOL(drm_vblank_get); */ void drm_vblank_put(struct drm_device *dev, int crtc) { + BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0); + /* Last user schedules interrupt disable */ if (atomic_dec_and_test(&dev->vblank_refcount[crtc])) mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ); @@ -460,8 +462,9 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) * so that interrupts remain enabled in the interim. */ if (!dev->vblank_inmodeset[crtc]) { - dev->vblank_inmodeset[crtc] = 1; - drm_vblank_get(dev, crtc); + dev->vblank_inmodeset[crtc] = 0x1; + if (drm_vblank_get(dev, crtc) == 0) + dev->vblank_inmodeset[crtc] |= 0x2; } } EXPORT_SYMBOL(drm_vblank_pre_modeset); @@ -473,9 +476,12 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc) if (dev->vblank_inmodeset[crtc]) { spin_lock_irqsave(&dev->vbl_lock, irqflags); dev->vblank_disable_allowed = 1; - dev->vblank_inmodeset[crtc] = 0; spin_unlock_irqrestore(&dev->vbl_lock, irqflags); - drm_vblank_put(dev, crtc); + + if (dev->vblank_inmodeset[crtc] & 0x2) + drm_vblank_put(dev, crtc); + + dev->vblank_inmodeset[crtc] = 0; } } EXPORT_SYMBOL(drm_vblank_post_modeset); diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index 46e7b28f070..e2f70a516c3 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -80,6 +80,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) __set_current_state(TASK_INTERRUPTIBLE); if (!master->lock.hw_lock) { /* Device has been unregistered */ + send_sig(SIGTERM, current, 0); ret = -EINTR; break; } @@ -93,7 +94,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) /* Contention */ schedule(); if (signal_pending(current)) { - ret = -ERESTARTSYS; + ret = -EINTR; break; } } diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 46bb923b097..096e2a37446 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -146,14 +146,6 @@ static void drm_master_destroy(struct kref *kref) drm_ht_remove(&master->magiclist); - if (master->lock.hw_lock) { - if (dev->sigdata.lock == master->lock.hw_lock) - dev->sigdata.lock = NULL; - master->lock.hw_lock = NULL; - master->lock.file_priv = NULL; - wake_up_interruptible(&master->lock.lock_queue); - } - drm_free(master, sizeof(*master), DRM_MEM_DRIVER); } diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 81f1cff56fd..6dab63bdc4c 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -202,7 +202,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) dev_priv->ring.map.flags = 0; dev_priv->ring.map.mtrr = 0; - drm_core_ioremap(&dev_priv->ring.map, dev); + drm_core_ioremap_wc(&dev_priv->ring.map, dev); if (dev_priv->ring.map.handle == NULL) { i915_dma_cleanup(dev); @@ -811,7 +811,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, dev_priv->hws_map.flags = 0; dev_priv->hws_map.mtrr = 0; - drm_core_ioremap(&dev_priv->hws_map, 
dev); + drm_core_ioremap_wc(&dev_priv->hws_map, dev); if (dev_priv->hws_map.handle == NULL) { i915_dma_cleanup(dev); dev_priv->status_gfx_addr = 0; @@ -1090,6 +1090,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base, dev->agp->agp_info.aper_size * 1024*1024); + if (dev_priv->mm.gtt_mapping == NULL) { + ret = -EIO; + goto out_rmmap; + } + /* Set up a WC MTRR for non-PAT systems. This is more common than * one would think, because the kernel disables PAT on first * generation Core chips because WC PAT gets overridden by a UC @@ -1122,7 +1127,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (!I915_NEED_GFX_HWS(dev)) { ret = i915_init_phys_hws(dev); if (ret != 0) - goto out_rmmap; + goto out_iomapfree; } /* On the 945G/GM, the chipset reports the MSI capability on the @@ -1161,6 +1166,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) return 0; +out_iomapfree: + io_mapping_free(dev_priv->mm.gtt_mapping); out_rmmap: iounmap(dev_priv->regs); free_priv: diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index aac12ee31a4..b293ef0bae7 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -27,6 +27,7 @@ * */ +#include <linux/device.h> #include "drmP.h" #include "drm.h" #include "i915_drm.h" @@ -66,6 +67,14 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) i915_save_state(dev); + /* If KMS is active, we do the leavevt stuff here */ + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + if (i915_gem_idle(dev)) + dev_err(&dev->pdev->dev, + "GEM idle failed, resume may fail\n"); + drm_irq_uninstall(dev); + } + intel_opregion_free(dev); if (state.event == PM_EVENT_SUSPEND) { @@ -79,6 +88,9 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) static int i915_resume(struct drm_device *dev) { + struct drm_i915_private *dev_priv = dev->dev_private; + int ret = 0; + pci_set_power_state(dev->pdev, PCI_D0); pci_restore_state(dev->pdev); if (pci_enable_device(dev->pdev)) @@ -89,11 +101,26 @@ static int i915_resume(struct drm_device *dev) intel_opregion_init(dev); - return 0; + /* KMS EnterVT equivalent */ + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + mutex_lock(&dev->struct_mutex); + dev_priv->mm.suspended = 0; + + ret = i915_gem_init_ringbuffer(dev); + if (ret != 0) + ret = -1; + mutex_unlock(&dev->struct_mutex); + + drm_irq_install(dev); + } + + return ret; } static struct vm_operations_struct i915_gem_vm_ops = { .fault = i915_gem_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, }; static struct drm_driver driver = { diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7325363164f..17fa40858d2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -184,6 +184,8 @@ typedef struct drm_i915_private { unsigned int lvds_dither:1; unsigned int lvds_vbt:1; unsigned int int_crt_support:1; + unsigned int lvds_use_ssc:1; + int lvds_ssc_freq; struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ @@ -616,6 +618,7 @@ int i915_gem_init_ringbuffer(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev); int i915_gem_do_init(struct drm_device *dev, unsigned long start, unsigned long end); +int i915_gem_idle(struct drm_device *dev); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); int 
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 81857665409..85685bfd12d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -34,10 +34,6 @@ #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) -static void -i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, - uint32_t read_domains, - uint32_t write_domain); static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); @@ -607,8 +603,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) case -EAGAIN: return VM_FAULT_OOM; case -EFAULT: - case -EBUSY: - DRM_ERROR("can't insert pfn?? fault or busy...\n"); return VM_FAULT_SIGBUS; default: return VM_FAULT_NOPAGE; @@ -684,6 +678,30 @@ out_free_list: return ret; } +static void +i915_gem_free_mmap_offset(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_gem_mm *mm = dev->mm_private; + struct drm_map_list *list; + + list = &obj->map_list; + drm_ht_remove_item(&mm->offset_hash, &list->hash); + + if (list->file_offset_node) { + drm_mm_put_block(list->file_offset_node); + list->file_offset_node = NULL; + } + + if (list->map) { + drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER); + list->map = NULL; + } + + obj_priv->mmap_offset = 0; +} + /** * i915_gem_get_gtt_alignment - return required GTT alignment for an object * @obj: object to check @@ -758,8 +776,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, if (!obj_priv->mmap_offset) { ret = i915_gem_create_mmap_offset(obj); - if (ret) + if (ret) { + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); return ret; + } } args->offset = obj_priv->mmap_offset; @@ -1030,6 +1051,9 @@ i915_gem_retire_requests(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; uint32_t seqno; + if (!dev_priv->hw_status_page) + return; + seqno = i915_get_gem_seqno(dev); while (!list_empty(&dev_priv->mm.request_list)) { @@ -1996,30 +2020,28 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) * drm_agp_chipset_flush */ static void -i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, - uint32_t read_domains, - uint32_t write_domain) +i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = obj->driver_private; uint32_t invalidate_domains = 0; uint32_t flush_domains = 0; - BUG_ON(read_domains & I915_GEM_DOMAIN_CPU); - BUG_ON(write_domain == I915_GEM_DOMAIN_CPU); + BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU); + BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU); #if WATCH_BUF DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", __func__, obj, - obj->read_domains, read_domains, - obj->write_domain, write_domain); + obj->read_domains, obj->pending_read_domains, + obj->write_domain, obj->pending_write_domain); #endif /* * If the object isn't moving to a new write domain, * let the object stay in multiple read domains */ - if (write_domain == 0) - read_domains |= obj->read_domains; + if (obj->pending_write_domain == 0) + obj->pending_read_domains |= obj->read_domains; else obj_priv->dirty = 1; @@ -2029,15 +2051,17 @@ 
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, * any read domains which differ from the old * write domain */ - if (obj->write_domain && obj->write_domain != read_domains) { + if (obj->write_domain && + obj->write_domain != obj->pending_read_domains) { flush_domains |= obj->write_domain; - invalidate_domains |= read_domains & ~obj->write_domain; + invalidate_domains |= + obj->pending_read_domains & ~obj->write_domain; } /* * Invalidate any read caches which may have * stale data. That is, any new read domains. */ - invalidate_domains |= read_domains & ~obj->read_domains; + invalidate_domains |= obj->pending_read_domains & ~obj->read_domains; if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) { #if WATCH_BUF DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", @@ -2046,9 +2070,15 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, i915_gem_clflush_object(obj); } - if ((write_domain | flush_domains) != 0) - obj->write_domain = write_domain; - obj->read_domains = read_domains; + /* The actual obj->write_domain will be updated with + * pending_write_domain after we emit the accumulated flush for all + * of our domain changes in execbuffers (which clears objects' + * write_domains). So if we have a current write domain that we + * aren't changing, set pending_write_domain to that. + */ + if (flush_domains == 0 && obj->pending_write_domain == 0) + obj->pending_write_domain = obj->write_domain; + obj->read_domains = obj->pending_read_domains; dev->invalidate_domains |= invalidate_domains; dev->flush_domains |= flush_domains; @@ -2251,6 +2281,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, (int) reloc.offset, reloc.read_domains, reloc.write_domain); + drm_gem_object_unreference(target_obj); + i915_gem_object_unpin(obj); return -EINVAL; } @@ -2480,13 +2512,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, if (dev_priv->mm.wedged) { DRM_ERROR("Execbuf while wedged\n"); mutex_unlock(&dev->struct_mutex); - return -EIO; + ret = -EIO; + goto pre_mutex_err; } if (dev_priv->mm.suspended) { DRM_ERROR("Execbuf while VT-switched.\n"); mutex_unlock(&dev->struct_mutex); - return -EBUSY; + ret = -EBUSY; + goto pre_mutex_err; } /* Look up object handles */ @@ -2554,9 +2588,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_gem_object *obj = object_list[i]; /* Compute new gpu domains and update invalidate/flush */ - i915_gem_object_set_to_gpu_domain(obj, - obj->pending_read_domains, - obj->pending_write_domain); + i915_gem_object_set_to_gpu_domain(obj); } i915_verify_inactive(dev, __FILE__, __LINE__); @@ -2575,6 +2607,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, (void)i915_add_request(dev, dev->flush_domains); } + for (i = 0; i < args->buffer_count; i++) { + struct drm_gem_object *obj = object_list[i]; + + obj->write_domain = obj->pending_write_domain; + } + i915_verify_inactive(dev, __FILE__, __LINE__); #if WATCH_COHERENCY @@ -2632,15 +2670,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, i915_verify_inactive(dev, __FILE__, __LINE__); - /* Copy the new buffer offsets back to the user's exec list. 
*/ - ret = copy_to_user((struct drm_i915_relocation_entry __user *) - (uintptr_t) args->buffers_ptr, - exec_list, - sizeof(*exec_list) * args->buffer_count); - if (ret) - DRM_ERROR("failed to copy %d exec entries " - "back to user (%d)\n", - args->buffer_count, ret); err: for (i = 0; i < pinned; i++) i915_gem_object_unpin(object_list[i]); @@ -2650,6 +2679,18 @@ err: mutex_unlock(&dev->struct_mutex); + if (!ret) { + /* Copy the new buffer offsets back to the user's exec list. */ + ret = copy_to_user((struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, + exec_list, + sizeof(*exec_list) * args->buffer_count); + if (ret) + DRM_ERROR("failed to copy %d exec entries " + "back to user (%d)\n", + args->buffer_count, ret); + } + pre_mutex_err: drm_free(object_list, sizeof(*object_list) * args->buffer_count, DRM_MEM_DRIVER); @@ -2753,6 +2794,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", args->handle); + drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); return -EINVAL; } @@ -2833,6 +2875,13 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, return -EBADF; } + /* Update the active list for the hardware's current position. + * Otherwise this only updates on a delayed timer or when irqs are + * actually unmasked, and our working set ends up being larger than + * required. + */ + i915_gem_retire_requests(dev); + obj_priv = obj->driver_private; /* Don't count being on the flushing list against the object being * done. Otherwise, a buffer left on the flushing list but not getting @@ -2885,9 +2934,6 @@ int i915_gem_init_object(struct drm_gem_object *obj) void i915_gem_free_object(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; - struct drm_gem_mm *mm = dev->mm_private; - struct drm_map_list *list; - struct drm_map *map; struct drm_i915_gem_object *obj_priv = obj->driver_private; while (obj_priv->pin_count > 0) @@ -2898,19 +2944,7 @@ void i915_gem_free_object(struct drm_gem_object *obj) i915_gem_object_unbind(obj); - list = &obj->map_list; - drm_ht_remove_item(&mm->offset_hash, &list->hash); - - if (list->file_offset_node) { - drm_mm_put_block(list->file_offset_node); - list->file_offset_node = NULL; - } - - map = list->map; - if (map) { - drm_free(map, sizeof(*map), DRM_MEM_DRIVER); - list->map = NULL; - } + i915_gem_free_mmap_offset(obj); drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); @@ -2949,7 +2983,7 @@ i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) return 0; } -static int +int i915_gem_idle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -3095,6 +3129,7 @@ i915_gem_init_hws(struct drm_device *dev) if (dev_priv->hw_status_page == NULL) { DRM_ERROR("Failed to map status page.\n"); memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); + i915_gem_object_unpin(obj); drm_gem_object_unreference(obj); return -EINVAL; } @@ -3107,6 +3142,31 @@ i915_gem_init_hws(struct drm_device *dev) return 0; } +static void +i915_gem_cleanup_hws(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + + if (dev_priv->hws_obj == NULL) + return; + + obj = dev_priv->hws_obj; + obj_priv = obj->driver_private; + + kunmap(obj_priv->page_list[0]); + i915_gem_object_unpin(obj); + drm_gem_object_unreference(obj); + 
dev_priv->hws_obj = NULL; + + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); + dev_priv->hw_status_page = NULL; + + /* Write high address into HWS_PGA when disabling. */ + I915_WRITE(HWS_PGA, 0x1ffff000); +} + int i915_gem_init_ringbuffer(struct drm_device *dev) { @@ -3124,6 +3184,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev) obj = drm_gem_object_alloc(dev, 128 * 1024); if (obj == NULL) { DRM_ERROR("Failed to allocate ringbuffer\n"); + i915_gem_cleanup_hws(dev); return -ENOMEM; } obj_priv = obj->driver_private; @@ -3131,6 +3192,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev) ret = i915_gem_object_pin(obj, 4096); if (ret != 0) { drm_gem_object_unreference(obj); + i915_gem_cleanup_hws(dev); return ret; } @@ -3148,7 +3210,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev) if (ring->map.handle == NULL) { DRM_ERROR("Failed to map ringbuffer.\n"); memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); + i915_gem_object_unpin(obj); drm_gem_object_unreference(obj); + i915_gem_cleanup_hws(dev); return -EINVAL; } ring->ring_obj = obj; @@ -3228,20 +3292,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) dev_priv->ring.ring_obj = NULL; memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); - if (dev_priv->hws_obj != NULL) { - struct drm_gem_object *obj = dev_priv->hws_obj; - struct drm_i915_gem_object *obj_priv = obj->driver_private; - - kunmap(obj_priv->page_list[0]); - i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); - dev_priv->hws_obj = NULL; - memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); - dev_priv->hw_status_page = NULL; - - /* Write high address into HWS_PGA when disabling. */ - I915_WRITE(HWS_PGA, 0x1ffff000); - } + i915_gem_cleanup_hws(dev); } int @@ -3497,7 +3548,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, user_data = (char __user *) (uintptr_t) args->data_ptr; obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; - DRM_ERROR("obj_addr %p, %lld\n", obj_addr, args->size); + DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size); ret = copy_from_user(obj_addr, user_data, args->size); if (ret) return -EFAULT; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index fa1685cba84..7fb4191ef93 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -299,9 +299,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, } obj_priv->stride = args->stride; - mutex_unlock(&dev->struct_mutex); - drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); return 0; } @@ -340,9 +339,8 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, DRM_ERROR("unknown tiling mode\n"); } - mutex_unlock(&dev->struct_mutex); - drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); return 0; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 548ff2c6643..87b6b603469 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -383,12 +383,13 @@ int i915_irq_emit(struct drm_device *dev, void *data, drm_i915_irq_emit_t *emit = data; int result; - RING_LOCK_TEST_WITH_RETURN(dev, file_priv); - if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } + + RING_LOCK_TEST_WITH_RETURN(dev, file_priv); + mutex_lock(&dev->struct_mutex); result = i915_emit_irq(dev); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 4ca82a02552..fc28e2bbd54 100644 --- 
a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -111,6 +111,12 @@ parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb) panel_fixed_mode->clock = dvo_timing->clock * 10; panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; + /* Some VBTs have bogus h/vtotal values */ + if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) + panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; + if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal) + panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1; + drm_mode_set_name(panel_fixed_mode); dev_priv->vbt_mode = panel_fixed_mode; @@ -135,6 +141,14 @@ parse_general_features(struct drm_i915_private *dev_priv, if (general) { dev_priv->int_tv_support = general->int_tv_support; dev_priv->int_crt_support = general->int_crt_support; + dev_priv->lvds_use_ssc = general->enable_ssc; + + if (dev_priv->lvds_use_ssc) { + if (IS_I855(dev_priv->dev)) + dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48; + else + dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96; + } } } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index bbdd72909a1..a2834276cb3 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -90,12 +90,12 @@ typedef struct { #define I9XX_DOT_MAX 400000 #define I9XX_VCO_MIN 1400000 #define I9XX_VCO_MAX 2800000 -#define I9XX_N_MIN 3 -#define I9XX_N_MAX 8 +#define I9XX_N_MIN 1 +#define I9XX_N_MAX 6 #define I9XX_M_MIN 70 #define I9XX_M_MAX 120 #define I9XX_M1_MIN 10 -#define I9XX_M1_MAX 20 +#define I9XX_M1_MAX 22 #define I9XX_M2_MIN 5 #define I9XX_M2_MAX 9 #define I9XX_P_SDVO_DAC_MIN 5 @@ -189,9 +189,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) return limit; } -/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */ - -static void i8xx_clock(int refclk, intel_clock_t *clock) +static void intel_clock(int refclk, intel_clock_t *clock) { clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); clock->p = clock->p1 * clock->p2; @@ -199,25 +197,6 @@ static void i8xx_clock(int refclk, intel_clock_t *clock) clock->dot = clock->vco / clock->p; } -/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */ - -static void i9xx_clock(int refclk, intel_clock_t *clock) -{ - clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); - clock->p = clock->p1 * clock->p2; - clock->vco = refclk * clock->m / (clock->n + 2); - clock->dot = clock->vco / clock->p; -} - -static void intel_clock(struct drm_device *dev, int refclk, - intel_clock_t *clock) -{ - if (IS_I9XX(dev)) - i9xx_clock (refclk, clock); - else - i8xx_clock (refclk, clock); -} - /** * Returns whether any output on the specified pipe is of the specified type */ @@ -238,7 +217,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) return false; } -#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; } +#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) /** * Returns whether the given set of divisors are valid for a given refclk with * the given connectors. 
@@ -318,7 +297,7 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target, clock.p1 <= limit->p1.max; clock.p1++) { int this_err; - intel_clock(dev, refclk, &clock); + intel_clock(refclk, &clock); if (!intel_PLL_is_valid(crtc, &clock)) continue; @@ -343,7 +322,7 @@ intel_wait_for_vblank(struct drm_device *dev) udelay(20000); } -static void +static int intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { @@ -361,11 +340,21 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; u32 dspcntr, alignment; + int ret; /* no fb bound */ if (!crtc->fb) { DRM_DEBUG("No FB bound\n"); - return; + return 0; + } + + switch (pipe) { + case 0: + case 1: + break; + default: + DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); + return -EINVAL; } intel_fb = to_intel_framebuffer(crtc->fb); @@ -377,28 +366,30 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, alignment = 64 * 1024; break; case I915_TILING_X: - if (IS_I9XX(dev)) - alignment = 1024 * 1024; - else - alignment = 512 * 1024; + /* pin() will align the object as required by fence */ + alignment = 0; break; case I915_TILING_Y: /* FIXME: Is this true? */ DRM_ERROR("Y tiled not allowed for scan out buffers\n"); - return; + return -EINVAL; default: BUG(); } - if (i915_gem_object_pin(intel_fb->obj, alignment)) - return; - - i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1); - - Start = obj_priv->gtt_offset; - Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); + mutex_lock(&dev->struct_mutex); + ret = i915_gem_object_pin(intel_fb->obj, alignment); + if (ret != 0) { + mutex_unlock(&dev->struct_mutex); + return ret; + } - I915_WRITE(dspstride, crtc->fb->pitch); + ret = i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1); + if (ret != 0) { + i915_gem_object_unpin(intel_fb->obj); + mutex_unlock(&dev->struct_mutex); + return ret; + } dspcntr = I915_READ(dspcntr_reg); /* Mask out pixel format bits in case we change it */ @@ -419,11 +410,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, break; default: DRM_ERROR("Unknown color depth\n"); - return; + i915_gem_object_unpin(intel_fb->obj); + mutex_unlock(&dev->struct_mutex); + return -EINVAL; } I915_WRITE(dspcntr_reg, dspcntr); + Start = obj_priv->gtt_offset; + Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); + DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); + I915_WRITE(dspstride, crtc->fb->pitch); if (IS_I965G(dev)) { I915_WRITE(dspbase, Offset); I915_READ(dspbase); @@ -440,27 +437,24 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, intel_fb = to_intel_framebuffer(old_fb); i915_gem_object_unpin(intel_fb->obj); } + mutex_unlock(&dev->struct_mutex); if (!dev->primary->master) - return; + return 0; master_priv = dev->primary->master->driver_priv; if (!master_priv->sarea_priv) - return; + return 0; - switch (pipe) { - case 0: - master_priv->sarea_priv->pipeA_x = x; - master_priv->sarea_priv->pipeA_y = y; - break; - case 1: + if (pipe) { master_priv->sarea_priv->pipeB_x = x; master_priv->sarea_priv->pipeB_y = y; - break; - default: - DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); - break; + } else { + master_priv->sarea_priv->pipeA_x = x; + master_priv->sarea_priv->pipeA_y = y; } + + return 0; } @@ -708,11 +702,11 @@ static int intel_panel_fitter_pipe (struct drm_device *dev) return 1; } -static void intel_crtc_mode_set(struct drm_crtc *crtc, - struct 
drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, - int x, int y, - struct drm_framebuffer *old_fb) +static int intel_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, + struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -732,13 +726,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; - int refclk; + int refclk, num_outputs = 0; intel_clock_t clock; u32 dpll = 0, fp = 0, dspcntr, pipeconf; bool ok, is_sdvo = false, is_dvo = false; bool is_crt = false, is_lvds = false, is_tv = false; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; + int ret; drm_vblank_pre_modeset(dev, pipe); @@ -768,9 +763,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, is_crt = true; break; } + + num_outputs++; } - if (IS_I9XX(dev)) { + if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) { + refclk = dev_priv->lvds_ssc_freq * 1000; + DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000); + } else if (IS_I9XX(dev)) { refclk = 96000; } else { refclk = 48000; @@ -779,7 +779,7 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); if (!ok) { DRM_ERROR("Couldn't find PLL settings for mode!\n"); - return; + return -EINVAL; } fp = clock.n << 16 | clock.m1 << 8 | clock.m2; @@ -829,11 +829,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, } } - if (is_tv) { + if (is_sdvo && is_tv) + dpll |= PLL_REF_INPUT_TVCLKINBC; + else if (is_tv) /* XXX: just matching BIOS for now */ -/* dpll |= PLL_REF_INPUT_TVCLKINBC; */ + /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ dpll |= 3; - } + else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) + dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else dpll |= PLL_REF_INPUT_DREFCLK; @@ -950,9 +953,13 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(dspcntr_reg, dspcntr); /* Flush the plane changes */ - intel_pipe_set_base(crtc, x, y, old_fb); + ret = intel_pipe_set_base(crtc, x, y, old_fb); + if (ret != 0) + return ret; drm_vblank_post_modeset(dev, pipe); + + return 0; } /** Loads the palette/gamma unit for the CRTC with the prepared values */ @@ -1001,6 +1008,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, temp = CURSOR_MODE_DISABLE; addr = 0; bo = NULL; + mutex_lock(&dev->struct_mutex); goto finish; } @@ -1023,18 +1031,19 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, } /* we only need to pin inside GTT if cursor is non-phy */ + mutex_lock(&dev->struct_mutex); if (!dev_priv->cursor_needs_physical) { ret = i915_gem_object_pin(bo, PAGE_SIZE); if (ret) { DRM_ERROR("failed to pin cursor bo\n"); - goto fail; + goto fail_locked; } addr = obj_priv->gtt_offset; } else { ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? 
I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); if (ret) { DRM_ERROR("failed to attach phys object\n"); - goto fail; + goto fail_locked; } addr = obj_priv->phys_obj->handle->busaddr; } @@ -1054,10 +1063,9 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); } else i915_gem_object_unpin(intel_crtc->cursor_bo); - mutex_lock(&dev->struct_mutex); drm_gem_object_unreference(intel_crtc->cursor_bo); - mutex_unlock(&dev->struct_mutex); } + mutex_unlock(&dev->struct_mutex); intel_crtc->cursor_addr = addr; intel_crtc->cursor_bo = bo; @@ -1065,6 +1073,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, return 0; fail: mutex_lock(&dev->struct_mutex); +fail_locked: drm_gem_object_unreference(bo); mutex_unlock(&dev->struct_mutex); return ret; @@ -1292,7 +1301,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) } /* XXX: Handle the 100Mhz refclk */ - i9xx_clock(96000, &clock); + intel_clock(96000, &clock); } else { bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); @@ -1304,9 +1313,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) { /* XXX: might not be 66MHz */ - i8xx_clock(66000, &clock); + intel_clock(66000, &clock); } else - i8xx_clock(48000, &clock); + intel_clock(48000, &clock); } else { if (dpll & PLL_P1_DIVIDE_BY_TWO) clock.p1 = 2; @@ -1319,7 +1328,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) else clock.p2 = 2; - i8xx_clock(48000, &clock); + intel_clock(48000, &clock); } } @@ -1598,7 +1607,9 @@ intel_user_framebuffer_create(struct drm_device *dev, ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); if (ret) { + mutex_lock(&dev->struct_mutex); drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); return NULL; } diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index afd1217b8a0..b7f0ebe9f81 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -473,7 +473,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo); if (ret) { DRM_ERROR("failed to allocate fb.\n"); - goto out_unref; + goto out_unpin; } list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); @@ -484,7 +484,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, info = framebuffer_alloc(sizeof(struct intelfb_par), device); if (!info) { ret = -ENOMEM; - goto out_unref; + goto out_unpin; } par = info->par; @@ -513,7 +513,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, size); if (!info->screen_base) { ret = -ENOSPC; - goto out_unref; + goto out_unpin; } info->screen_size = size; @@ -608,6 +608,8 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, mutex_unlock(&dev->struct_mutex); return 0; +out_unpin: + i915_gem_object_unpin(fbo); out_unref: drm_gem_object_unreference(fbo); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 6d4f9126535..0d211af9885 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -481,8 +481,6 @@ void intel_lvds_init(struct drm_device *dev) if (dev_priv->panel_fixed_mode) { dev_priv->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; - drm_mode_probed_add(connector, - dev_priv->panel_fixed_mode); goto out; } } diff --git 
a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index a30508b639b..fbe6f3931b1 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -193,7 +193,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} /** Mapping of command numbers to names, for debug output */ -const static struct _sdvo_cmd_name { +static const struct _sdvo_cmd_name { u8 cmd; char *name; } sdvo_cmd_names[] = { diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index fbb35dc56f5..56485d67369 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -411,7 +411,7 @@ struct tv_mode { * These values account for -1s required. */ -const static struct tv_mode tv_modes[] = { +static const struct tv_mode tv_modes[] = { { .name = "NTSC-M", .clock = 107520, diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index df4cf97e5d9..92965dbb3c1 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c @@ -557,8 +557,10 @@ static int radeon_do_engine_reset(struct drm_device * dev) } static void radeon_cp_init_ring_buffer(struct drm_device * dev, - drm_radeon_private_t * dev_priv) + drm_radeon_private_t *dev_priv, + struct drm_file *file_priv) { + struct drm_radeon_master_private *master_priv; u32 ring_start, cur_read_ptr; u32 tmp; @@ -677,6 +679,14 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, dev_priv->scratch[2] = 0; RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0); + /* reset sarea copies of these */ + master_priv = file_priv->master->driver_priv; + if (master_priv->sarea_priv) { + master_priv->sarea_priv->last_frame = 0; + master_priv->sarea_priv->last_dispatch = 0; + master_priv->sarea_priv->last_clear = 0; + } + radeon_do_wait_for_idle(dev_priv); /* Sync everything up */ @@ -1215,7 +1225,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, } radeon_cp_load_microcode(dev_priv); - radeon_cp_init_ring_buffer(dev, dev_priv); + radeon_cp_init_ring_buffer(dev, dev_priv, file_priv); dev_priv->last_buf = 0; @@ -1281,7 +1291,7 @@ static int radeon_do_cleanup_cp(struct drm_device * dev) * * Charl P. Botha <http://cpbotha.net> */ -static int radeon_do_resume_cp(struct drm_device * dev) +static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -1304,7 +1314,7 @@ static int radeon_do_resume_cp(struct drm_device * dev) } radeon_cp_load_microcode(dev_priv); - radeon_cp_init_ring_buffer(dev, dev_priv); + radeon_cp_init_ring_buffer(dev, dev_priv, file_priv); radeon_do_engine_reset(dev); radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); @@ -1479,8 +1489,7 @@ int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_pri */ int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv) { - - return radeon_do_resume_cp(dev); + return radeon_do_resume_cp(dev, file_priv); } int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) |
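
As context for the drm_crtc_helper.c hunk above that introduces drm_encoder_crtc_ok(): the helper walks the device's CRTC list to derive a one-hot index mask for the target CRTC and tests it against the encoder's possible_crtcs bitmask; drm_crtc_helper_set_config() then rejects a connector-to-CRTC mapping with -EINVAL when the mask does not allow it. The following is a minimal standalone sketch of that mask check, using simplified stand-in structures rather than the kernel's struct drm_encoder and struct drm_crtc.

/* Standalone illustration of the possible_crtcs check added by
 * drm_encoder_crtc_ok() in the patch above. Types and values here are
 * simplified stand-ins, not the kernel structures. */
#include <stdbool.h>
#include <stdio.h>

struct fake_crtc {
	int index;                   /* position in the device's CRTC list */
};

struct fake_encoder {
	unsigned int possible_crtcs; /* bit N set => encoder can drive CRTC N */
};

static bool encoder_crtc_ok(const struct fake_encoder *enc,
			    const struct fake_crtc *crtc)
{
	/* The kernel helper finds the bit by walking the CRTC list until it
	 * reaches the target CRTC; with an explicit index that collapses to
	 * a single shift. */
	return (enc->possible_crtcs & (1u << crtc->index)) != 0;
}

int main(void)
{
	struct fake_encoder lvds = { .possible_crtcs = 0x2 }; /* CRTC 1 only */
	struct fake_crtc crtc0 = { .index = 0 };
	struct fake_crtc crtc1 = { .index = 1 };

	printf("crtc0 ok: %d\n", encoder_crtc_ok(&lvds, &crtc0)); /* prints 0 */
	printf("crtc1 ok: %d\n", encoder_crtc_ok(&lvds, &crtc1)); /* prints 1 */
	return 0;
}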