From e169b23e66575856c5712b8f2162e305d8560d6b Mon Sep 17 00:00:00 2001
From: Samuel Ortiz
Date: Tue, 21 Oct 2008 16:25:42 +0200
Subject: linux-moblin: Add 2.6.27 moblin kernel

This will be the default moblin kernel. We also moved the 2.6.27-rc* kernels to meta-moblin.
---
 ...-struct_mutex-to-protect-ring-in-GEM-mode.patch | 205 +++++++++++++++++++++
 1 file changed, 205 insertions(+)
 create mode 100644 meta-moblin/packages/linux/linux-moblin-2.6.27/0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch

(limited to 'meta-moblin/packages/linux/linux-moblin-2.6.27/0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch')

diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch
new file mode 100644
index 0000000000..910f37e9c5
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.27/0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch
@@ -0,0 +1,205 @@
+commit 8a524209fce67d3b6d2e831b5dad4eced796ce98
+Author: Eric Anholt
+Date:   Mon Sep 1 16:45:29 2008 -0700
+
+    i915: Use struct_mutex to protect ring in GEM mode.
+
+    In the conversion for GEM, we had stopped using the hardware lock to protect
+    ring usage, since it was all internal to the DRM now. However, some paths
+    weren't converted to using struct_mutex to prevent multiple threads from
+    concurrently working on the ring, in particular between the vblank swap handler
+    and ioctls.
+
+    Signed-off-by: Eric Anholt
+
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 205d21e..25f59c1 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -588,9 +588,15 @@ static int i915_quiescent(struct drm_device * dev)
+ static int i915_flush_ioctl(struct drm_device *dev, void *data,
+ 			    struct drm_file *file_priv)
+ {
+-	LOCK_TEST_WITH_RETURN(dev, file_priv);
++	int ret;
++
++	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+ 
+-	return i915_quiescent(dev);
++	mutex_lock(&dev->struct_mutex);
++	ret = i915_quiescent(dev);
++	mutex_unlock(&dev->struct_mutex);
++
++	return ret;
+ }
+ 
+ static int i915_batchbuffer(struct drm_device *dev, void *data,
+@@ -611,14 +617,16 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
+ 	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
+ 		  batch->start, batch->used, batch->num_cliprects);
+ 
+-	LOCK_TEST_WITH_RETURN(dev, file_priv);
++	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+ 
+ 	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
+ 							batch->num_cliprects *
+ 							sizeof(struct drm_clip_rect)))
+ 		return -EFAULT;
+ 
++	mutex_lock(&dev->struct_mutex);
+ 	ret = i915_dispatch_batchbuffer(dev, batch);
++	mutex_unlock(&dev->struct_mutex);
+ 
+ 	sarea_priv->last_dispatch = (int)hw_status[5];
+ 	return ret;
+@@ -637,7 +645,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
+ 	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
+ 		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
+ 
+-	LOCK_TEST_WITH_RETURN(dev, file_priv);
++	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+ 
+ 	if (cmdbuf->num_cliprects &&
+ 	    DRM_VERIFYAREA_READ(cmdbuf->cliprects,
+@@ -647,7 +655,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
+ 		return -EFAULT;
+ 	}
+ 
++	mutex_lock(&dev->struct_mutex);
+ 	ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
++	mutex_unlock(&dev->struct_mutex);
+ 	if (ret) {
+ 		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
+ 		return ret;
+@@ -660,11 +670,17 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
+ static int i915_flip_bufs(struct drm_device *dev, void *data,
+ 			  struct drm_file *file_priv)
+ {
++	int ret;
++
+ 	DRM_DEBUG("%s\n", __FUNCTION__);
+ 
+-	LOCK_TEST_WITH_RETURN(dev, file_priv);
++	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+ 
+-	return i915_dispatch_flip(dev);
++	mutex_lock(&dev->struct_mutex);
++	ret = i915_dispatch_flip(dev);
++	mutex_unlock(&dev->struct_mutex);
++
++	return ret;
+ }
+ 
+ static int i915_getparam(struct drm_device *dev, void *data,
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 87b071a..8547f0a 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -285,6 +285,9 @@ typedef struct drm_i915_private {
+ 	 */
+ 	struct delayed_work retire_work;
+ 
++	/** Work task for vblank-related ring access */
++	struct work_struct vblank_work;
++
+ 	uint32_t next_gem_seqno;
+ 
+ 	/**
+@@ -435,6 +438,7 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
+ void i915_user_irq_get(struct drm_device *dev);
+ void i915_user_irq_put(struct drm_device *dev);
+ 
++extern void i915_gem_vblank_work_handler(struct work_struct *work);
+ extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
+ extern void i915_driver_irq_preinstall(struct drm_device * dev);
+ extern int i915_driver_irq_postinstall(struct drm_device *dev);
+@@ -538,6 +542,17 @@ extern void intel_opregion_free(struct drm_device *dev);
+ extern void opregion_asle_intr(struct drm_device *dev);
+ extern void opregion_enable_asle(struct drm_device *dev);
+ 
++/**
++ * Lock test for when it's just for synchronization of ring access.
++ *
++ * In that case, we don't need to do it when GEM is initialized as nobody else
++ * has access to the ring.
++ */
++#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {			\
++	if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL)	\
++		LOCK_TEST_WITH_RETURN(dev, file_priv);				\
++} while (0)
++
+ #define I915_READ(reg)		DRM_READ32(dev_priv->mmio_map, (reg))
+ #define I915_WRITE(reg,val)	DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
+ #define I915_READ16(reg)	DRM_READ16(dev_priv->mmio_map, (reg))
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 90ae8a0..bb6e5a3 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2491,6 +2491,8 @@ i915_gem_load(struct drm_device *dev)
+ 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
+ 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+ 			  i915_gem_retire_work_handler);
++	INIT_WORK(&dev_priv->mm.vblank_work,
++		  i915_gem_vblank_work_handler);
+ 	dev_priv->mm.next_gem_seqno = 1;
+ 
+ 	i915_gem_detect_bit_6_swizzle(dev);
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index f295bdf..d04c526 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -349,6 +349,21 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
+ 	return count;
+ }
+ 
++void
++i915_gem_vblank_work_handler(struct work_struct *work)
++{
++	drm_i915_private_t *dev_priv;
++	struct drm_device *dev;
++
++	dev_priv = container_of(work, drm_i915_private_t,
++				mm.vblank_work);
++	dev = dev_priv->dev;
++
++	mutex_lock(&dev->struct_mutex);
++	i915_vblank_tasklet(dev);
++	mutex_unlock(&dev->struct_mutex);
++}
++
+ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ {
+ 	struct drm_device *dev = (struct drm_device *) arg;
+@@ -422,8 +437,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ 	if (iir & I915_ASLE_INTERRUPT)
+ 		opregion_asle_intr(dev);
+ 
+-	if (vblank && dev_priv->swaps_pending > 0)
+-		drm_locked_tasklet(dev, i915_vblank_tasklet);
++	if (vblank && dev_priv->swaps_pending > 0) {
++		if (dev_priv->ring.ring_obj == NULL)
++			drm_locked_tasklet(dev, i915_vblank_tasklet);
++		else
++			schedule_work(&dev_priv->mm.vblank_work);
++	}
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -514,14 +533,15 @@ int i915_irq_emit(struct drm_device *dev, void *data,
+ 	drm_i915_irq_emit_t *emit = data;
+ 	int result;
+ 
+-	LOCK_TEST_WITH_RETURN(dev, file_priv);
++	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+ 
+ 	if (!dev_priv) {
+ 		DRM_ERROR("called with no initialization\n");
+ 		return -EINVAL;
+ 	}
+-
++	mutex_lock(&dev->struct_mutex);
+ 	result = i915_emit_irq(dev);
++	mutex_unlock(&dev->struct_mutex);
+ 
+ 	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+ 		DRM_ERROR("copy_to_user\n");
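For readers who want the gist of the kernel patch without walking the hunks: every ioctl path that emits into the command ring now takes dev->struct_mutex around the ring access, and when GEM manages the ring the vblank swap work is deferred to a workqueue item (i915_gem_vblank_work_handler) instead of the locked tasklet, so it runs in a context that is allowed to sleep on that mutex. The snippet below is a minimal userspace sketch of that pattern, not kernel code; it stands in pthreads for struct_mutex and the workqueue, and all names in it (fake_ring, dispatch_from_ioctl, vblank_worker) are illustrative.

/*
 * Minimal userspace sketch (NOT kernel code) of the locking pattern the
 * patch above introduces: every path that touches the command ring takes
 * one mutex (playing the role of dev->struct_mutex), and the "vblank"
 * work runs in a schedulable context (a thread here, a workqueue item in
 * the kernel) so it may sleep on that mutex instead of running in a
 * tasklet.  All names below are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_ring {
	pthread_mutex_t lock;	/* stands in for dev->struct_mutex */
	int tail;		/* shared state both paths advance */
};

/* "ioctl" path: what i915_flush_ioctl()/i915_batchbuffer() now do. */
static void dispatch_from_ioctl(struct fake_ring *ring)
{
	pthread_mutex_lock(&ring->lock);
	ring->tail++;			/* emit commands into the ring */
	pthread_mutex_unlock(&ring->lock);
}

/* Deferred vblank work: what i915_gem_vblank_work_handler() now does. */
static void *vblank_worker(void *arg)
{
	struct fake_ring *ring = arg;
	int i;

	for (i = 0; i < 100000; i++) {
		pthread_mutex_lock(&ring->lock);
		ring->tail++;		/* emit the swap commands */
		pthread_mutex_unlock(&ring->lock);
	}
	return NULL;
}

int main(void)
{
	struct fake_ring ring = { .lock = PTHREAD_MUTEX_INITIALIZER, .tail = 0 };
	pthread_t worker;
	int i;

	pthread_create(&worker, NULL, vblank_worker, &ring);
	for (i = 0; i < 100000; i++)
		dispatch_from_ioctl(&ring);
	pthread_join(worker, NULL);

	/* With both paths serialized on one lock, no increments are lost. */
	printf("tail = %d (expected 200000)\n", ring.tail);
	return 0;
}

It should build with something like "cc -pthread" and always report the expected count, whereas dropping the lock calls would let the ioctl path and the worker race on the shared ring state, which is exactly the window the kernel patch closes.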