diff --git a/src/patches/suse-2.6.27.31/patches.arch/ppc-spufs-05-Use-state_mutex-for-switch_log-lockin.patch b/src/patches/suse-2.6.27.31/patches.arch/ppc-spufs-05-Use-state_mutex-for-switch_log-lockin.patch
deleted file mode 100644
index 0be6922..0000000
--- a/src/patches/suse-2.6.27.31/patches.arch/ppc-spufs-05-Use-state_mutex-for-switch_log-lockin.patch
+++ /dev/null
@@ -1,264 +0,0 @@
-Subject: Use state_mutex for switch_log locking, and prevent multiple openers
-From: Jeremy Kerr <jk@ozlabs.org>
-References: 447133 - LTC50070
-
-Currently, we use ctx->mapping_lock and ctx->switch_log->lock for the
-context switch log. The mapping lock only prevents concurrent open()s,
-so we require the switch_log->lock for reads.
-
-Since writes to the switch log buffer occur on context switches, we're
-better off synchronising with the state_mutex, which is held during a
-switch. Since we're serialised throughout the buffer reads and writes,
-we can use the state mutex to protect open and release too, and
-can now kfree() the log buffer on release. This allows us to perform
-the switch log notify without taking any extra locks.
-
-Because the buffer is only present while the file is open, we can use
-it to prevent multiple simultaneous openers.
-
-Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
-Signed-off-by: Olaf Hering <olh@suse.de>
----
- arch/powerpc/platforms/cell/spufs/file.c  |  133 ++++++++++++++++++------------
- arch/powerpc/platforms/cell/spufs/run.c   |    3 
- arch/powerpc/platforms/cell/spufs/spufs.h |    1 
- 3 files changed, 81 insertions(+), 56 deletions(-)
-
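The shape of the new locking is easier to see stripped of the spufs details: a single mutex covers open, read and release, and the presence of the log buffer itself is what marks the file as open, so a second open() fails with -EBUSY and release() can simply free the buffer under the same lock. Below is a minimal sketch of that pattern, not the spufs code itself: demo_ctx, demo_open and DEMO_BUFSIZE are invented names, and a plain mutex_lock() stands in for the interruptible spu_acquire().

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#define DEMO_BUFSIZE 4096

struct demo_ctx {
        struct mutex state_mutex;       /* plays the role of ctx->state_mutex */
        void *log;                      /* non-NULL only while the file is open */
};

static int demo_open(struct demo_ctx *ctx)
{
        int rc = 0;

        mutex_lock(&ctx->state_mutex);
        if (ctx->log) {                 /* buffer present => already open */
                rc = -EBUSY;
        } else {
                ctx->log = kzalloc(DEMO_BUFSIZE, GFP_KERNEL);
                if (!ctx->log)
                        rc = -ENOMEM;
        }
        mutex_unlock(&ctx->state_mutex);
        return rc;
}

static int demo_release(struct demo_ctx *ctx)
{
        mutex_lock(&ctx->state_mutex);
        kfree(ctx->log);                /* kfree(NULL) is a no-op */
        ctx->log = NULL;                /* the next open() succeeds again */
        mutex_unlock(&ctx->state_mutex);
        return 0;
}

The real open path differs in that spu_acquire() is interruptible and can fail, which is why the patch checks its return value before touching the context.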
---- a/arch/powerpc/platforms/cell/spufs/file.c
-+++ b/arch/powerpc/platforms/cell/spufs/file.c
-@@ -2429,38 +2429,48 @@ static inline int spufs_switch_log_avail
- static int spufs_switch_log_open(struct inode *inode, struct file *file)
- {
-       struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
-+      int rc;
-+
-+      rc = spu_acquire(ctx);
-+      if (rc)
-+              return rc;
--      /*
--       * We (ab-)use the mapping_lock here because it serves the similar
--       * purpose for synchronizing open/close elsewhere.  Maybe it should
--       * be renamed eventually.
--       */
--      mutex_lock(&ctx->mapping_lock);
-       if (ctx->switch_log) {
--              spin_lock(&ctx->switch_log->lock);
--              ctx->switch_log->head = 0;
--              ctx->switch_log->tail = 0;
--              spin_unlock(&ctx->switch_log->lock);
--      } else {
--              /*
--               * We allocate the switch log data structures on first open.
--               * They will never be free because we assume a context will
--               * be traced until it goes away.
--               */
--              ctx->switch_log = kzalloc(sizeof(struct switch_log) +
--                      SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
--                      GFP_KERNEL);
--              if (!ctx->switch_log)
--                      goto out;
--              spin_lock_init(&ctx->switch_log->lock);
--              init_waitqueue_head(&ctx->switch_log->wait);
-+              rc = -EBUSY;
-+              goto out;
-+      }
-+
-+      ctx->switch_log = kzalloc(sizeof(struct switch_log) +
-+              SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
-+              GFP_KERNEL);
-+
-+      if (!ctx->switch_log) {
-+              rc = -ENOMEM;
-+              goto out;
-       }
--      mutex_unlock(&ctx->mapping_lock);
-+
-+      init_waitqueue_head(&ctx->switch_log->wait);
-+      rc = 0;
-+
-+out:
-+      spu_release(ctx);
-+      return rc;
-+}
-+
-+static int spufs_switch_log_release(struct inode *inode, struct file *file)
-+{
-+      struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
-+      int rc;
-+
-+      rc = spu_acquire(ctx);
-+      if (rc)
-+              return rc;
-+
-+      kfree(ctx->switch_log);
-+      ctx->switch_log = NULL;
-+      spu_release(ctx);
-       return 0;
-- out:
--      mutex_unlock(&ctx->mapping_lock);
--      return -ENOMEM;
- }
- static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
-@@ -2488,42 +2498,46 @@ static ssize_t spufs_switch_log_read(str
-       if (!buf || len < 0)
-               return -EINVAL;
-+      error = spu_acquire(ctx);
-+      if (error)
-+              return error;
-+
-       while (cnt < len) {
-               char tbuf[128];
-               int width;
-               if (file->f_flags & O_NONBLOCK) {
--                      if (spufs_switch_log_used(ctx) <= 0)
--                              return cnt ? cnt : -EAGAIN;
-+                      if (spufs_switch_log_used(ctx) == 0) {
-+                              error = -EAGAIN;
-+                              break;
-+                      }
-               } else {
--                      /* Wait for data in buffer */
--                      error = wait_event_interruptible(ctx->switch_log->wait,
-+                      /* spufs_wait will drop the mutex and re-acquire,
-+                       * but since we're in read(), the file cannot be
-+                       * _released (and so ctx->switch_log is stable).
-+                       */
-+                      error = spufs_wait(ctx->switch_log->wait,
-                                       spufs_switch_log_used(ctx) > 0);
-+
-+                      /* On error, spufs_wait returns without the
-+                       * state mutex held */
-                       if (error)
--                              break;
-+                              return error;
-               }
--              spin_lock(&ctx->switch_log->lock);
--              if (ctx->switch_log->head == ctx->switch_log->tail) {
--                      /* multiple readers race? */
--                      spin_unlock(&ctx->switch_log->lock);
-+              /* We may have had entries read from underneath us while we
-+               * dropped the mutex in spufs_wait, so re-check */
-+              if (ctx->switch_log->head == ctx->switch_log->tail)
-                       continue;
--              }
-               width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
--              if (width < len) {
-+              if (width < len)
-                       ctx->switch_log->tail =
-                               (ctx->switch_log->tail + 1) %
-                                SWITCH_LOG_BUFSIZE;
--              }
--
--              spin_unlock(&ctx->switch_log->lock);
--
--              /*
--               * If the record is greater than space available return
--               * partial buffer (so far)
--               */
--              if (width >= len)
-+              else
-+                      /* If the record is greater than space available return
-+                       * partial buffer (so far) */
-                       break;
-               error = copy_to_user(buf + cnt, tbuf, width);
-@@ -2532,6 +2546,8 @@ static ssize_t spufs_switch_log_read(str
-               cnt += width;
-       }
-+      spu_release(ctx);
-+
-       return cnt == 0 ? error : cnt;
- }
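The bare "return error" in the blocking branch above relies on a convention that is easy to miss: the wait helper drops the state mutex while it sleeps, re-takes it on wakeup, and on a signal returns without the mutex held, so the caller must not release it again. The sketch below reconstructs that convention from the comments in this hunk, not from the spufs source; demo_wait_event, cond and arg are invented names.

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Caller holds *mtx.  Returns 0 with *mtx still held once cond(arg) is
 * true, or -ERESTARTSYS with *mtx already released if a signal arrives
 * first. */
static int demo_wait_event(struct mutex *mtx, wait_queue_head_t *wq,
                           int (*cond)(void *), void *arg)
{
        DEFINE_WAIT(wait);
        int rc = 0;

        for (;;) {
                prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
                if (cond(arg))
                        break;
                if (signal_pending(current)) {
                        rc = -ERESTARTSYS;
                        mutex_unlock(mtx);      /* error path: hand back unlocked */
                        break;
                }
                mutex_unlock(mtx);              /* drop the lock while sleeping */
                schedule();
                mutex_lock(mtx);                /* re-take it before re-checking */
        }
        finish_wait(wq, &wait);
        return rc;
}

Because the mutex is dropped across the sleep, entries can be consumed underneath the sleeper, which is why the read loop above re-checks head == tail after waking.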
-@@ -2540,29 +2556,41 @@ static unsigned int spufs_switch_log_pol
-       struct inode *inode = file->f_path.dentry->d_inode;
-       struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
-       unsigned int mask = 0;
-+      int rc;
-       poll_wait(file, &ctx->switch_log->wait, wait);
-+      rc = spu_acquire(ctx);
-+      if (rc)
-+              return rc;
-+
-       if (spufs_switch_log_used(ctx) > 0)
-               mask |= POLLIN;
-+      spu_release(ctx);
-+
-       return mask;
- }
- static const struct file_operations spufs_switch_log_fops = {
--      .owner  = THIS_MODULE,
--      .open   = spufs_switch_log_open,
--      .read   = spufs_switch_log_read,
--      .poll   = spufs_switch_log_poll,
-+      .owner          = THIS_MODULE,
-+      .open           = spufs_switch_log_open,
-+      .read           = spufs_switch_log_read,
-+      .poll           = spufs_switch_log_poll,
-+      .release        = spufs_switch_log_release,
- };
-+/**
-+ * Log a context switch event to a switch log reader.
-+ *
-+ * Must be called with ctx->state_mutex held.
-+ */
- void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
-               u32 type, u32 val)
- {
-       if (!ctx->switch_log)
-               return;
--      spin_lock(&ctx->switch_log->lock);
-       if (spufs_switch_log_avail(ctx) > 1) {
-               struct switch_log_entry *p;
-@@ -2576,7 +2604,6 @@ void spu_switch_log_notify(struct spu *s
-               ctx->switch_log->head =
-                       (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
-       }
--      spin_unlock(&ctx->switch_log->lock);
-       wake_up(&ctx->switch_log->wait);
- }
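spufs_switch_log_used() and spufs_switch_log_avail() are referenced throughout but their bodies are outside this hunk; the head/tail accounting they imply looks roughly like the sketch below. This is an assumption based on how head and tail are advanced modulo SWITCH_LOG_BUFSIZE in the code above, not a copy of the spufs helpers, and demo_log/DEMO_LOG_BUFSIZE are invented names.

#define DEMO_LOG_BUFSIZE 4096

struct demo_log {
        unsigned long head;     /* next slot the notifier writes */
        unsigned long tail;     /* next slot the reader consumes */
};

/* Entries waiting to be read; head == tail means empty. */
static inline unsigned long demo_log_used(const struct demo_log *log)
{
        return (log->head + DEMO_LOG_BUFSIZE - log->tail) % DEMO_LOG_BUFSIZE;
}

/* Slots still free for the notifier to fill. */
static inline unsigned long demo_log_avail(const struct demo_log *log)
{
        return DEMO_LOG_BUFSIZE - demo_log_used(log);
}

With this accounting a completely full ring would be indistinguishable from an empty one (head == tail in both cases), which would explain why spu_switch_log_notify() only logs an entry while avail is greater than 1: one slot is always left free.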
---- a/arch/powerpc/platforms/cell/spufs/run.c
-+++ b/arch/powerpc/platforms/cell/spufs/run.c
-@@ -249,6 +249,7 @@ static int spu_run_fini(struct spu_conte
-       spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
-       clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
-+      spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status);
-       spu_release(ctx);
-       if (signal_pending(current))
-@@ -417,8 +418,6 @@ long spufs_run_spu(struct spu_context *c
-       ret = spu_run_fini(ctx, npc, &status);
-       spu_yield(ctx);
--      spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, status);
--
-       if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
-           (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
-               ctx->stats.libassist++;
---- a/arch/powerpc/platforms/cell/spufs/spufs.h
-+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
-@@ -65,7 +65,6 @@ enum {
- };
- struct switch_log {
--      spinlock_t              lock;
-       wait_queue_head_t       wait;
-       unsigned long           head;
-       unsigned long           tail;