.release = single_release,
};
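+/*
+ * The switch log is a fixed-size ring buffer of switch_log_entry
+ * records: head is the producer index, tail the consumer index.
+ */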
+static inline int spufs_switch_log_used(struct spu_context *ctx)
+{
+ return (ctx->switch_log->head - ctx->switch_log->tail) %
+ SWITCH_LOG_BUFSIZE;
+}
+
+static inline int spufs_switch_log_avail(struct spu_context *ctx)
+{
+ return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
+}
+
+static int spufs_switch_log_open(struct inode *inode, struct file *file)
+{
+ struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+
+ /*
+ * We (ab-)use the mapping_lock here because it serves a similar
+ * purpose in synchronizing open/close elsewhere. Maybe it should
+ * be renamed eventually.
+ */
+ mutex_lock(&ctx->mapping_lock);
+ if (ctx->switch_log) {
+ spin_lock(&ctx->switch_log->lock);
+ ctx->switch_log->head = 0;
+ ctx->switch_log->tail = 0;
+ spin_unlock(&ctx->switch_log->lock);
+ } else {
+ /*
+ * We allocate the switch log data structures on first open.
+ * They are never freed, because we assume a context will
+ * be traced until it goes away.
+ */
+ ctx->switch_log = kzalloc(sizeof(struct switch_log) +
+ SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
+ GFP_KERNEL);
+ if (!ctx->switch_log)
+ goto out;
+ spin_lock_init(&ctx->switch_log->lock);
+ init_waitqueue_head(&ctx->switch_log->wait);
+ }
+ mutex_unlock(&ctx->mapping_lock);
+
+ return 0;
+ out:
+ mutex_unlock(&ctx->mapping_lock);
+ return -ENOMEM;
+}
+
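+/*
+ * Format the entry at the current tail into tbuf. Each line reads
+ * "<sec>.<nsec> <spu_id> <type> <val> <timebase>".
+ */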
+static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
+{
+ struct switch_log_entry *p;
+
+ p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;
+
+ return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
+ (unsigned int) p->tstamp.tv_sec,
+ (unsigned int) p->tstamp.tv_nsec,
+ p->spu_id,
+ (unsigned int) p->type,
+ (unsigned int) p->val,
+ (unsigned long long) p->timebase);
+}
+
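+/*
+ * Copy formatted log entries to the user buffer, blocking for new
+ * entries unless O_NONBLOCK is set. Returns the number of bytes
+ * copied, or a negative error if nothing could be copied.
+ */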
+static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+ int error = 0, cnt = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ while (cnt < len) {
+ char tbuf[128];
+ int width;
+
+ if (file->f_flags & O_NONBLOCK) {
+ if (spufs_switch_log_used(ctx) <= 0)
+ return cnt ? cnt : -EAGAIN;
+ } else {
+ /* Wait for data in buffer */
+ error = wait_event_interruptible(ctx->switch_log->wait,
+ spufs_switch_log_used(ctx) > 0);
+ if (error)
+ break;
+ }
+
+ spin_lock(&ctx->switch_log->lock);
+ if (ctx->switch_log->head == ctx->switch_log->tail) {
+ /* another reader consumed the entry; go back and wait */
+ spin_unlock(&ctx->switch_log->lock);
+ continue;
+ }
+
+ width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
+ if (width < len - cnt) {
+ ctx->switch_log->tail =
+ (ctx->switch_log->tail + 1) %
+ SWITCH_LOG_BUFSIZE;
+ }
+
+ spin_unlock(&ctx->switch_log->lock);
+
+ /*
+ * If the record does not fit in the space remaining in the
+ * user buffer, return the partial read so far.
+ */
+ if (width >= len - cnt)
+ break;
+
+ if (copy_to_user(buf + cnt, tbuf, width)) {
+ error = -EFAULT;
+ break;
+ }
+ cnt += width;
+ }
+
+ return cnt == 0 ? error : cnt;
+}
+
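+/* The file is pollable; POLLIN is reported once an entry is queued. */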
+static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+ unsigned int mask = 0;
+
+ poll_wait(file, &ctx->switch_log->wait, wait);
+
+ if (spufs_switch_log_used(ctx) > 0)
+ mask |= POLLIN;
+
+ return mask;
+}
+
+static const struct file_operations spufs_switch_log_fops = {
+ .owner = THIS_MODULE,
+ .open = spufs_switch_log_open,
+ .read = spufs_switch_log_read,
+ .poll = spufs_switch_log_poll,
+};
+
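+/*
+ * Append a context-switch event to the log, if logging has been
+ * enabled by opening the switch_log file. The entry is dropped when
+ * the ring buffer is full; one slot is always kept free so that
+ * head == tail means the buffer is empty.
+ */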
+void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
+ u32 type, u32 val)
+{
+ if (!ctx->switch_log)
+ return;
+
+ spin_lock(&ctx->switch_log->lock);
+ if (spufs_switch_log_avail(ctx) > 1) {
+ struct switch_log_entry *p;
+
+ p = ctx->switch_log->log + ctx->switch_log->head;
+ ktime_get_ts(&p->tstamp);
+ p->timebase = get_tb();
+ p->spu_id = spu ? spu->number : -1;
+ p->type = type;
+ p->val = val;
+
+ ctx->switch_log->head =
+ (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
+ }
+ spin_unlock(&ctx->switch_log->lock);
+
+ wake_up(&ctx->switch_log->wait);
+}
struct tree_descr spufs_dir_contents[] = {
{ "capabilities", &spufs_caps_fops, 0444, },
{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
{ "tid", &spufs_tid_fops, 0444, },
{ "stat", &spufs_stat_fops, 0444, },
+ { "switch_log", &spufs_switch_log_fops, 0444 },
{},
};
SPU_SCHED_SPU_RUN, /* context is within spu_run */
};
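+/* Number of entries in the per-context switch log ring buffer. */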
+enum {
+ SWITCH_LOG_BUFSIZE = 4096,
+};
+
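+/* Event types passed to spu_switch_log_notify(). */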
+enum {
+ SWITCH_LOG_START,
+ SWITCH_LOG_STOP,
+ SWITCH_LOG_EXIT,
+};
+
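+/*
+ * Per-context switch log: a lock protecting head and tail, a wait
+ * queue for readers, and the ring buffer of entries itself.
+ */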
+struct switch_log {
+ spinlock_t lock;
+ wait_queue_head_t wait;
+ unsigned long head;
+ unsigned long tail;
+ struct switch_log_entry {
+ struct timespec tstamp;
+ s32 spu_id;
+ u32 type;
+ u32 val;
+ u64 timebase;
+ } log[];
+};
+
struct spu_context {
struct spu *spu; /* pointer to a physical SPU */
struct spu_state csa; /* SPU context save area. */
unsigned long long libassist;
} stats;
+ /* context switch log */
+ struct switch_log *switch_log;
+
struct list_head aff_list;
int aff_head;
int aff_offset;
void spu_deactivate(struct spu_context *ctx);
void spu_yield(struct spu_context *ctx);
void spu_switch_notify(struct spu *spu, struct spu_context *ctx);
+void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
+ u32 type, u32 val);
void spu_set_timeslice(struct spu_context *ctx);
void spu_update_sched_info(struct spu_context *ctx);
void __spu_update_sched_info(struct spu_context *ctx);