if (stat & 0xff0000)
ret |= POLLIN | POLLRDNORM;
else {
- ctx->csa.priv1.int_stat_class0_RW &= ~0x1;
- ctx->csa.priv1.int_mask_class2_RW |= 0x1;
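+ /* clear any stale mailbox-available interrupt status, then
+ * unmask the interrupt so the poller is woken when the SPU
+ * writes new mbox data */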
+ ctx->csa.priv1.int_stat_class2_RW &=
+ ~CLASS2_MAILBOX_INTR;
+ ctx->csa.priv1.int_mask_class2_RW |=
+ CLASS2_ENABLE_MAILBOX_INTR;
}
}
if (events & (POLLOUT | POLLWRNORM)) {
if (stat & 0x00ff00)
ret |= POLLOUT | POLLWRNORM;
else {
- ctx->csa.priv1.int_stat_class0_RW &= ~0x10;
- ctx->csa.priv1.int_mask_class2_RW |= 0x10;
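+ /* clear stale threshold-interrupt status and unmask it so the
+ * poller is woken once inbound-mailbox space frees up */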
+ ctx->csa.priv1.int_stat_class2_RW &=
+ ~CLASS2_MAILBOX_THRESHOLD_INTR;
+ ctx->csa.priv1.int_mask_class2_RW |=
+ CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
}
}
spin_unlock_irq(&ctx->csa.register_lock);
ret = 4;
} else {
/* make sure we get woken up by the interrupt */
- ctx->csa.priv1.int_mask_class2_RW |= 0x1UL;
+ ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR;
ret = 0;
}
spin_unlock(&ctx->csa.register_lock);
BUG_ON(avail != (4 - slot));
ctx->csa.spu_mailbox_data[slot] = data;
ctx->csa.spu_chnlcnt_RW[29] = ++slot;
- ctx->csa.prob.mb_stat_R = (((4 - slot) & 0xff) << 8);
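+ /* update only the wbox free-slot count in bits 8-15 of
+ * mb_stat_R, preserving the mbox and ibox counts held in the
+ * other bytes */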
+ ctx->csa.prob.mb_stat_R &= ~(0x00ff00);
+ ctx->csa.prob.mb_stat_R |= (((4 - slot) & 0xff) << 8);
gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
ret = 4;
} else {
/* make sure we get woken up by the interrupt when space
becomes available */
- ctx->csa.priv1.int_mask_class2_RW |= 0x10;
+ ctx->csa.priv1.int_mask_class2_RW |=
+ CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
ret = 0;
}
spin_unlock(&ctx->csa.register_lock);
return ctx->csa.lscsa->ls;
}
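+/* With no SPU attached, a privcntl write simply records the value
+ * in the saved priv2 area; it takes effect when the context is
+ * restored to hardware. */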
+static void spu_backing_privcntl_write(struct spu_context *ctx, u64 val)
+{
+ ctx->csa.priv2.spu_privcntl_RW = val;
+}
+
static u32 spu_backing_runcntl_read(struct spu_context *ctx)
{
return ctx->csa.prob.spu_runcntl_RW;
}

static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
{
spin_lock(&ctx->csa.register_lock);
ctx->csa.prob.spu_runcntl_RW = val;
if (val & SPU_RUNCNTL_RUNNABLE) {
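+ /* clear the stop-reason bits left over from the previous run,
+ * so the next stop reports its own condition */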
+ ctx->csa.prob.spu_status_R &=
+ ~SPU_STATUS_STOPPED_BY_STOP &
+ ~SPU_STATUS_STOPPED_BY_HALT &
+ ~SPU_STATUS_SINGLE_STEP &
+ ~SPU_STATUS_INVALID_INSTR &
+ ~SPU_STATUS_INVALID_CH;
ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
} else {
ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
}
spin_unlock(&ctx->csa.register_lock);
}
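+/* Stop the SPU by writing SPU_RUNCNTL_STOP through the regular
+ * runcntl_write path. */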
+static void spu_backing_runcntl_stop(struct spu_context *ctx)
+{
+ spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
+}
+
static void spu_backing_master_start(struct spu_context *ctx)
{
struct spu_state *csa = &ctx->csa;
/* FIXME: what are the side-effects of this? */
prob->dma_querymask_RW = mask;
prob->dma_querytype_RW = mode;
+ /* In the current implementation, the SPU context is always
+ * acquired in runnable state when new bits are added to the
+ * mask (tagwait), so it's sufficient just to mask
+ * dma_tagstatus_R with the 'mask' parameter here.
+ */
+ ctx->csa.prob.dma_tagstatus_R &= mask;
out:
spin_unlock(&ctx->csa.register_lock);

return ret;
}

static void spu_backing_restart_dma(struct spu_context *ctx)
{
- /* nothing to do here */
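+ /* set the restart bit in the saved MFC control register; the
+ * suspended DMA queue is then restarted when the context is
+ * loaded back onto an SPU */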
+ ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
}
struct spu_context_ops spu_backing_ops = {
.npc_write = spu_backing_npc_write,
.status_read = spu_backing_status_read,
.get_ls = spu_backing_get_ls,
+ .privcntl_write = spu_backing_privcntl_write,
.runcntl_read = spu_backing_runcntl_read,
.runcntl_write = spu_backing_runcntl_write,
+ .runcntl_stop = spu_backing_runcntl_stop,
.master_start = spu_backing_master_start,
.master_stop = spu_backing_master_stop,
.set_mfc_query = spu_backing_set_mfc_query,