}
-static void qh_destroy (struct kref *kref)
+static void qh_destroy(struct ehci_qh *qh)
{
- struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
struct ehci_hcd *ehci = qh->ehci;
/* clean qtds first, and know this is not linked */
...
}

static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
{
...
if (!qh)
	return qh;

memset (qh, 0, sizeof *qh);
- kref_init(&qh->kref);
+ qh->refcount = 1;
qh->ehci = ehci;
qh->qh_dma = dma;
// INIT_LIST_HEAD (&qh->qh_list);
...
}

/* to share a qh (cpu threads, or hc) */
static inline struct ehci_qh *qh_get (struct ehci_qh *qh)
{
- kref_get(&qh->kref);
+ WARN_ON(!qh->refcount);
+ qh->refcount++;
return qh;
}
static inline void qh_put (struct ehci_qh *qh)
{
- kref_put(&qh->kref, qh_destroy);
+ if (!--qh->refcount)
+ qh_destroy(qh);
}
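
A minimal usage sketch (an editor's illustration, not part of the patch): the plain u32 refcount stays consistent only because the EHCI driver serializes QH updates with the host controller's spinlock, as the comment added below spells out. The caller shown here and its flags variable are hypothetical.

/* hypothetical caller; assumes ehci->lock already protects all QH fields */
unsigned long flags;

spin_lock_irqsave(&ehci->lock, flags);
qh = qh_get(qh);	/* bump the reference; no atomic op needed under the lock */
/* ... link or scan qtds on this QH ... */
qh_put(qh);		/* drop it; qh_destroy() runs when the count reaches zero */
spin_unlock_irqrestore(&ehci->lock, flags);
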
/*-------------------------------------------------------------------------*/

/* fields in struct ehci_qh (ehci.h): */
...
struct ehci_qh *reclaim; /* next to reclaim */
struct ehci_hcd *ehci;
- struct kref kref;
+
+ /*
+ * Do NOT use atomic operations for QH refcounting. On some CPUs
+ * (PPC7448 for example), atomic operations cannot be performed on
+ * memory that is cache-inhibited (i.e. being used for DMA).
+ * Spinlocks are used to protect all QH fields.
+ */
+ u32 refcount;
unsigned stamp;
u8 qh_state;