*/
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
+#include <asm/bitops.h>
#include <asm/bug.h>
#include <asm/processor.h>
* swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
*/
+/* Page status table bits for virtualization */
+#define RCP_PCL_BIT 55
+#define RCP_HR_BIT 54
+#define RCP_HC_BIT 53
+#define RCP_GR_BIT 50
+#define RCP_GC_BIT 49
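+/*
+ * PCL is a lock bit for the page status table entry; HR/HC carry the
+ * host's and GR/GC the guest's view of the storage-key
+ * referenced/changed state of a page.
+ */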
+
#ifndef __s390x__
/* Bits in the segment table address-space-control-element */
#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b) (pte_val(a) == pte_val(b))
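+/*
+ * A page status table (pgste) shadows each page table at offset
+ * PTRS_PER_PTE when mm->context.pgstes is set; RCP_PCL_BIT acts as a
+ * bit spinlock serializing updates to a single pgste entry.
+ */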
+static inline void rcp_lock(pte_t *ptep)
+{
+#ifdef CONFIG_PGSTE
+ unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+ preempt_disable();
+ while (test_and_set_bit(RCP_PCL_BIT, pgste))
+ ; /* busy-wait until the current holder clears RCP_PCL_BIT */
+#endif
+}
+
+static inline void rcp_unlock(pte_t *ptep)
+{
+#ifdef CONFIG_PGSTE
+ unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+ clear_bit(RCP_PCL_BIT, pgste);
+ preempt_enable();
+#endif
+}
+
+/* forward declaration for SetPageUptodate in page-flags.h */
+static inline void page_clear_dirty(struct page *page);
+#include <linux/page-flags.h>
+
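+/*
+ * Fold the storage key into the shadow state: the key's R/C bits go
+ * to the guest bits (GR/GC) in the pgste, pending host bits (HR/HC)
+ * move to the struct page referenced/dirty flags.
+ */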
+static inline void ptep_rcp_copy(pte_t *ptep)
+{
+#ifdef CONFIG_PGSTE
+ struct page *page = virt_to_page(pte_val(*ptep));
+ unsigned int skey;
+ unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+
+ skey = page_get_storage_key(page_to_phys(page));
+ if (skey & _PAGE_CHANGED)
+ set_bit(RCP_GC_BIT, pgste);
+ if (skey & _PAGE_REFERENCED)
+ set_bit(RCP_GR_BIT, pgste);
+ if (test_and_clear_bit(RCP_HC_BIT, pgste))
+ SetPageDirty(page);
+ if (test_and_clear_bit(RCP_HR_BIT, pgste))
+ SetPageReferenced(page);
+#endif
+}
+
/*
* query functions pte_write/pte_dirty/pte_young only work if
* pte_present() is true. Undefined behaviour if not..
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
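+ /* rescue the referenced/changed state before the pte is emptied */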
+ if (mm->context.pgstes)
+ ptep_rcp_copy(ptep);
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
if (mm->context.noexec)
pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
+#ifdef CONFIG_PGSTE
+ unsigned long physpage;
+ int young;
+ unsigned long *pgste;
+
+ if (!vma->vm_mm->context.pgstes)
+ return 0;
+ physpage = pte_val(*ptep) & PAGE_MASK;
+ pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+
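+ /*
+ * Young if the storage key's reference bit or a pending host bit
+ * (HR) is set; the key state is also folded into the guest bit
+ * (GR) so the guest's view of the page survives this query.
+ */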
+ young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
+ rcp_lock(ptep);
+ if (young)
+ set_bit(RCP_GR_BIT, pgste);
+ young |= test_and_clear_bit(RCP_HR_BIT, pgste);
+ rcp_unlock(ptep);
+ return young;
+#endif
return 0;
}
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
- /* No need to flush TLB; bits are in storage key */
+ /* No need to flush TLB:
+ * on s390 the reference bits live in the storage key and never in
+ * the TLB. With virtualization we handle the reference bit here,
+ * without virtualization we can simply return. */
+#ifdef CONFIG_PGSTE
+ return ptep_test_and_clear_young(vma, address, ptep);
+#endif
return 0;
}
: "=m" (*ptep) : "m" (*ptep),
"a" (pto), "a" (address));
}
- pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}
static inline void ptep_invalidate(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
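+ /*
+ * With pgstes: take the RCP lock, invalidate the pte and preserve
+ * the referenced/changed state before marking the entry empty.
+ */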
+ if (mm->context.pgstes) {
+ rcp_lock(ptep);
+ __ptep_ipte(address, ptep);
+ ptep_rcp_copy(ptep);
+ pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+ rcp_unlock(ptep);
+ return;
+ }
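+ /* no pgstes: invalidate the pte (and its noexec shadow) directly */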
__ptep_ipte(address, ptep);
- if (mm->context.noexec)
+ pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+ if (mm->context.noexec) {
__ptep_ipte(address, ptep + PTRS_PER_PTE);
+ pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
+ }
}
/*