flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}
-/* Flush all unused kmap mappings in order to remove stray
- mappings. */
+/**
+ * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
+ */
void kmap_flush_unused(void)
{
spin_lock(&kmap_lock);
return vaddr;
}
+/**
+ * kmap_high - map a highmem page into memory
+ * @page: &struct page to map
+ *
+ * Returns the page's virtual memory address.
+ *
+ * We cannot call this from interrupts, as it may block.
+ */
void *kmap_high(struct page *page)
{
unsigned long vaddr;
/*
* For highmem pages, we can't trust "virtual" until
* after we have the lock.
- *
- * We cannot call this from interrupts, as it may block
*/
spin_lock(&kmap_lock);
vaddr = (unsigned long)page_address(page);
EXPORT_SYMBOL(kmap_high);
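(Not part of the patch: a minimal usage sketch of the API being documented here and by the kunmap_high() comment below. Callers normally go through kmap()/kunmap(), which use the page's permanent mapping for lowmem pages and only fall into kmap_high()/kunmap_high() for highmem pages; since the mapping may block, this cannot be done from interrupt context. The helper name and the alloc_page() usage are illustrative only.)

	#include <linux/mm.h>
	#include <linux/gfp.h>
	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Illustrative only: zero a possibly-highmem page via the kmap API. */
	static void zero_page_example(void)
	{
		struct page *page = alloc_page(GFP_HIGHUSER);	/* may be highmem */
		void *vaddr;

		if (!page)
			return;

		vaddr = kmap(page);		/* highmem pages end up in kmap_high() */
		memset(vaddr, 0, PAGE_SIZE);	/* page now has a kernel virtual address */
		kunmap(page);			/* drops the reference via kunmap_high() */
		__free_page(page);
	}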
+/**
+ * kunmap_high - unmap a highmem page
+ * @page: &struct page to unmap
+ */
void kunmap_high(struct page *page)
{
unsigned long vaddr;
return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}
+/**
+ * page_address - get the mapped virtual address of a page
+ * @page: &struct page to get the virtual address of
+ *
+ * Returns the page's virtual address.
+ */
void *page_address(struct page *page)
{
unsigned long flags;
EXPORT_SYMBOL(page_address);
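(Again not part of the patch: a hedged sketch of the behaviour the comment describes. page_address() returns the permanent mapping for lowmem pages and NULL for a highmem page that is not currently kmapped, so code that may see highmem typically falls back to kmap(). The helper name is made up for illustration.)

	/* Illustrative only: read the first byte of @page whether or not it
	 * currently has a kernel virtual address. */
	static unsigned char first_byte_example(struct page *page)
	{
		unsigned char c;
		void *va = page_address(page);	/* NULL for an unmapped highmem page */

		if (va)
			return *(unsigned char *)va;

		va = kmap(page);		/* establish a temporary mapping */
		c = *(unsigned char *)va;
		kunmap(page);
		return c;
	}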
+/**
+ * set_page_address - set a page's virtual address
+ * @page: &struct page to set
+ * @virtual: virtual address to use
+ */
void set_page_address(struct page *page, void *virtual)
{
unsigned long flags;
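(Not part of the patch. set_page_address() is mainly used by this file itself; as a rough sketch, assuming the surrounding map_new_virtual() and flush_all_zero_pkmaps() logic, the hashed virtual address is recorded when a pkmap entry is installed and cleared again when the entry is recycled:)

	/* In map_new_virtual(), once the new pkmap PTE has been installed: */
	set_page_address(page, (void *)vaddr);

	/* In flush_all_zero_pkmaps(), when an unused entry is torn down: */
	set_page_address(page, NULL);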