__pgdat->node_start_pfn + __pgdat->node_spanned_pages - 1; \
})
-#define pfn_to_page(pfn) \
-({ \
- unsigned long __pfn = pfn; \
- int __node = pfn_to_nid(__pfn); \
- &NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)]; \
-})
-
-#define page_to_pfn(pg) \
-({ \
- struct page *__page = pg; \
- struct zone *__zone = page_zone(__page); \
- (unsigned long)(__page - __zone->zone_mem_map) \
- + __zone->zone_start_pfn; \
-})
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
/*
* pfn_valid should be made as fast as possible, and the current definition
#ifndef CONFIG_DISCONTIGMEM
#define PFN_BASE (CONFIG_MEMORY_START >> PAGE_SHIFT)
-#define pfn_to_page(pfn) (mem_map + ((pfn) - PFN_BASE))
-#define page_to_pfn(page) \
- ((unsigned long)((page) - mem_map) + PFN_BASE)
+#define ARCH_PFN_OFFSET PFN_BASE
#define pfn_valid(pfn) (((pfn) - PFN_BASE) < max_mapnr)
#endif /* !CONFIG_DISCONTIGMEM */
#endif /* __KERNEL__ */
+#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
#endif /* _ASM_M32R_PAGE_H */
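
For reference (not part of the patch itself): with CONFIG_FLATMEM, <asm-generic/memory_model.h> derives the pfn/page conversions from ARCH_PFN_OFFSET roughly along these lines, which is why the open-coded PFN_BASE-based pfn_to_page()/page_to_pfn() definitions removed above are no longer needed. This is a sketch of the generic flatmem case, not m32r-specific code:

	/* Flatmem variant in <asm-generic/memory_model.h> (sketch):
	 * ARCH_PFN_OFFSET -- here defined to PFN_BASE by the arch --
	 * supplies the offset that used to be open-coded per arch. */
	#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
	#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
					 ARCH_PFN_OFFSET)

	#define pfn_to_page __pfn_to_page
	#define page_to_pfn __page_to_pfn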