#ifndef _LINUX_MM_H
#define _LINUX_MM_H

/*
 * Userspace stand-ins for the kernel's <linux/mm.h> interfaces: allocation
 * maps straight onto libc, and anything kernel-only is stubbed out with
 * assert(0).
 */

#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>	/* bool, used by IS_ERR() below */
#include <errno.h>
#include <limits.h>
#include <stdio.h>

typedef unsigned long dma_addr_t;

/* struct page is only ever handled as an opaque pointer in this shim */
struct page;

/* branch hints mean nothing in userspace: unlikely(x) collapses to (x) */
#define unlikely

#define BUG_ON(x) assert(!(x))

#define WARN_ON(condition) ({                           \
        int __ret_warn_on = !!(condition);              \
        unlikely(__ret_warn_on);                        \
})

#define WARN_ON_ONCE(condition) ({                      \
        int __ret_warn_on = !!(condition);              \
        if (unlikely(__ret_warn_on))                    \
                assert(0);                              \
        unlikely(__ret_warn_on);                        \
})
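
/*
 * As in the kernel, WARN_ON()/WARN_ON_ONCE() evaluate to the normalized
 * condition, so callers can still write, for example:
 *
 *      if (WARN_ON(!ptr))
 *              return -EINVAL;
 *
 * Unlike the kernel, nothing is printed: WARN_ON() is silent here and a
 * WARN_ON_ONCE() hit escalates to a hard assert().
 */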

#define PAGE_SIZE       (4096)
#define PAGE_SHIFT      (12)
#define PAGE_MASK       (~(PAGE_SIZE-1))

#define __ALIGN_KERNEL(x, a)            __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask)    (((x) + (mask)) & ~(mask))
#define ALIGN(x, a)                     __ALIGN_KERNEL((x), (a))
#define ALIGN_DOWN(x, a)                __ALIGN_KERNEL((x) - ((a) - 1), (a))

#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

#define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)
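
/*
 * Worked example with the 4 KiB page size above:
 *      ALIGN(5000, PAGE_SIZE)       == 8192
 *      ALIGN_DOWN(5000, PAGE_SIZE)  == 4096
 *      offset_in_page(5000)         == 904
 */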

#define virt_to_page(x) ((void *)x)
#define page_address(x) ((void *)x)

static inline unsigned long page_to_phys(struct page *page)
{
        /* there are no physical addresses here; this must never be reached */
        assert(0);

        return 0;
}

#define page_to_pfn(page)       ((unsigned long)(page) / PAGE_SIZE)
#define pfn_to_page(pfn)        (void *)((pfn) * PAGE_SIZE)
#define nth_page(page, n)       pfn_to_page(page_to_pfn((page)) + (n))
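
/*
 * A struct page pointer in this environment is nothing more than the page's
 * virtual address, so pfn conversion is plain address arithmetic and
 * nth_page() just steps n pages further on.
 */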

#define __min(t1, t2, min1, min2, x, y) ({              \
        t1 min1 = (x);                                  \
        t2 min2 = (y);                                  \
        (void) (&min1 == &min2);                        \
        min1 < min2 ? min1 : min2; })

#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)

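/*
 * min()/min_t() below expand through __min(): the pointer comparison
 * (&min1 == &min2) is discarded but makes the compiler warn when x and y
 * have incompatible types, and __UNIQUE_ID() uses __COUNTER__ to give the
 * temporaries fresh names, so nested min() calls do not shadow each other
 * while each argument is still evaluated only once.
 */
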
#define min(x, y)                                       \
        __min(typeof(x), typeof(y),                     \
              __UNIQUE_ID(min1_), __UNIQUE_ID(min2_),   \
              x, y)

#define min_t(type, x, y)                               \
        __min(type, type,                               \
              __UNIQUE_ID(min1_), __UNIQUE_ID(min2_),   \
              x, y)

#define pagefault_disabled() (0)

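/*
 * There is no highmem in a userspace build, so the kmap()/kunmap() family
 * below exists only to satisfy callers at compile time; actually reaching
 * any of these stubs is a bug in the test and trips the assert().
 */
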
static inline void *kmap(struct page *page)
{
        assert(0);

        return NULL;
}

static inline void *kmap_atomic(struct page *page)
{
        assert(0);

        return NULL;
}

static inline void kunmap(void *addr)
{
        assert(0);
}

static inline void kunmap_atomic(void *addr)
{
        assert(0);
}
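
/*
 * Allocation is backed directly by libc: gfp flags are accepted but
 * ignored, and free_page()/kfree() are thin wrappers around free().
 */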

static inline unsigned long __get_free_page(unsigned int flags)
{
        return (unsigned long)malloc(PAGE_SIZE);
}

static inline void free_page(unsigned long page)
{
        free((void *)page);
}

static inline void *kmalloc(unsigned int size, unsigned int flags)
{
        return malloc(size);
}

static inline void *
kmalloc_array(unsigned int n, unsigned int size, unsigned int flags)
{
        /* unlike the kernel's kmalloc_array(), no overflow check on n * size */
        return malloc(n * size);
}

#define kfree(x) free(x)

#define kmemleak_alloc(a, b, c, d)
#define kmemleak_free(a)

#define PageSlab(p) (0)
#define flush_dcache_page(p)

#define MAX_ERRNO       4095

#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
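
/*
 * Same convention as the kernel's err.h: the top MAX_ERRNO values of the
 * address space are reserved, so a pointer in that range encodes a negative
 * errno value rather than a real address.
 */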

/*
 * __must_check and __force are kernel annotations with no libc equivalent;
 * if the build has not provided them elsewhere, make them no-ops.
 */
#ifndef __must_check
#define __must_check
#endif
#ifndef __force
#define __force
#endif

static inline void * __must_check ERR_PTR(long error)
{
        return (void *) error;
}

static inline long __must_check PTR_ERR(__force const void *ptr)
{
        return (long) ptr;
}

static inline bool __must_check IS_ERR(__force const void *ptr)
{
        return IS_ERR_VALUE((unsigned long)ptr);
}

static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
{
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);
        else
                return 0;
}
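
/*
 * Typical round trip through these helpers, for illustration:
 *
 *      void *p = ERR_PTR(-ENOMEM);
 *      if (IS_ERR(p))
 *              return PTR_ERR(p);      -> evaluates to -ENOMEM
 */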

#define IS_ENABLED(x) (0)

#endif