/*
 * SN Platform Special Memory (mspec) Support
 *
 * This driver exports the SN special memory (mspec) facility to user
 * processes. Two flavors of mapped memory are made available: cached
 * and uncached. Uncached mappings are backed by pages obtained from the
 * platform's uncached allocator and are mapped with non-cached page
 * protections; pages are allocated lazily on first fault and are cleared
 * and returned to the allocator when the last mapping goes away.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/refcount.h>
#include <asm/page.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/uncached.h>

#define CACHED_ID	"Cached,"
#define UNCACHED_ID	"Uncached"
#define REVISION	"4.0"
#define MSPEC_BASENAME	"mspec"

/* Page types handled by this driver. */
enum mspec_page_type {
	MSPEC_CACHED = 2,
	MSPEC_UNCACHED
};

/*
 * One of these structures is allocated when an mspec region is mmaped.
 * It is pointed to by vma->vm_private_data and is shared by all vmas
 * that are split off from the original vma, so the refcount is
 * manipulated atomically. It records the addresses of the uncached
 * pages backing the mapping so they can be cleared and freed when the
 * last reference goes away.
 */
struct vma_data {
	refcount_t refcnt;		/* number of vmas sharing the data */
	spinlock_t lock;		/* serializes access to this structure */
	int count;			/* number of pages allocated */
	enum mspec_page_type type;	/* type of pages allocated */
	unsigned long vm_start;		/* original (unsplit) base */
	unsigned long vm_end;		/* original (unsplit) end */
	unsigned long maddr[];		/* array of mspec page addresses */
};

/*
 * mspec_open
 *
 * Called when a new reference to the vma is created (fork, vma split,
 * etc.). Takes an extra reference on the shared vma_data so it is not
 * freed while still in use.
 */
static void
mspec_open(struct vm_area_struct *vma)
{
	struct vma_data *vdata;

	vdata = vma->vm_private_data;
	refcount_inc(&vdata->refcnt);
}

/*
 * mspec_close
 *
 * Called when a device mapping is torn down. When the last reference
 * is dropped, frees all mspec pages belonging to the vmas sharing this
 * vma_data structure.
 */
static void
mspec_close(struct vm_area_struct *vma)
{
	struct vma_data *vdata;
	int index, last_index;
	unsigned long my_page;

	vdata = vma->vm_private_data;

	if (!refcount_dec_and_test(&vdata->refcnt))
		return;

	last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
	for (index = 0; index < last_index; index++) {
		if (vdata->maddr[index] == 0)
			continue;
		/*
		 * Clear the page before returning it to the uncached
		 * allocator so stale contents are not handed to the
		 * next user of the page.
		 */
		my_page = vdata->maddr[index];
		vdata->maddr[index] = 0;
		memset((char *)my_page, 0, PAGE_SIZE);
		uncached_free_page(my_page, 1);
	}

	kvfree(vdata);
}

/*
 * mspec_fault
 *
 * Allocates an uncached page on first touch and maps it into the
 * faulting address space.
 */
static vm_fault_t
mspec_fault(struct vm_fault *vmf)
{
	unsigned long paddr, maddr;
	unsigned long pfn;
	pgoff_t index = vmf->pgoff;
	struct vma_data *vdata = vmf->vma->vm_private_data;

	maddr = (volatile unsigned long) vdata->maddr[index];
	if (maddr == 0) {
		maddr = uncached_alloc_page(numa_node_id(), 1);
		if (maddr == 0)
			return VM_FAULT_OOM;

		spin_lock(&vdata->lock);
		if (vdata->maddr[index] == 0) {
			vdata->count++;
			vdata->maddr[index] = maddr;
		} else {
			/* Another thread won the race; free our page. */
			uncached_free_page(maddr, 1);
			maddr = vdata->maddr[index];
		}
		spin_unlock(&vdata->lock);
	}

	paddr = maddr & ~__IA64_UNCACHED_OFFSET;
	pfn = paddr >> PAGE_SHIFT;

	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

static const struct vm_operations_struct mspec_vm_ops = {
	.open = mspec_open,
	.close = mspec_close,
	.fault = mspec_fault,
};

/*
 * mspec_mmap
 *
 * Called when mmapping the device. Initializes the vma with a fault
 * handler and a private data structure used to allocate, track, and
 * free the underlying pages.
 */
static int
mspec_mmap(struct file *file, struct vm_area_struct *vma,
	   enum mspec_page_type type)
{
	struct vma_data *vdata;
	int pages, vdata_size;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	if ((vma->vm_flags & VM_WRITE) == 0)
		return -EPERM;

	pages = vma_pages(vma);
	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
	vdata = kvzalloc(vdata_size, GFP_KERNEL);
	if (!vdata)
		return -ENOMEM;

	vdata->vm_start = vma->vm_start;
	vdata->vm_end = vma->vm_end;
	vdata->type = type;
	spin_lock_init(&vdata->lock);
	refcount_set(&vdata->refcnt, 1);
	vma->vm_private_data = vdata;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	if (vdata->type == MSPEC_UNCACHED)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &mspec_vm_ops;

	return 0;
}

static int
cached_mmap(struct file *file, struct vm_area_struct *vma)
{
	return mspec_mmap(file, vma, MSPEC_CACHED);
}

static int
uncached_mmap(struct file *file, struct vm_area_struct *vma)
{
	return mspec_mmap(file, vma, MSPEC_UNCACHED);
}

static const struct file_operations cached_fops = {
	.owner = THIS_MODULE,
	.mmap = cached_mmap,
	.llseek = noop_llseek,
};

static struct miscdevice cached_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "mspec_cached",
	.fops = &cached_fops
};

static const struct file_operations uncached_fops = {
	.owner = THIS_MODULE,
	.mmap = uncached_mmap,
	.llseek = noop_llseek,
};

static struct miscdevice uncached_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "mspec_uncached",
	.fops = &uncached_fops
};
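
/*
 * Illustrative sketch (not part of the driver): how a user process
 * might obtain uncached memory through this interface. It assumes the
 * misc device is exposed as /dev/mspec_uncached (the actual node name
 * depends on udev configuration); error handling is omitted.
 *
 *	int fd = open("/dev/mspec_uncached", O_RDWR);
 *	size_t len = 4 * getpagesize();
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * The mapping must be MAP_SHARED and writable, otherwise mspec_mmap()
 * rejects it with -EINVAL or -EPERM; pages are populated lazily by
 * mspec_fault() on first access.
 */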

/*
 * mspec_init
 *
 * Registers the cached and uncached misc devices. On failure the
 * already-registered device is torn down again.
 */
static int __init
mspec_init(void)
{
	int ret;

	ret = misc_register(&cached_miscdev);
	if (ret) {
		printk(KERN_ERR "%s: failed to register device %i\n",
		       CACHED_ID, ret);
		return ret;
	}
	ret = misc_register(&uncached_miscdev);
	if (ret) {
		printk(KERN_ERR "%s: failed to register device %i\n",
		       UNCACHED_ID, ret);
		misc_deregister(&cached_miscdev);
		return ret;
	}

	printk(KERN_INFO "%s %s initialized devices: %s %s\n",
	       MSPEC_BASENAME, REVISION, CACHED_ID, UNCACHED_ID);

	return 0;
}

static void __exit
mspec_exit(void)
{
	misc_deregister(&uncached_miscdev);
	misc_deregister(&cached_miscdev);
}

module_init(mspec_init);
module_exit(mspec_exit);

MODULE_AUTHOR("Silicon Graphics, Inc. <linux-altix@sgi.com>");
MODULE_DESCRIPTION("Driver for SGI SN special memory operations");
MODULE_LICENSE("GPL");