// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Platform Monitoring Technology (PMT) Class driver
 *
 * Copyright (c) 2020, Intel Corporation.
 * All Rights Reserved.
 *
 * Author: "Alexander Duyck" <alexander.h.duyck@linux.intel.com>
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>

#include "../vsec.h"
#include "class.h"

#define PMT_XA_START		0
#define PMT_XA_MAX		INT_MAX
#define PMT_XA_LIMIT		XA_LIMIT(PMT_XA_START, PMT_XA_MAX)

bool intel_pmt_is_early_client_hw(struct device *dev)
{
	struct intel_vsec_device *ivdev = dev_to_ivdev(dev);
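
	/*
	 * Early client hardware is flagged with the VSEC_QUIRK_EARLY_HW
	 * quirk; those parts locate the telemetry region differently
	 * (see the ACCESS_LOCAL handling in intel_pmt_populate_entry()).
	 */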
	return !!(ivdev->info->quirks & VSEC_QUIRK_EARLY_HW);
}
EXPORT_SYMBOL_GPL(intel_pmt_is_early_client_hw);
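
/*
 * sysfs
 */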
static ssize_t
intel_pmt_read(struct file *filp, struct kobject *kobj,
	       struct bin_attribute *attr, char *buf, loff_t off,
	       size_t count)
{
	struct intel_pmt_entry *entry = container_of(attr,
						     struct intel_pmt_entry,
						     pmt_bin_attr);

	if (off < 0)
		return -EINVAL;

	if (off >= entry->size)
		return 0;

	if (count > entry->size - off)
		count = entry->size - off;

	memcpy_fromio(buf, entry->base + off, count);

	return count;
}

static int
intel_pmt_mmap(struct file *filp, struct kobject *kobj,
	       struct bin_attribute *attr, struct vm_area_struct *vma)
{
	struct intel_pmt_entry *entry = container_of(attr,
						     struct intel_pmt_entry,
						     pmt_bin_attr);
	unsigned long vsize = vma->vm_end - vma->vm_start;
	struct device *dev = kobj_to_dev(kobj);
	unsigned long phys = entry->base_addr;
	unsigned long pfn = PFN_DOWN(phys);
	unsigned long psize;

	/* The telemetry region is read-only. */
	if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
		return -EROFS;

	/* Limit the mapping to the page-aligned span of the region. */
	psize = (PFN_UP(entry->base_addr + entry->size) - pfn) * PAGE_SIZE;
	if (vsize > psize) {
		dev_err(dev, "Requested mmap size is too large\n");
		return -EINVAL;
	}

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (io_remap_pfn_range(vma, vma->vm_start, pfn,
			       vsize, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

static ssize_t
guid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct intel_pmt_entry *entry = dev_get_drvdata(dev);

	return sprintf(buf, "0x%x\n", entry->guid);
}
static DEVICE_ATTR_RO(guid);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct intel_pmt_entry *entry = dev_get_drvdata(dev);

	return sprintf(buf, "%zu\n", entry->size);
}
static DEVICE_ATTR_RO(size);

static ssize_t
offset_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct intel_pmt_entry *entry = dev_get_drvdata(dev);

	return sprintf(buf, "%lu\n", offset_in_page(entry->base_addr));
}
static DEVICE_ATTR_RO(offset);

static struct attribute *intel_pmt_attrs[] = {
	&dev_attr_guid.attr,
	&dev_attr_size.attr,
	&dev_attr_offset.attr,
	NULL
};
ATTRIBUTE_GROUPS(intel_pmt);

static struct class intel_pmt_class = {
	.name = "intel_pmt",
	.owner = THIS_MODULE,
	.dev_groups = intel_pmt_groups,
};

static int intel_pmt_populate_entry(struct intel_pmt_entry *entry,
				    struct intel_pmt_header *header,
				    struct device *dev,
				    struct resource *disc_res)
{
	struct pci_dev *pci_dev = to_pci_dev(dev->parent);
	u8 bir;
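
	/*
	 * The base offset should always be 8 byte aligned.
	 *
	 * For non-local access types the lower 3 bits of base offset
	 * contains the index of the base address register where the
	 * telemetry can be found.
	 */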
	bir = GET_BIR(header->base_offset);

	/* Local access and BARID only for now */
	switch (header->access_type) {
	case ACCESS_LOCAL:
		if (bir) {
			dev_err(dev,
				"Unsupported BAR index %d for access type %d\n",
				bir, header->access_type);
			return -EINVAL;
		}
		/*
		 * For access_type LOCAL, the base address is as follows:
		 * base address = end of discovery region + base offset
		 */
		entry->base_addr = disc_res->end + 1 + header->base_offset;
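
		/*
		 * Some hardware uses a different calculation for the base
		 * address when access_type == ACCESS_LOCAL: the region is
		 * at a fixed offset in the same BAR as the header. Since
		 * only the header address is known here, search the PCI
		 * BARs for the one whose range contains it.
		 */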
		if (intel_pmt_is_early_client_hw(dev)) {
			int i;

			entry->base_addr = 0;
			for (i = 0; i < 6; i++)
				if (disc_res->start >= pci_resource_start(pci_dev, i) &&
				   (disc_res->start <= pci_resource_end(pci_dev, i))) {
					entry->base_addr = pci_resource_start(pci_dev, i) +
							   header->base_offset;
					break;
				}
			if (!entry->base_addr)
				return -EINVAL;
		}

		break;
	case ACCESS_BARID:
		/*
		 * If another BAR was specified then the base offset
		 * represents the offset within that BAR. So retrieve the
		 * address from the parent PCI device and add the offset.
		 */
		entry->base_addr = pci_resource_start(pci_dev, bir) +
				   GET_ADDRESS(header->base_offset);
		break;
	default:
		dev_err(dev, "Unsupported access type %d\n",
			header->access_type);
		return -EINVAL;
	}

	entry->guid = header->guid;
	entry->size = header->size;

	return 0;
}

static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
				  struct intel_pmt_namespace *ns,
				  struct device *parent)
{
	struct resource res = {0};
	struct device *dev;
	int ret;

	ret = xa_alloc(ns->xa, &entry->devid, entry, PMT_XA_LIMIT, GFP_KERNEL);
	if (ret)
		return ret;

	dev = device_create(&intel_pmt_class, parent, MKDEV(0, 0), entry,
			    "%s%d", ns->name, entry->devid);
	if (IS_ERR(dev)) {
		dev_err(parent, "Could not create %s%d device node\n",
			ns->name, entry->devid);
		ret = PTR_ERR(dev);
		goto fail_dev_create;
	}

	entry->kobj = &dev->kobj;

	if (ns->attr_grp) {
		ret = sysfs_create_group(entry->kobj, ns->attr_grp);
		if (ret)
			goto fail_sysfs;
	}
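
	/* if size is 0 assume no data buffer, so no file needed */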
	if (!entry->size)
		return 0;

	res.start = entry->base_addr;
	res.end = res.start + entry->size - 1;
	res.flags = IORESOURCE_MEM;

	entry->base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(entry->base)) {
		ret = PTR_ERR(entry->base);
		goto fail_ioremap;
	}

	sysfs_bin_attr_init(&entry->pmt_bin_attr);
	entry->pmt_bin_attr.attr.name = ns->name;
	entry->pmt_bin_attr.attr.mode = 0440;
	entry->pmt_bin_attr.mmap = intel_pmt_mmap;
	entry->pmt_bin_attr.read = intel_pmt_read;
	entry->pmt_bin_attr.size = entry->size;

	ret = sysfs_create_bin_file(&dev->kobj, &entry->pmt_bin_attr);
	if (!ret)
		return 0;

fail_ioremap:
	if (ns->attr_grp)
		sysfs_remove_group(entry->kobj, ns->attr_grp);
fail_sysfs:
	device_unregister(dev);
fail_dev_create:
	xa_erase(ns->xa, entry->devid);

	return ret;
}
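
/*
 * Map the telemetry discovery table, decode its header, fill in the
 * entry, and register a /sys/class/intel_pmt device for it.
 */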
int intel_pmt_dev_create(struct intel_pmt_entry *entry,
			 struct intel_pmt_namespace *ns,
			 struct intel_vsec_device *intel_vsec_dev, int idx)
{
	struct device *dev = &intel_vsec_dev->auxdev.dev;
	struct intel_pmt_header header;
	struct resource *disc_res;
	int ret;

	disc_res = &intel_vsec_dev->resource[idx];

	entry->disc_table = devm_ioremap_resource(dev, disc_res);
	if (IS_ERR(entry->disc_table))
		return PTR_ERR(entry->disc_table);

	ret = ns->pmt_header_decode(entry, &header, dev);
	if (ret)
		return ret;

	ret = intel_pmt_populate_entry(entry, &header, dev, disc_res);
	if (ret)
		return ret;

	return intel_pmt_dev_register(entry, ns, dev);
}
EXPORT_SYMBOL_GPL(intel_pmt_dev_create);
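
/* Reverse the registration steps performed by intel_pmt_dev_register(). */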
void intel_pmt_dev_destroy(struct intel_pmt_entry *entry,
			   struct intel_pmt_namespace *ns)
{
	struct device *dev = kobj_to_dev(entry->kobj);

	if (entry->size)
		sysfs_remove_bin_file(entry->kobj, &entry->pmt_bin_attr);

	if (ns->attr_grp)
		sysfs_remove_group(entry->kobj, ns->attr_grp);

	device_unregister(dev);
	xa_erase(ns->xa, entry->devid);
}
EXPORT_SYMBOL_GPL(intel_pmt_dev_destroy);

static int __init pmt_class_init(void)
{
	return class_register(&intel_pmt_class);
}

static void __exit pmt_class_exit(void)
{
	class_unregister(&intel_pmt_class);
}

module_init(pmt_class_init);
module_exit(pmt_class_exit);

MODULE_AUTHOR("Alexander Duyck <alexander.h.duyck@linux.intel.com>");
MODULE_DESCRIPTION("Intel PMT Class driver");
MODULE_LICENSE("GPL v2");