0001
0002
0003 #include <linux/io-64-nonatomic-lo-hi.h>
0004 #include <linux/device.h>
0005 #include <linux/slab.h>
0006 #include <linux/pci.h>
0007 #include <cxlmem.h>
0008 #include <cxlpci.h>
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
/**
 * cxl_probe_component_regs() - Detect CXL Component register blocks
 * @dev: Host device of the @base mapping, used only for log messages
 * @base: Mapping containing the component register block to probe
 * @map: Map object filled in with the register block information found
 *
 * Walks the CXL.cache/CXL.mem capability array located at
 * @base + CXL_CM_OFFSET and records the offset (relative to @base) and
 * size of each recognized capability in @map. Currently only the HDM
 * decoder capability is recorded; unknown capability IDs are logged and
 * skipped. On a bad capability array header, @map is left zeroed.
 */
void cxl_probe_component_regs(struct device *dev, void __iomem *base,
			      struct cxl_component_reg_map *map)
{
	int cap, cap_count;
	u32 cap_array;

	/* Start from a clean slate; only probed capabilities become valid. */
	*map = (struct cxl_component_reg_map) { 0 };

	/* The CXL.cache/CXL.mem capability array lives at a fixed offset. */
	base += CXL_CM_OFFSET;

	cap_array = readl(base + CXL_CM_CAP_HDR_OFFSET);

	if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
		dev_err(dev,
			"Couldn't locate the CXL.cache and CXL.mem capability array header.\n");
		return;
	}

	cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array);

	/*
	 * Walk entries starting at index 1 — presumably index 0 is the
	 * array header itself (read above); each entry is one dword.
	 */
	for (cap = 1; cap <= cap_count; cap++) {
		void __iomem *register_block;
		u32 hdr;
		int decoder_cnt;
		u16 cap_id, offset;
		u32 length;

		hdr = readl(base + cap * 0x4);

		cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr);
		offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr);
		/* Capability pointer is relative to the CM register base. */
		register_block = base + offset;

		switch (cap_id) {
		case CXL_CM_CAP_CAP_ID_HDM:
			dev_dbg(dev, "found HDM decoder capability (0x%x)\n",
				offset);

			hdr = readl(register_block);

			decoder_cnt = cxl_hdm_decoder_count(hdr);
			/* 0x10 of global registers plus 0x20 per decoder. */
			length = 0x20 * decoder_cnt + 0x10;

			map->hdm_decoder.valid = true;
			/* Recorded offset is relative to the original @base. */
			map->hdm_decoder.offset = CXL_CM_OFFSET + offset;
			map->hdm_decoder.size = length;
			break;
		default:
			dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id,
				offset);
			break;
		}
	}
}
EXPORT_SYMBOL_NS_GPL(cxl_probe_component_regs, CXL);
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104 void cxl_probe_device_regs(struct device *dev, void __iomem *base,
0105 struct cxl_device_reg_map *map)
0106 {
0107 int cap, cap_count;
0108 u64 cap_array;
0109
0110 *map = (struct cxl_device_reg_map){ 0 };
0111
0112 cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET);
0113 if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
0114 CXLDEV_CAP_ARRAY_CAP_ID)
0115 return;
0116
0117 cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
0118
0119 for (cap = 1; cap <= cap_count; cap++) {
0120 u32 offset, length;
0121 u16 cap_id;
0122
0123 cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
0124 readl(base + cap * 0x10));
0125 offset = readl(base + cap * 0x10 + 0x4);
0126 length = readl(base + cap * 0x10 + 0x8);
0127
0128 switch (cap_id) {
0129 case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
0130 dev_dbg(dev, "found Status capability (0x%x)\n", offset);
0131
0132 map->status.valid = true;
0133 map->status.offset = offset;
0134 map->status.size = length;
0135 break;
0136 case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
0137 dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
0138 map->mbox.valid = true;
0139 map->mbox.offset = offset;
0140 map->mbox.size = length;
0141 break;
0142 case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
0143 dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
0144 break;
0145 case CXLDEV_CAP_CAP_ID_MEMDEV:
0146 dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
0147 map->memdev.valid = true;
0148 map->memdev.offset = offset;
0149 map->memdev.size = length;
0150 break;
0151 default:
0152 if (cap_id >= 0x8000)
0153 dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset);
0154 else
0155 dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset);
0156 break;
0157 }
0158 }
0159 }
0160 EXPORT_SYMBOL_NS_GPL(cxl_probe_device_regs, CXL);
0161
0162 void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
0163 resource_size_t length)
0164 {
0165 void __iomem *ret_val;
0166 struct resource *res;
0167
0168 res = devm_request_mem_region(dev, addr, length, dev_name(dev));
0169 if (!res) {
0170 resource_size_t end = addr + length - 1;
0171
0172 dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end);
0173 return NULL;
0174 }
0175
0176 ret_val = devm_ioremap(dev, addr, length);
0177 if (!ret_val)
0178 dev_err(dev, "Failed to map region %pr\n", res);
0179
0180 return ret_val;
0181 }
0182
0183 int cxl_map_component_regs(struct pci_dev *pdev,
0184 struct cxl_component_regs *regs,
0185 struct cxl_register_map *map)
0186 {
0187 struct device *dev = &pdev->dev;
0188 resource_size_t phys_addr;
0189 resource_size_t length;
0190
0191 phys_addr = pci_resource_start(pdev, map->barno);
0192 phys_addr += map->block_offset;
0193
0194 phys_addr += map->component_map.hdm_decoder.offset;
0195 length = map->component_map.hdm_decoder.size;
0196 regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length);
0197 if (!regs->hdm_decoder)
0198 return -ENOMEM;
0199
0200 return 0;
0201 }
0202 EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL);
0203
0204 int cxl_map_device_regs(struct pci_dev *pdev,
0205 struct cxl_device_regs *regs,
0206 struct cxl_register_map *map)
0207 {
0208 struct device *dev = &pdev->dev;
0209 resource_size_t phys_addr;
0210
0211 phys_addr = pci_resource_start(pdev, map->barno);
0212 phys_addr += map->block_offset;
0213
0214 if (map->device_map.status.valid) {
0215 resource_size_t addr;
0216 resource_size_t length;
0217
0218 addr = phys_addr + map->device_map.status.offset;
0219 length = map->device_map.status.size;
0220 regs->status = devm_cxl_iomap_block(dev, addr, length);
0221 if (!regs->status)
0222 return -ENOMEM;
0223 }
0224
0225 if (map->device_map.mbox.valid) {
0226 resource_size_t addr;
0227 resource_size_t length;
0228
0229 addr = phys_addr + map->device_map.mbox.offset;
0230 length = map->device_map.mbox.size;
0231 regs->mbox = devm_cxl_iomap_block(dev, addr, length);
0232 if (!regs->mbox)
0233 return -ENOMEM;
0234 }
0235
0236 if (map->device_map.memdev.valid) {
0237 resource_size_t addr;
0238 resource_size_t length;
0239
0240 addr = phys_addr + map->device_map.memdev.offset;
0241 length = map->device_map.memdev.size;
0242 regs->memdev = devm_cxl_iomap_block(dev, addr, length);
0243 if (!regs->memdev)
0244 return -ENOMEM;
0245 }
0246
0247 return 0;
0248 }
0249 EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL);
0250
0251 static void cxl_decode_regblock(u32 reg_lo, u32 reg_hi,
0252 struct cxl_register_map *map)
0253 {
0254 map->block_offset = ((u64)reg_hi << 32) |
0255 (reg_lo & CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK);
0256 map->barno = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK, reg_lo);
0257 map->reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
0258 }
0259
0260
0261
0262
0263
0264
0265
0266
0267
0268
0269
0270
0271 int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
0272 struct cxl_register_map *map)
0273 {
0274 u32 regloc_size, regblocks;
0275 int regloc, i;
0276
0277 map->block_offset = U64_MAX;
0278 regloc = pci_find_dvsec_capability(pdev, PCI_DVSEC_VENDOR_ID_CXL,
0279 CXL_DVSEC_REG_LOCATOR);
0280 if (!regloc)
0281 return -ENXIO;
0282
0283 pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, ®loc_size);
0284 regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size);
0285
0286 regloc += CXL_DVSEC_REG_LOCATOR_BLOCK1_OFFSET;
0287 regblocks = (regloc_size - CXL_DVSEC_REG_LOCATOR_BLOCK1_OFFSET) / 8;
0288
0289 for (i = 0; i < regblocks; i++, regloc += 8) {
0290 u32 reg_lo, reg_hi;
0291
0292 pci_read_config_dword(pdev, regloc, ®_lo);
0293 pci_read_config_dword(pdev, regloc + 4, ®_hi);
0294
0295 cxl_decode_regblock(reg_lo, reg_hi, map);
0296
0297 if (map->reg_type == type)
0298 return 0;
0299 }
0300
0301 map->block_offset = U64_MAX;
0302 return -ENODEV;
0303 }
0304 EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, CXL);