0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/bitfield.h>
0009 #include <linux/device.h>
0010 #include <linux/firmware.h>
0011 #include <linux/mfd/intel-m10-bmc.h>
0012 #include <linux/mod_devicetable.h>
0013 #include <linux/module.h>
0014 #include <linux/platform_device.h>
0015 #include <linux/slab.h>
0016
/* Per-device context for one MAX10 BMC secure-update instance. */
struct m10bmc_sec {
	struct device *dev;		/* this platform device */
	struct intel_m10bmc *m10bmc;	/* parent MFD handle (regmap access) */
	struct fw_upload *fwl;		/* firmware-upload registration handle */
	char *fw_name;			/* unique name passed to fw upload core */
	u32 fw_name_id;			/* xarray id used to build fw_name */
	bool cancel_request;		/* user asked to cancel; checked at safe points */
};
0025
/* Allocates unique ids so each device instance gets a distinct fw name. */
static DEFINE_XARRAY_ALLOC(fw_upload_xa);

/* Root Entry Hash (REH) register layout: low 16 bits magic, high 16 bits
 * hash size in bits. Only SHA256 (32 bytes) and SHA384 (48 bytes) occur.
 */
#define REH_SHA256_SIZE 32
#define REH_SHA384_SIZE 48
#define REH_MAGIC GENMASK(15, 0)
#define REH_SHA_NUM_BYTES GENMASK(31, 16)
0033
0034 static ssize_t
0035 show_root_entry_hash(struct device *dev, u32 exp_magic,
0036 u32 prog_addr, u32 reh_addr, char *buf)
0037 {
0038 struct m10bmc_sec *sec = dev_get_drvdata(dev);
0039 int sha_num_bytes, i, ret, cnt = 0;
0040 u8 hash[REH_SHA384_SIZE];
0041 unsigned int stride;
0042 u32 magic;
0043
0044 stride = regmap_get_reg_stride(sec->m10bmc->regmap);
0045 ret = m10bmc_raw_read(sec->m10bmc, prog_addr, &magic);
0046 if (ret)
0047 return ret;
0048
0049 if (FIELD_GET(REH_MAGIC, magic) != exp_magic)
0050 return sysfs_emit(buf, "hash not programmed\n");
0051
0052 sha_num_bytes = FIELD_GET(REH_SHA_NUM_BYTES, magic) / 8;
0053 if ((sha_num_bytes % stride) ||
0054 (sha_num_bytes != REH_SHA256_SIZE &&
0055 sha_num_bytes != REH_SHA384_SIZE)) {
0056 dev_err(sec->dev, "%s bad sha num bytes %d\n", __func__,
0057 sha_num_bytes);
0058 return -EINVAL;
0059 }
0060
0061 ret = regmap_bulk_read(sec->m10bmc->regmap, reh_addr,
0062 hash, sha_num_bytes / stride);
0063 if (ret) {
0064 dev_err(dev, "failed to read root entry hash: %x cnt %x: %d\n",
0065 reh_addr, sha_num_bytes / stride, ret);
0066 return ret;
0067 }
0068
0069 for (i = 0; i < sha_num_bytes; i++)
0070 cnt += sprintf(buf + cnt, "%02x", hash[i]);
0071 cnt += sprintf(buf + cnt, "\n");
0072
0073 return cnt;
0074 }
0075
/*
 * Generates a sysfs show() wrapper plus a read-only device attribute for
 * one root entry hash (one each for the BMC, SR, and PR images below).
 */
#define DEVICE_ATTR_SEC_REH_RO(_name, _magic, _prog_addr, _reh_addr) \
static ssize_t _name##_root_entry_hash_show(struct device *dev, \
					    struct device_attribute *attr, \
					    char *buf) \
{ return show_root_entry_hash(dev, _magic, _prog_addr, _reh_addr, buf); } \
static DEVICE_ATTR_RO(_name##_root_entry_hash)

DEVICE_ATTR_SEC_REH_RO(bmc, BMC_PROG_MAGIC, BMC_PROG_ADDR, BMC_REH_ADDR);
DEVICE_ATTR_SEC_REH_RO(sr, SR_PROG_MAGIC, SR_PROG_ADDR, SR_REH_ADDR);
DEVICE_ATTR_SEC_REH_RO(pr, PR_PROG_MAGIC, PR_PROG_ADDR, PR_REH_ADDR);

/* CSK cancellation vector: 128 bits, read as four 32-bit words. */
#define CSK_BIT_LEN 128U
#define CSK_32ARRAY_SIZE DIV_ROUND_UP(CSK_BIT_LEN, 32)
0089
/**
 * show_canceled_csk - display the set of canceled code-signing keys (CSKs)
 * @dev:  device associated with the sysfs attribute
 * @addr: register address of the CSK cancellation vector
 * @buf:  sysfs PAGE_SIZE output buffer
 *
 * The hardware vector is read as little-endian 32-bit words and converted
 * to a bitmap; the bitmap is complemented before printing, so the emitted
 * list contains the key numbers whose vector bits were clear.
 *
 * Return: bytes written to @buf, or a negative error code.
 */
static ssize_t
show_canceled_csk(struct device *dev, u32 addr, char *buf)
{
	unsigned int i, stride, size = CSK_32ARRAY_SIZE * sizeof(u32);
	struct m10bmc_sec *sec = dev_get_drvdata(dev);
	DECLARE_BITMAP(csk_map, CSK_BIT_LEN);
	__le32 csk_le32[CSK_32ARRAY_SIZE];
	u32 csk32[CSK_32ARRAY_SIZE];
	int ret;

	/* Bulk reads are done in units of the regmap stride. */
	stride = regmap_get_reg_stride(sec->m10bmc->regmap);
	if (size % stride) {
		dev_err(sec->dev,
			"CSK vector size (0x%x) not aligned to stride (0x%x)\n",
			size, stride);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	ret = regmap_bulk_read(sec->m10bmc->regmap, addr, csk_le32,
			       size / stride);
	if (ret) {
		dev_err(sec->dev, "failed to read CSK vector: %x cnt %x: %d\n",
			addr, size / stride, ret);
		return ret;
	}

	/* Convert device (little-endian) words to CPU order. */
	for (i = 0; i < CSK_32ARRAY_SIZE; i++)
		csk32[i] = le32_to_cpu(((csk_le32[i])));

	bitmap_from_arr32(csk_map, csk32, CSK_BIT_LEN);
	bitmap_complement(csk_map, csk_map, CSK_BIT_LEN);
	return bitmap_print_to_pagebuf(1, buf, csk_map, CSK_BIT_LEN);
}
0124
/*
 * Generates a sysfs show() wrapper plus a read-only device attribute for
 * one canceled-CSK vector (BMC, SR, and PR instances below).
 */
#define DEVICE_ATTR_SEC_CSK_RO(_name, _addr) \
static ssize_t _name##_canceled_csks_show(struct device *dev, \
					  struct device_attribute *attr, \
					  char *buf) \
{ return show_canceled_csk(dev, _addr, buf); } \
static DEVICE_ATTR_RO(_name##_canceled_csks)

/* CSK vector lives at this fixed offset within each program region. */
#define CSK_VEC_OFFSET 0x34

DEVICE_ATTR_SEC_CSK_RO(bmc, BMC_PROG_ADDR + CSK_VEC_OFFSET);
DEVICE_ATTR_SEC_CSK_RO(sr, SR_PROG_ADDR + CSK_VEC_OFFSET);
DEVICE_ATTR_SEC_CSK_RO(pr, PR_PROG_ADDR + CSK_VEC_OFFSET);
0137
/* Size in bytes of the flash-update counter region. */
#define FLASH_COUNT_SIZE 4096

/*
 * flash_count_show - report the number of flash updates performed
 *
 * Reads the FLASH_COUNT_SIZE counter region and reports the number of
 * zero bits in it (total bits minus the bitmap weight of set bits).
 * Presumably each completed update clears one more bit — the hardware
 * convention is not visible here; confirm against the BMC spec.
 */
static ssize_t flash_count_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct m10bmc_sec *sec = dev_get_drvdata(dev);
	unsigned int stride, num_bits;
	u8 *flash_buf;
	int cnt, ret;

	stride = regmap_get_reg_stride(sec->m10bmc->regmap);
	num_bits = FLASH_COUNT_SIZE * 8;

	/* Bulk reads must be a whole number of stride-sized words. */
	if (FLASH_COUNT_SIZE % stride) {
		dev_err(sec->dev,
			"FLASH_COUNT_SIZE (0x%x) not aligned to stride (0x%x)\n",
			FLASH_COUNT_SIZE, stride);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
	if (!flash_buf)
		return -ENOMEM;

	ret = regmap_bulk_read(sec->m10bmc->regmap, STAGING_FLASH_COUNT,
			       flash_buf, FLASH_COUNT_SIZE / stride);
	if (ret) {
		dev_err(sec->dev,
			"failed to read flash count: %x cnt %x: %d\n",
			STAGING_FLASH_COUNT, FLASH_COUNT_SIZE / stride, ret);
		goto exit_free;
	}
	/* Count of cleared bits = total bits - set bits. */
	cnt = num_bits - bitmap_weight((unsigned long *)flash_buf, num_bits);

exit_free:
	kfree(flash_buf);

	return ret ? : sysfs_emit(buf, "%u\n", cnt);
}
static DEVICE_ATTR_RO(flash_count);
0179
/* All security attributes, exposed under the "security" sysfs group. */
static struct attribute *m10bmc_security_attrs[] = {
	&dev_attr_flash_count.attr,
	&dev_attr_bmc_root_entry_hash.attr,
	&dev_attr_sr_root_entry_hash.attr,
	&dev_attr_pr_root_entry_hash.attr,
	&dev_attr_sr_canceled_csks.attr,
	&dev_attr_pr_canceled_csks.attr,
	&dev_attr_bmc_canceled_csks.attr,
	NULL,
};

static struct attribute_group m10bmc_security_attr_group = {
	.name = "security",
	.attrs = m10bmc_security_attrs,
};

/* Installed via the driver's .dev_groups so they exist before probe(). */
static const struct attribute_group *m10bmc_sec_attr_groups[] = {
	&m10bmc_security_attr_group,
	NULL,
};
0200
0201 static void log_error_regs(struct m10bmc_sec *sec, u32 doorbell)
0202 {
0203 u32 auth_result;
0204
0205 dev_err(sec->dev, "RSU error status: 0x%08x\n", doorbell);
0206
0207 if (!m10bmc_sys_read(sec->m10bmc, M10BMC_AUTH_RESULT, &auth_result))
0208 dev_err(sec->dev, "RSU auth result: 0x%08x\n", auth_result);
0209 }
0210
0211 static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
0212 {
0213 u32 doorbell;
0214 int ret;
0215
0216 ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
0217 if (ret)
0218 return FW_UPLOAD_ERR_RW_ERROR;
0219
0220 if (rsu_prog(doorbell) != RSU_PROG_IDLE &&
0221 rsu_prog(doorbell) != RSU_PROG_RSU_DONE) {
0222 log_error_regs(sec, doorbell);
0223 return FW_UPLOAD_ERR_BUSY;
0224 }
0225
0226 return FW_UPLOAD_ERR_NONE;
0227 }
0228
0229 static inline bool rsu_start_done(u32 doorbell)
0230 {
0231 u32 status, progress;
0232
0233 if (doorbell & DRBL_RSU_REQUEST)
0234 return false;
0235
0236 status = rsu_stat(doorbell);
0237 if (status == RSU_STAT_ERASE_FAIL || status == RSU_STAT_WEAROUT)
0238 return true;
0239
0240 progress = rsu_prog(doorbell);
0241 if (progress != RSU_PROG_IDLE && progress != RSU_PROG_RSU_DONE)
0242 return true;
0243
0244 return false;
0245 }
0246
/*
 * rsu_update_init - request an RSU update and wait for the BMC to react
 *
 * Sets the RSU request bit (with host status "idle") in the doorbell, then
 * polls until rsu_start_done() reports the BMC has acted. Translates the
 * terminal status values WEAROUT and ERASE_FAIL into fw_upload errors.
 */
static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
{
	u32 doorbell, status;
	int ret;

	/* Raise the request; host status goes to IDLE in the same write. */
	ret = regmap_update_bits(sec->m10bmc->regmap,
				 M10BMC_SYS_BASE + M10BMC_DOORBELL,
				 DRBL_RSU_REQUEST | DRBL_HOST_STATUS,
				 DRBL_RSU_REQUEST |
				 FIELD_PREP(DRBL_HOST_STATUS,
					    HOST_STATUS_IDLE));
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
				       M10BMC_SYS_BASE + M10BMC_DOORBELL,
				       doorbell,
				       rsu_start_done(doorbell),
				       NIOS_HANDSHAKE_INTERVAL_US,
				       NIOS_HANDSHAKE_TIMEOUT_US);

	if (ret == -ETIMEDOUT) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (ret) {
		return FW_UPLOAD_ERR_RW_ERROR;
	}

	status = rsu_stat(doorbell);
	if (status == RSU_STAT_WEAROUT) {
		/* Flash wear protection tripped; not a hardware fault. */
		dev_warn(sec->dev, "Excessive flash update count detected\n");
		return FW_UPLOAD_ERR_WEAROUT;
	} else if (status == RSU_STAT_ERASE_FAIL) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}
0286
/*
 * rsu_prog_ready - wait for the BMC to finish its PREPARE phase
 *
 * Polls the doorbell while progress stays at RSU_PROG_PREPARE, up to
 * RSU_PREP_TIMEOUT_MS. Succeeds only if progress reaches RSU_PROG_READY;
 * still-PREPARE maps to a timeout error, anything else to a HW error.
 */
static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
{
	unsigned long poll_timeout;
	u32 doorbell, progress;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	poll_timeout = jiffies + msecs_to_jiffies(RSU_PREP_TIMEOUT_MS);
	while (rsu_prog(doorbell) == RSU_PROG_PREPARE) {
		/* Sleep first: preparation is known to take a while. */
		msleep(RSU_PREP_INTERVAL_MS);
		if (time_after(jiffies, poll_timeout))
			break;

		ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
		if (ret)
			return FW_UPLOAD_ERR_RW_ERROR;
	}

	progress = rsu_prog(doorbell);
	if (progress == RSU_PROG_PREPARE) {
		/* Loop exited via timeout, still preparing. */
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (progress != RSU_PROG_READY) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}
0319
/*
 * rsu_send_data - tell the BMC the staged image is complete
 *
 * Sets host status to WRITE_DONE, then waits for the BMC to move progress
 * off RSU_PROG_READY, and finally checks that the resulting status is one
 * of the acceptable values.
 */
static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
{
	u32 doorbell;
	int ret;

	ret = regmap_update_bits(sec->m10bmc->regmap,
				 M10BMC_SYS_BASE + M10BMC_DOORBELL,
				 DRBL_HOST_STATUS,
				 FIELD_PREP(DRBL_HOST_STATUS,
					    HOST_STATUS_WRITE_DONE));
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	/* Wait until the BMC acknowledges by leaving the READY state. */
	ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
				       M10BMC_SYS_BASE + M10BMC_DOORBELL,
				       doorbell,
				       rsu_prog(doorbell) != RSU_PROG_READY,
				       NIOS_HANDSHAKE_INTERVAL_US,
				       NIOS_HANDSHAKE_TIMEOUT_US);

	if (ret == -ETIMEDOUT) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (ret) {
		return FW_UPLOAD_ERR_RW_ERROR;
	}

	switch (rsu_stat(doorbell)) {
	case RSU_STAT_NORMAL:
	case RSU_STAT_NIOS_OK:
	case RSU_STAT_USER_OK:
	case RSU_STAT_FACTORY_OK:
		break;
	default:
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}
0360
0361 static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell)
0362 {
0363 if (m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, doorbell))
0364 return -EIO;
0365
0366 switch (rsu_stat(*doorbell)) {
0367 case RSU_STAT_NORMAL:
0368 case RSU_STAT_NIOS_OK:
0369 case RSU_STAT_USER_OK:
0370 case RSU_STAT_FACTORY_OK:
0371 break;
0372 default:
0373 return -EINVAL;
0374 }
0375
0376 switch (rsu_prog(*doorbell)) {
0377 case RSU_PROG_IDLE:
0378 case RSU_PROG_RSU_DONE:
0379 return 0;
0380 case RSU_PROG_AUTHENTICATING:
0381 case RSU_PROG_COPYING:
0382 case RSU_PROG_UPDATE_CANCEL:
0383 case RSU_PROG_PROGRAM_KEY_HASH:
0384 return -EAGAIN;
0385 default:
0386 return -EINVAL;
0387 }
0388 }
0389
0390 static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
0391 {
0392 u32 doorbell;
0393 int ret;
0394
0395 ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
0396 if (ret)
0397 return FW_UPLOAD_ERR_RW_ERROR;
0398
0399 if (rsu_prog(doorbell) != RSU_PROG_READY)
0400 return FW_UPLOAD_ERR_BUSY;
0401
0402 ret = regmap_update_bits(sec->m10bmc->regmap,
0403 M10BMC_SYS_BASE + M10BMC_DOORBELL,
0404 DRBL_HOST_STATUS,
0405 FIELD_PREP(DRBL_HOST_STATUS,
0406 HOST_STATUS_ABORT_RSU));
0407 if (ret)
0408 return FW_UPLOAD_ERR_RW_ERROR;
0409
0410 return FW_UPLOAD_ERR_CANCELED;
0411 }
0412
0413 static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl,
0414 const u8 *data, u32 size)
0415 {
0416 struct m10bmc_sec *sec = fwl->dd_handle;
0417 u32 ret;
0418
0419 sec->cancel_request = false;
0420
0421 if (!size || size > M10BMC_STAGING_SIZE)
0422 return FW_UPLOAD_ERR_INVALID_SIZE;
0423
0424 ret = rsu_check_idle(sec);
0425 if (ret != FW_UPLOAD_ERR_NONE)
0426 return ret;
0427
0428 ret = rsu_update_init(sec);
0429 if (ret != FW_UPLOAD_ERR_NONE)
0430 return ret;
0431
0432 ret = rsu_prog_ready(sec);
0433 if (ret != FW_UPLOAD_ERR_NONE)
0434 return ret;
0435
0436 if (sec->cancel_request)
0437 return rsu_cancel(sec);
0438
0439 return FW_UPLOAD_ERR_NONE;
0440 }
0441
/* Maximum bytes staged per .write callback invocation. */
#define WRITE_BLOCK_SIZE 0x4000

/*
 * fw_upload .write op: copy up to WRITE_BLOCK_SIZE bytes of the image at
 * @offset into the BMC staging area. @data points at the start of the
 * whole image, so @offset is applied to both the source and destination.
 * Reports progress through *written.
 */
static enum fw_upload_err m10bmc_sec_write(struct fw_upload *fwl, const u8 *data,
					   u32 offset, u32 size, u32 *written)
{
	struct m10bmc_sec *sec = fwl->dd_handle;
	u32 blk_size, doorbell, extra_offset;
	unsigned int stride, extra = 0;
	int ret;

	stride = regmap_get_reg_stride(sec->m10bmc->regmap);
	if (sec->cancel_request)
		return rsu_cancel(sec);

	/* The BMC must still be in READY state to accept data. */
	ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
	if (ret) {
		return FW_UPLOAD_ERR_RW_ERROR;
	} else if (rsu_prog(doorbell) != RSU_PROG_READY) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	WARN_ON_ONCE(WRITE_BLOCK_SIZE % stride);
	blk_size = min_t(u32, WRITE_BLOCK_SIZE, size);
	/* Bulk-write the stride-aligned portion of this block. */
	ret = regmap_bulk_write(sec->m10bmc->regmap,
				M10BMC_STAGING_BASE + offset,
				(void *)data + offset,
				blk_size / stride);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	/*
	 * Handle any sub-stride tail (only possible on the final block):
	 * pack the remaining bytes into a zero-padded word and write it
	 * with a single register write. NOTE(review): the memcpy into
	 * 'extra' assumes a little-endian byte layout for the partial
	 * word — confirm against the regmap configuration.
	 */
	if (blk_size % stride) {
		extra_offset = offset + ALIGN_DOWN(blk_size, stride);
		memcpy(&extra, (u8 *)(data + extra_offset), blk_size % stride);
		ret = regmap_write(sec->m10bmc->regmap,
				   M10BMC_STAGING_BASE + extra_offset, extra);
		if (ret)
			return FW_UPLOAD_ERR_RW_ERROR;
	}

	*written = blk_size;
	return FW_UPLOAD_ERR_NONE;
}
0489
/*
 * fw_upload .poll_complete op: signal write-done to the BMC, then poll
 * until the update completes, fails, or RSU_COMPLETE_TIMEOUT_MS elapses.
 */
static enum fw_upload_err m10bmc_sec_poll_complete(struct fw_upload *fwl)
{
	struct m10bmc_sec *sec = fwl->dd_handle;
	unsigned long poll_timeout;
	u32 doorbell, result;
	int ret;

	if (sec->cancel_request)
		return rsu_cancel(sec);

	result = rsu_send_data(sec);
	if (result != FW_UPLOAD_ERR_NONE)
		return result;

	/* Authentication/copying is slow; poll at a coarse interval. */
	poll_timeout = jiffies + msecs_to_jiffies(RSU_COMPLETE_TIMEOUT_MS);
	do {
		msleep(RSU_COMPLETE_INTERVAL_MS);
		ret = rsu_check_complete(sec, &doorbell);
	} while (ret == -EAGAIN && !time_after(jiffies, poll_timeout));

	if (ret == -EAGAIN) {
		/* Still in progress when the timeout expired. */
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (ret == -EIO) {
		return FW_UPLOAD_ERR_RW_ERROR;
	} else if (ret) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}
0522
0523
0524
0525
0526
0527
0528
0529
/*
 * fw_upload .cancel op: record the request only. The flag is acted upon
 * at the next safe point in prepare/write/poll_complete, because the
 * hardware cannot abort at an arbitrary moment.
 */
static void m10bmc_sec_cancel(struct fw_upload *fwl)
{
	struct m10bmc_sec *sec = fwl->dd_handle;

	sec->cancel_request = true;
}
0536
/*
 * fw_upload .cleanup op: best-effort abort of any update still waiting
 * in the READY state; errors are intentionally ignored.
 */
static void m10bmc_sec_cleanup(struct fw_upload *fwl)
{
	struct m10bmc_sec *sec = fwl->dd_handle;

	(void)rsu_cancel(sec);
}
0543
/* Callbacks registered with the firmware upload framework. */
static const struct fw_upload_ops m10bmc_ops = {
	.prepare = m10bmc_sec_prepare,
	.write = m10bmc_sec_write,
	.poll_complete = m10bmc_sec_poll_complete,
	.cancel = m10bmc_sec_cancel,
	.cleanup = m10bmc_sec_cleanup,
};
0551
0552 #define SEC_UPDATE_LEN_MAX 32
0553 static int m10bmc_sec_probe(struct platform_device *pdev)
0554 {
0555 char buf[SEC_UPDATE_LEN_MAX];
0556 struct m10bmc_sec *sec;
0557 struct fw_upload *fwl;
0558 unsigned int len;
0559 int ret;
0560
0561 sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
0562 if (!sec)
0563 return -ENOMEM;
0564
0565 sec->dev = &pdev->dev;
0566 sec->m10bmc = dev_get_drvdata(pdev->dev.parent);
0567 dev_set_drvdata(&pdev->dev, sec);
0568
0569 ret = xa_alloc(&fw_upload_xa, &sec->fw_name_id, sec,
0570 xa_limit_32b, GFP_KERNEL);
0571 if (ret)
0572 return ret;
0573
0574 len = scnprintf(buf, SEC_UPDATE_LEN_MAX, "secure-update%d",
0575 sec->fw_name_id);
0576 sec->fw_name = kmemdup_nul(buf, len, GFP_KERNEL);
0577 if (!sec->fw_name)
0578 return -ENOMEM;
0579
0580 fwl = firmware_upload_register(THIS_MODULE, sec->dev, sec->fw_name,
0581 &m10bmc_ops, sec);
0582 if (IS_ERR(fwl)) {
0583 dev_err(sec->dev, "Firmware Upload driver failed to start\n");
0584 kfree(sec->fw_name);
0585 xa_erase(&fw_upload_xa, sec->fw_name_id);
0586 return PTR_ERR(fwl);
0587 }
0588
0589 sec->fwl = fwl;
0590 return 0;
0591 }
0592
/* Remove: tear down in the reverse order of probe. */
static int m10bmc_sec_remove(struct platform_device *pdev)
{
	struct m10bmc_sec *sec = dev_get_drvdata(&pdev->dev);

	firmware_upload_unregister(sec->fwl);
	kfree(sec->fw_name);
	xa_erase(&fw_upload_xa, sec->fw_name_id);

	return 0;
}
0603
/* Platform device ids this driver binds to (instantiated by the MFD). */
static const struct platform_device_id intel_m10bmc_sec_ids[] = {
	{
		.name = "n3000bmc-sec-update",
	},
	{ }
};
MODULE_DEVICE_TABLE(platform, intel_m10bmc_sec_ids);

static struct platform_driver intel_m10bmc_sec_driver = {
	.probe = m10bmc_sec_probe,
	.remove = m10bmc_sec_remove,
	.driver = {
		.name = "intel-m10bmc-sec-update",
		/* Sysfs groups are created by the core before probe runs. */
		.dev_groups = m10bmc_sec_attr_groups,
	},
	.id_table = intel_m10bmc_sec_ids,
};
module_platform_driver(intel_m10bmc_sec_driver);
0622
0623 MODULE_AUTHOR("Intel Corporation");
0624 MODULE_DESCRIPTION("Intel MAX10 BMC Secure Update");
0625 MODULE_LICENSE("GPL");