// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "kcs-bmc: " fmt

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ipmi_bmc.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "kcs_bmc_client.h"
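
/*
 * Phases of a KCS transaction as seen by the BMC. A request flows through
 * the WRITE_* phases while the host streams bytes in, is handed to userspace
 * in WRITE_DONE/WAIT_READ, and the response is returned to the host in the
 * READ phase. The ABORT_ERROR* phases implement the Get Status / Abort
 * handshake, and ERROR flags a protocol violation.
 */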
enum kcs_ipmi_phases {
	KCS_PHASE_IDLE,

	KCS_PHASE_WRITE_START,
	KCS_PHASE_WRITE_DATA,
	KCS_PHASE_WRITE_END_CMD,
	KCS_PHASE_WRITE_DONE,

	KCS_PHASE_WAIT_READ,
	KCS_PHASE_READ,

	KCS_PHASE_ABORT_ERROR1,
	KCS_PHASE_ABORT_ERROR2,
	KCS_PHASE_ERROR
};
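
/*
 * Error codes returned to the host via the data register during the
 * Get Status / Abort handshake. The values follow the KCS interface status
 * codes defined in the IPMI specification.
 */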
enum kcs_ipmi_errors {
	KCS_NO_ERROR = 0x00,
	KCS_ABORTED_BY_COMMAND = 0x01,
	KCS_ILLEGAL_CONTROL_CODE = 0x02,
	KCS_LENGTH_ERROR = 0x06,
	KCS_UNSPECIFIED_ERROR = 0xFF
};
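
/*
 * Per-channel client state. ->data_in buffers the request arriving from the
 * host, ->data_out holds the response supplied by userspace, and ->kbuffer
 * is a bounce buffer for copy_{to,from}_user() so ->lock is never held
 * across a user-memory access. ->lock protects the transfer state machine
 * against the KCS event handler; ->mutex serialises the file operations.
 */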
struct kcs_bmc_ipmi {
	struct list_head entry;

	struct kcs_bmc_client client;

	spinlock_t lock;

	enum kcs_ipmi_phases phase;
	enum kcs_ipmi_errors error;

	wait_queue_head_t queue;
	bool data_in_avail;
	int data_in_idx;
	u8 *data_in;

	int data_out_idx;
	int data_out_len;
	u8 *data_out;

	struct mutex mutex;
	u8 *kbuffer;

	struct miscdevice miscdev;
};

#define DEVICE_NAME "ipmi-kcs"

#define KCS_MSG_BUFSIZ 1000

#define KCS_ZERO_DATA 0
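
/*
 * Status register layout: bits 7:6 encode the interface state, C/D# reports
 * whether the last host write hit the command or data register, SMS_ATN
 * requests the host's attention, and IBF/OBF flag full input/output data
 * registers.
 */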
#define KCS_STATUS_STATE(state) ((state) << 6)
#define KCS_STATUS_STATE_MASK GENMASK(7, 6)
#define KCS_STATUS_CMD_DAT BIT(3)
#define KCS_STATUS_SMS_ATN BIT(2)
#define KCS_STATUS_IBF BIT(1)
#define KCS_STATUS_OBF BIT(0)
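
/* Interface states reported to the host in status bits 7:6. */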
enum kcs_states {
	IDLE_STATE = 0,
	READ_STATE = 1,
	WRITE_STATE = 2,
	ERROR_STATE = 3,
};
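
/* Control codes the host writes to the command register. */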
#define KCS_CMD_GET_STATUS_ABORT 0x60
#define KCS_CMD_WRITE_START 0x61
#define KCS_CMD_WRITE_END 0x62
#define KCS_CMD_READ_BYTE 0x68

static inline void set_state(struct kcs_bmc_ipmi *priv, u8 state)
{
	kcs_bmc_update_status(priv->client.dev, KCS_STATUS_STATE_MASK, KCS_STATUS_STATE(state));
}
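
/*
 * Abandon the current transaction: report ERROR_STATE to the host, drain the
 * input data register, and discard any partially received request.
 */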
static void kcs_bmc_ipmi_force_abort(struct kcs_bmc_ipmi *priv)
{
	set_state(priv, ERROR_STATE);
	kcs_bmc_read_data(priv->client.dev);
	kcs_bmc_write_data(priv->client.dev, KCS_ZERO_DATA);

	priv->phase = KCS_PHASE_ERROR;
	priv->data_in_avail = false;
	priv->data_in_idx = 0;
}
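
/*
 * Handle a host write to the data register, advancing the transfer state
 * machine. Incoming request bytes accumulate in ->data_in until the
 * WRITE_END byte arrives, at which point userspace is woken; in the READ
 * phase each READ_BYTE handshake emits the next response byte from
 * ->data_out. Called with ->lock held from the event handler.
 */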
static void kcs_bmc_ipmi_handle_data(struct kcs_bmc_ipmi *priv)
{
	struct kcs_bmc_device *dev;
	u8 data;

	dev = priv->client.dev;

	switch (priv->phase) {
	case KCS_PHASE_WRITE_START:
		priv->phase = KCS_PHASE_WRITE_DATA;
		fallthrough;

	case KCS_PHASE_WRITE_DATA:
		if (priv->data_in_idx < KCS_MSG_BUFSIZ) {
			set_state(priv, WRITE_STATE);
			kcs_bmc_write_data(dev, KCS_ZERO_DATA);
			priv->data_in[priv->data_in_idx++] = kcs_bmc_read_data(dev);
		} else {
			kcs_bmc_ipmi_force_abort(priv);
			priv->error = KCS_LENGTH_ERROR;
		}
		break;

	case KCS_PHASE_WRITE_END_CMD:
		if (priv->data_in_idx < KCS_MSG_BUFSIZ) {
			set_state(priv, READ_STATE);
			priv->data_in[priv->data_in_idx++] = kcs_bmc_read_data(dev);
			priv->phase = KCS_PHASE_WRITE_DONE;
			priv->data_in_avail = true;
			wake_up_interruptible(&priv->queue);
		} else {
			kcs_bmc_ipmi_force_abort(priv);
			priv->error = KCS_LENGTH_ERROR;
		}
		break;

	case KCS_PHASE_READ:
		if (priv->data_out_idx == priv->data_out_len)
			set_state(priv, IDLE_STATE);

		data = kcs_bmc_read_data(dev);
		if (data != KCS_CMD_READ_BYTE) {
			set_state(priv, ERROR_STATE);
			kcs_bmc_write_data(dev, KCS_ZERO_DATA);
			break;
		}

		if (priv->data_out_idx == priv->data_out_len) {
			kcs_bmc_write_data(dev, KCS_ZERO_DATA);
			priv->phase = KCS_PHASE_IDLE;
			break;
		}

		kcs_bmc_write_data(dev, priv->data_out[priv->data_out_idx++]);
		break;

	case KCS_PHASE_ABORT_ERROR1:
		set_state(priv, READ_STATE);
		kcs_bmc_read_data(dev);
		kcs_bmc_write_data(dev, priv->error);
		priv->phase = KCS_PHASE_ABORT_ERROR2;
		break;

	case KCS_PHASE_ABORT_ERROR2:
		set_state(priv, IDLE_STATE);
		kcs_bmc_read_data(dev);
		kcs_bmc_write_data(dev, KCS_ZERO_DATA);
		priv->phase = KCS_PHASE_IDLE;
		break;

	default:
		kcs_bmc_ipmi_force_abort(priv);
		break;
	}
}
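
/*
 * Handle a host write to the command register. WRITE_START begins a new
 * request, WRITE_END marks the next data byte as the last one, and
 * GET_STATUS_ABORT starts the error-reporting handshake. Any other control
 * code aborts the transfer. Called with ->lock held from the event handler.
 */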
static void kcs_bmc_ipmi_handle_cmd(struct kcs_bmc_ipmi *priv)
{
	u8 cmd;

	set_state(priv, WRITE_STATE);
	kcs_bmc_write_data(priv->client.dev, KCS_ZERO_DATA);

	cmd = kcs_bmc_read_data(priv->client.dev);
	switch (cmd) {
	case KCS_CMD_WRITE_START:
		priv->phase = KCS_PHASE_WRITE_START;
		priv->error = KCS_NO_ERROR;
		priv->data_in_avail = false;
		priv->data_in_idx = 0;
		break;

	case KCS_CMD_WRITE_END:
		if (priv->phase != KCS_PHASE_WRITE_DATA) {
			kcs_bmc_ipmi_force_abort(priv);
			break;
		}

		priv->phase = KCS_PHASE_WRITE_END_CMD;
		break;

	case KCS_CMD_GET_STATUS_ABORT:
		if (priv->error == KCS_NO_ERROR)
			priv->error = KCS_ABORTED_BY_COMMAND;

		priv->phase = KCS_PHASE_ABORT_ERROR1;
		priv->data_in_avail = false;
		priv->data_in_idx = 0;
		break;

	default:
		kcs_bmc_ipmi_force_abort(priv);
		priv->error = KCS_ILLEGAL_CONTROL_CODE;
		break;
	}
}

static inline struct kcs_bmc_ipmi *client_to_kcs_bmc_ipmi(struct kcs_bmc_client *client)
{
	return container_of(client, struct kcs_bmc_ipmi, client);
}
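
/*
 * Interrupt-context callback invoked by the KCS core. When the host has
 * filled the input buffer (IBF set), dispatch to the command or data handler
 * depending on which register was written.
 */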
static irqreturn_t kcs_bmc_ipmi_event(struct kcs_bmc_client *client)
{
	struct kcs_bmc_ipmi *priv;
	irqreturn_t ret;
	u8 status;

	priv = client_to_kcs_bmc_ipmi(client);
	if (!priv)
		return IRQ_NONE;

	spin_lock(&priv->lock);

	status = kcs_bmc_read_status(client->dev);
	if (status & KCS_STATUS_IBF) {
		if (status & KCS_STATUS_CMD_DAT)
			kcs_bmc_ipmi_handle_cmd(priv);
		else
			kcs_bmc_ipmi_handle_data(priv);

		ret = IRQ_HANDLED;
	} else {
		ret = IRQ_NONE;
	}

	spin_unlock(&priv->lock);

	return ret;
}

static const struct kcs_bmc_client_ops kcs_bmc_ipmi_client_ops = {
	.event = kcs_bmc_ipmi_event,
};

static inline struct kcs_bmc_ipmi *to_kcs_bmc(struct file *filp)
{
	return container_of(filp->private_data, struct kcs_bmc_ipmi, miscdev);
}

static int kcs_bmc_ipmi_open(struct inode *inode, struct file *filp)
{
	struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);

	return kcs_bmc_enable_device(priv->client.dev, &priv->client);
}

static __poll_t kcs_bmc_ipmi_poll(struct file *filp, poll_table *wait)
{
	struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
	__poll_t mask = 0;

	poll_wait(filp, &priv->queue, wait);

	spin_lock_irq(&priv->lock);
	if (priv->data_in_avail)
		mask |= EPOLLIN;
	spin_unlock_irq(&priv->lock);

	return mask;
}
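
/*
 * Deliver a completed request to userspace. The request bytes are copied
 * into ->kbuffer under the spinlock and then copied to the caller outside
 * of it; once the read succeeds the state machine moves to WAIT_READ so a
 * subsequent write() can supply the response.
 */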
static ssize_t kcs_bmc_ipmi_read(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
	bool data_avail;
	size_t data_len;
	ssize_t ret;

	if (!(filp->f_flags & O_NONBLOCK))
		wait_event_interruptible(priv->queue,
					 priv->data_in_avail);

	mutex_lock(&priv->mutex);

	spin_lock_irq(&priv->lock);
	data_avail = priv->data_in_avail;
	if (data_avail) {
		data_len = priv->data_in_idx;
		memcpy(priv->kbuffer, priv->data_in, data_len);
	}
	spin_unlock_irq(&priv->lock);

	if (!data_avail) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	if (count < data_len) {
		pr_err("channel=%u with too large data: %zu\n",
		       priv->client.dev->channel, data_len);

		spin_lock_irq(&priv->lock);
		kcs_bmc_ipmi_force_abort(priv);
		spin_unlock_irq(&priv->lock);

		ret = -EOVERFLOW;
		goto out_unlock;
	}

	if (copy_to_user(buf, priv->kbuffer, data_len)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	ret = data_len;

	spin_lock_irq(&priv->lock);
	if (priv->phase == KCS_PHASE_WRITE_DONE) {
		priv->phase = KCS_PHASE_WAIT_READ;
		priv->data_in_avail = false;
		priv->data_in_idx = 0;
	} else {
		ret = -EAGAIN;
	}
	spin_unlock_irq(&priv->lock);

out_unlock:
	mutex_unlock(&priv->mutex);

	return ret;
}
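
/*
 * Accept the response from userspace. A valid IPMI response is at least
 * three bytes (netfn/lun, cmd, completion code). The first byte is pushed
 * into the output data register immediately; the event handler streams the
 * remainder as the host issues READ_BYTE handshakes.
 */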
static ssize_t kcs_bmc_ipmi_write(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
	ssize_t ret;

	if (count < 3 || count > KCS_MSG_BUFSIZ)
		return -EINVAL;

	mutex_lock(&priv->mutex);

	if (copy_from_user(priv->kbuffer, buf, count)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	spin_lock_irq(&priv->lock);
	if (priv->phase == KCS_PHASE_WAIT_READ) {
		priv->phase = KCS_PHASE_READ;
		priv->data_out_idx = 1;
		priv->data_out_len = count;
		memcpy(priv->data_out, priv->kbuffer, count);
		kcs_bmc_write_data(priv->client.dev, priv->data_out[0]);
		ret = count;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irq(&priv->lock);

out_unlock:
	mutex_unlock(&priv->mutex);

	return ret;
}
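
/*
 * SMS_ATN can be raised or lowered to prod the host, and FORCE_ABORT drops
 * any in-flight transaction, mirroring the abort path taken on protocol
 * errors.
 */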
static long kcs_bmc_ipmi_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);
	long ret = 0;

	spin_lock_irq(&priv->lock);

	switch (cmd) {
	case IPMI_BMC_IOCTL_SET_SMS_ATN:
		kcs_bmc_update_status(priv->client.dev, KCS_STATUS_SMS_ATN, KCS_STATUS_SMS_ATN);
		break;

	case IPMI_BMC_IOCTL_CLEAR_SMS_ATN:
		kcs_bmc_update_status(priv->client.dev, KCS_STATUS_SMS_ATN, 0);
		break;

	case IPMI_BMC_IOCTL_FORCE_ABORT:
		kcs_bmc_ipmi_force_abort(priv);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

static int kcs_bmc_ipmi_release(struct inode *inode, struct file *filp)
{
	struct kcs_bmc_ipmi *priv = to_kcs_bmc(filp);

	kcs_bmc_ipmi_force_abort(priv);
	kcs_bmc_disable_device(priv->client.dev, &priv->client);

	return 0;
}

static const struct file_operations kcs_bmc_ipmi_fops = {
	.owner = THIS_MODULE,
	.open = kcs_bmc_ipmi_open,
	.read = kcs_bmc_ipmi_read,
	.write = kcs_bmc_ipmi_write,
	.release = kcs_bmc_ipmi_release,
	.poll = kcs_bmc_ipmi_poll,
	.unlocked_ioctl = kcs_bmc_ipmi_ioctl,
};

static DEFINE_SPINLOCK(kcs_bmc_ipmi_instances_lock);
static LIST_HEAD(kcs_bmc_ipmi_instances);
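
/*
 * Bind to a KCS device reported by the hardware driver: allocate the message
 * buffers, register the client callbacks, and expose the channel to
 * userspace as the /dev/ipmi-kcs<channel> miscdevice.
 */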
static int kcs_bmc_ipmi_add_device(struct kcs_bmc_device *kcs_bmc)
{
	struct kcs_bmc_ipmi *priv;
	int rc;

	priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	mutex_init(&priv->mutex);

	init_waitqueue_head(&priv->queue);

	priv->client.dev = kcs_bmc;
	priv->client.ops = &kcs_bmc_ipmi_client_ops;
	priv->data_in = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
	priv->data_out = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
	priv->kbuffer = devm_kmalloc(kcs_bmc->dev, KCS_MSG_BUFSIZ, GFP_KERNEL);

	priv->miscdev.minor = MISC_DYNAMIC_MINOR;
	priv->miscdev.name = devm_kasprintf(kcs_bmc->dev, GFP_KERNEL, "%s%u", DEVICE_NAME,
					    kcs_bmc->channel);
	if (!priv->data_in || !priv->data_out || !priv->kbuffer || !priv->miscdev.name)
		return -ENOMEM;

	priv->miscdev.fops = &kcs_bmc_ipmi_fops;

	rc = misc_register(&priv->miscdev);
	if (rc) {
		dev_err(kcs_bmc->dev, "Unable to register device: %d\n", rc);
		return rc;
	}

	spin_lock_irq(&kcs_bmc_ipmi_instances_lock);
	list_add(&priv->entry, &kcs_bmc_ipmi_instances);
	spin_unlock_irq(&kcs_bmc_ipmi_instances_lock);

	dev_info(kcs_bmc->dev, "Initialised IPMI client for channel %u\n", kcs_bmc->channel);

	return 0;
}

static int kcs_bmc_ipmi_remove_device(struct kcs_bmc_device *kcs_bmc)
{
	struct kcs_bmc_ipmi *priv = NULL, *pos;

	spin_lock_irq(&kcs_bmc_ipmi_instances_lock);
	list_for_each_entry(pos, &kcs_bmc_ipmi_instances, entry) {
		if (pos->client.dev == kcs_bmc) {
			priv = pos;
			list_del(&pos->entry);
			break;
		}
	}
	spin_unlock_irq(&kcs_bmc_ipmi_instances_lock);

	if (!priv)
		return -ENODEV;

	misc_deregister(&priv->miscdev);
	kcs_bmc_disable_device(priv->client.dev, &priv->client);
	devm_kfree(kcs_bmc->dev, priv->kbuffer);
	devm_kfree(kcs_bmc->dev, priv->data_out);
	devm_kfree(kcs_bmc->dev, priv->data_in);
	devm_kfree(kcs_bmc->dev, priv);

	return 0;
}

static const struct kcs_bmc_driver_ops kcs_bmc_ipmi_driver_ops = {
	.add_device = kcs_bmc_ipmi_add_device,
	.remove_device = kcs_bmc_ipmi_remove_device,
};

static struct kcs_bmc_driver kcs_bmc_ipmi_driver = {
	.ops = &kcs_bmc_ipmi_driver_ops,
};

static int __init kcs_bmc_ipmi_init(void)
{
	kcs_bmc_register_driver(&kcs_bmc_ipmi_driver);

	return 0;
}
module_init(kcs_bmc_ipmi_init);

static void __exit kcs_bmc_ipmi_exit(void)
{
	kcs_bmc_unregister_driver(&kcs_bmc_ipmi_driver);
}
module_exit(kcs_bmc_ipmi_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
MODULE_DESCRIPTION("KCS BMC driver to handle IPMI requests from system software");