0001
0002
0003
0004
0005
0006
0007 #include <linux/module.h>
0008 #include <linux/moduleparam.h>
0009 #include <linux/kernel.h>
0010 #include <linux/device.h>
0011 #include <linux/slab.h>
0012 #include <linux/fs.h>
0013 #include <linux/errno.h>
0014 #include <linux/types.h>
0015 #include <linux/fcntl.h>
0016 #include <linux/poll.h>
0017 #include <linux/init.h>
0018 #include <linux/ioctl.h>
0019 #include <linux/cdev.h>
0020 #include <linux/sched/signal.h>
0021 #include <linux/uuid.h>
0022 #include <linux/compat.h>
0023 #include <linux/jiffies.h>
0024 #include <linux/interrupt.h>
0025
0026 #include <linux/mei.h>
0027
0028 #include "mei_dev.h"
0029 #include "client.h"
0030
/* sysfs class backing the /dev/mei* character devices */
static struct class *mei_class;
/* base dev_t (major + first minor) allocated for MEI char devices */
static dev_t mei_devt;
#define MEI_MAX_DEVS MINORMASK
/* serializes minor-number allocation/release in mei_idr */
static DEFINE_MUTEX(mei_minor_lock);
/* minor number -> struct mei_device map */
static DEFINE_IDR(mei_idr);
0036
0037
0038
0039
0040
0041
0042
0043
0044
/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Allocates a host client, links it to the device and stores it in
 * file->private_data for the other file operations.
 *
 * Return: 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl *cl;

	int err;

	/* the cdev is embedded in struct mei_device */
	dev = container_of(inode->i_cdev, struct mei_device, cdev);

	mutex_lock(&dev->device_lock);

	/* refuse new clients unless the device is fully up */
	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
			mei_dev_state_str(dev->dev_state));
		err = -ENODEV;
		goto err_unlock;
	}

	cl = mei_cl_alloc_linked(dev);
	if (IS_ERR(cl)) {
		err = PTR_ERR(cl);
		goto err_unlock;
	}

	cl->fp = file;
	file->private_data = cl;

	mutex_unlock(&dev->device_lock);

	/* device is a stream: forbid seeking */
	return nonseekable_open(inode, file);

err_unlock:
	mutex_unlock(&dev->device_lock);
	return err;
}
0080
0081
0082
0083
0084
0085
0086
0087
0088 static void mei_cl_vtag_remove_by_fp(const struct mei_cl *cl,
0089 const struct file *fp)
0090 {
0091 struct mei_cl_vtag *vtag_l, *next;
0092
0093 list_for_each_entry_safe(vtag_l, next, &cl->vtag_map, list) {
0094 if (vtag_l->fp == fp) {
0095 list_del(&vtag_l->list);
0096 kfree(vtag_l);
0097 return;
0098 }
0099 }
0100 }
0101
0102
0103
0104
0105
0106
0107
0108
0109
/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Removes this file's vtag from the client; the client itself is only
 * disconnected and freed when the last vtag owner goes away.
 *
 * Return: 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	mei_cl_vtag_remove_by_fp(cl, file);

	/* other file descriptors still share this client: keep it connected */
	if (!list_empty(&cl->vtag_map)) {
		cl_dbg(dev, cl, "not the last vtag\n");
		mei_cl_flush_queues(cl, file);
		rets = 0;
		goto out;
	}

	rets = mei_cl_disconnect(cl);
	/*
	 * mei_cl_disconnect() may drop device_lock while waiting, so another
	 * fd can race in and add a vtag; re-check before tearing down.
	 */
	if (!list_empty(&cl->vtag_map)) {
		cl_dbg(dev, cl, "not the last vtag after disconnect\n");
		mei_cl_flush_queues(cl, file);
		goto out;
	}

	/* truly the last user: flush everything and free the client */
	mei_cl_flush_queues(cl, NULL);
	cl_dbg(dev, cl, "removing\n");

	mei_cl_unlink(cl);
	kfree(cl);

out:
	file->private_data = NULL;

	mutex_unlock(&dev->device_lock);
	return rets;
}
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
0166
/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Returns a previously completed read callback if one is pending,
 * otherwise issues a read request and (unless O_NONBLOCK) waits for
 * completion. Partial copies keep the callback queued and advance
 * *offset so a subsequent read continues from where it stopped.
 *
 * Return: >=0 data length on success , <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	struct mei_cl_cb *cb = NULL;
	bool nonblock = !!(file->f_flags & O_NONBLOCK);
	ssize_t rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;


	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	if (ubuf == NULL) {
		rets = -EMSGSIZE;
		goto out;
	}

	/* data already waiting from an earlier request? */
	cb = mei_cl_read_cb(cl, file);
	if (cb)
		goto copy_buffer;

	/* a fresh message starts at offset 0 */
	if (*offset > 0)
		*offset = 0;

	rets = mei_cl_read_start(cl, length, file);
	if (rets && rets != -EBUSY) {
		cl_dbg(dev, cl, "mei start read failure status = %zd\n", rets);
		goto out;
	}

	if (nonblock) {
		rets = -EAGAIN;
		goto out;
	}

	/* drop the lock while sleeping; wake on data or disconnect */
	mutex_unlock(&dev->device_lock);
	if (wait_event_interruptible(cl->rx_wait,
				     mei_cl_read_cb(cl, file) ||
				     !mei_cl_is_connected(cl))) {
		if (signal_pending(current))
			return -EINTR;
		return -ERESTARTSYS;
	}
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	cb = mei_cl_read_cb(cl, file);
	if (!cb) {
		rets = 0;
		goto out;
	}

copy_buffer:
	/* a failed read completes with a status; propagate it */
	if (cb->status) {
		rets = cb->status;
		cl_dbg(dev, cl, "read operation failed %zd\n", rets);
		goto free;
	}

	cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n",
	       cb->buf.size, cb->buf_idx, *offset);
	if (*offset >= cb->buf_idx) {
		rets = 0;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
		dev_dbg(dev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;

	/* not all data was read: keep the cb for the next read call */
	if (*offset < cb->buf_idx)
		goto out;

free:
	mei_cl_del_rd_completed(cl, cb);
	*offset = 0;

out:
	cl_dbg(dev, cl, "end mei read rets = %zd\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}
0277
0278
0279
0280
0281
0282
0283
0284
0285
0286 static u8 mei_cl_vtag_by_fp(const struct mei_cl *cl, const struct file *fp)
0287 {
0288 struct mei_cl_vtag *cl_vtag;
0289
0290 if (!fp)
0291 return 0;
0292
0293 list_for_each_entry(cl_vtag, &cl->vtag_map, list)
0294 if (cl_vtag->fp == fp)
0295 return cl_vtag->vtag;
0296 return 0;
0297 }
0298
0299
0300
0301
0302
0303
0304
0305
0306
0307
0308
/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer (unused; each write is one message)
 *
 * Validates the connection, applies tx queue flow control (blocking
 * unless O_NONBLOCK), then queues one message for transmission.
 *
 * Return: >=0 data length on success , <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	ssize_t rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (!mei_cl_is_connected(cl)) {
		cl_err(dev, cl, "is not connected");
		rets = -ENODEV;
		goto out;
	}

	/* the FW client may have disappeared (e.g. FW reset) */
	if (!mei_me_cl_is_active(cl->me_cl)) {
		rets = -ENOTTY;
		goto out;
	}

	/* one message must fit in a single MTU; no fragmentation here */
	if (length > mei_cl_mtu(cl)) {
		rets = -EFBIG;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	/* flow control: wait for room in the tx queue */
	while (cl->tx_cb_queued >= dev->tx_queue_limit) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}
		/* drop the lock while sleeping; wake on tx completion
		 * or disconnect */
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				(!mei_cl_is_connected(cl)));
		mutex_lock(&dev->device_lock);
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto out;
		}
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}
	/* tag the message with the vtag bound to this fd (0 if none) */
	cb->vtag = mei_cl_vtag_by_fp(cl, file);

	rets = copy_from_user(cb->buf.data, ubuf, length);
	if (rets) {
		dev_dbg(dev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		mei_io_cb_free(cb);
		goto out;
	}

	rets = mei_cl_write(cl, cb);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}
0391
0392
0393
0394
0395
0396
0397
0398
0399
0400
0401
0402
/**
 * mei_ioctl_connect_client - the connect to fw client IOCTL function
 *
 * @file: private data of the file object
 * @in_client_uuid: requested UUID for connection
 * @client: IOCTL connect data, output parameters
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_ioctl_connect_client(struct file *file,
				    const uuid_le *in_client_uuid,
				    struct mei_client *client)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;
	struct mei_cl *cl;
	int rets;

	cl = file->private_data;
	dev = cl->dev;

	/* only a fresh or disconnected client may (re)connect */
	if (cl->state != MEI_FILE_INITIALIZING &&
	    cl->state != MEI_FILE_DISCONNECTED)
		return -EBUSY;

	/* find ME client we're trying to connect to */
	me_cl = mei_me_cl_by_uuid(dev, in_client_uuid);
	if (!me_cl) {
		dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
			in_client_uuid);
		rets = -ENOTTY;
		goto end;
	}

	if (me_cl->props.fixed_address) {
		/* fixed-address clients need either module override or
		 * HBM fixed-address support from the FW */
		bool forbidden = dev->override_fixed_address ?
			 !dev->allow_fixed_address : !dev->hbm_f_fa_supported;
		if (forbidden) {
			dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n",
				in_client_uuid);
			rets = -ENOTTY;
			goto end;
		}
	}

	dev_dbg(dev->dev, "Connect to FW Client ID = %d\n",
		me_cl->client_id);
	dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n",
		me_cl->props.protocol_version);
	dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n",
		me_cl->props.max_msg_length);

	/* prepare the output buffer */
	client->max_msg_length = me_cl->props.max_msg_length;
	client->protocol_version = me_cl->props.protocol_version;
	dev_dbg(dev->dev, "Can connect?\n");

	rets = mei_cl_connect(cl, me_cl, file);

end:
	mei_me_cl_put(me_cl);
	return rets;
}
0457
0458
0459
0460
0461
0462
0463
0464
0465
0466
0467
0468
0469
0470
0471 static int mei_vt_support_check(struct mei_device *dev, const uuid_le *uuid)
0472 {
0473 struct mei_me_client *me_cl;
0474 int ret;
0475
0476 if (!dev->hbm_f_vt_supported)
0477 return -EOPNOTSUPP;
0478
0479 me_cl = mei_me_cl_by_uuid(dev, uuid);
0480 if (!me_cl) {
0481 dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
0482 uuid);
0483 return -ENOTTY;
0484 }
0485 ret = me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
0486 mei_me_cl_put(me_cl);
0487
0488 return ret;
0489 }
0490
0491
0492
0493
0494
0495
0496
0497
0498
0499
0500
0501
0502
/**
 * mei_ioctl_connect_vtag - connect to fw client with vtag IOCTL function
 *
 * @file: private data of the file object
 * @in_client_uuid: requested UUID for connection
 * @client: IOCTL connect data, output parameters
 * @vtag: vm tag
 *
 * Several file descriptors may share one host client, one per vtag.
 * If another client is already connected to the same FW client, this
 * file is re-pointed at it and its own client is freed.
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_ioctl_connect_vtag(struct file *file,
				  const uuid_le *in_client_uuid,
				  struct mei_client *client,
				  u8 vtag)
{
	struct mei_device *dev;
	struct mei_cl *cl;
	struct mei_cl *pos;
	struct mei_cl_vtag *cl_vtag;

	cl = file->private_data;
	dev = cl->dev;

	dev_dbg(dev->dev, "FW Client %pUl vtag %d\n", in_client_uuid, vtag);

	switch (cl->state) {
	case MEI_FILE_DISCONNECTED:
		/* a reconnect must reuse the vtag this fd already owns */
		if (mei_cl_vtag_by_fp(cl, file) != vtag) {
			dev_err(dev->dev, "reconnect with different vtag\n");
			return -EINVAL;
		}
		break;
	case MEI_FILE_INITIALIZING:
		/* malicious connect from another thread may push vtag */
		if (!IS_ERR(mei_cl_fp_by_vtag(cl, vtag))) {
			dev_err(dev->dev, "vtag already filled\n");
			return -EINVAL;
		}

		/* look for an existing client connected to the same FW
		 * client that does not yet use this vtag */
		list_for_each_entry(pos, &dev->file_list, link) {
			if (pos == cl)
				continue;
			if (!pos->me_cl)
				continue;

			/* only search for same UUID */
			if (uuid_le_cmp(*mei_cl_uuid(pos), *in_client_uuid))
				continue;

			/* if tag already exist try another fp */
			if (!IS_ERR(mei_cl_fp_by_vtag(pos, vtag)))
				continue;

			/* replace cl with acquired one */
			dev_dbg(dev->dev, "replacing with existing cl\n");
			mei_cl_unlink(cl);
			kfree(cl);
			file->private_data = pos;
			cl = pos;
			break;
		}

		cl_vtag = mei_cl_vtag_alloc(file, vtag);
		if (IS_ERR(cl_vtag))
			return -ENOMEM;

		list_add_tail(&cl_vtag->list, &cl->vtag_map);
		break;
	default:
		return -EBUSY;
	}

	/* the (possibly shared) client may be mid-transition; wait for a
	 * stable state, dropping the lock around each timed wait */
	while (cl->state != MEI_FILE_INITIALIZING &&
	       cl->state != MEI_FILE_DISCONNECTED &&
	       cl->state != MEI_FILE_CONNECTED) {
		mutex_unlock(&dev->device_lock);
		wait_event_timeout(cl->wait,
				   (cl->state == MEI_FILE_CONNECTED ||
				    cl->state == MEI_FILE_DISCONNECTED ||
				    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
				    cl->state == MEI_FILE_DISCONNECT_REPLY),
				   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
		mutex_lock(&dev->device_lock);
	}

	/* not connected yet (new or disconnected): do the real connect */
	if (!mei_cl_is_connected(cl))
		return mei_ioctl_connect_client(file, in_client_uuid, client);

	/* already connected: just fill the output properties */
	client->max_msg_length = cl->me_cl->props.max_msg_length;
	client->protocol_version = cl->me_cl->props.protocol_version;

	return 0;
}
0586
0587
0588
0589
0590
0591
0592
0593
0594
0595
0596 static int mei_ioctl_client_notify_request(const struct file *file, u32 request)
0597 {
0598 struct mei_cl *cl = file->private_data;
0599
0600 if (request != MEI_HBM_NOTIFICATION_START &&
0601 request != MEI_HBM_NOTIFICATION_STOP)
0602 return -EINVAL;
0603
0604 return mei_cl_notify_request(cl, file, (u8)request);
0605 }
0606
0607
0608
0609
0610
0611
0612
0613
0614
0615 static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get)
0616 {
0617 struct mei_cl *cl = file->private_data;
0618 bool notify_ev;
0619 bool block = (file->f_flags & O_NONBLOCK) == 0;
0620 int rets;
0621
0622 rets = mei_cl_notify_get(cl, block, ¬ify_ev);
0623 if (rets)
0624 return rets;
0625
0626 *notify_get = notify_ev ? 1 : 0;
0627 return 0;
0628 }
0629
0630
0631
0632
0633
0634
0635
0636
0637
0638
/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure (user space)
 *
 * Return: 0 on success , <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data conn;
	struct mei_connect_client_data_vtag conn_vtag;
	const uuid_le *cl_uuid;
	struct mei_client *props;
	u8 vtag;
	u32 notify_get, notify_req;
	int rets;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(dev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	switch (cmd) {
	case IOCTL_MEI_CONNECT_CLIENT:
		dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
		if (copy_from_user(&conn, (char __user *)data, sizeof(conn))) {
			dev_dbg(dev->dev, "failed to copy data from userland\n");
			rets = -EFAULT;
			goto out;
		}
		cl_uuid = &conn.in_client_uuid;
		props = &conn.out_client_properties;
		vtag = 0;

		/* if the FW client supports vtags, route even the plain
		 * connect through the vtag path with vtag 0 */
		rets = mei_vt_support_check(dev, cl_uuid);
		if (rets == -ENOTTY)
			goto out;
		if (!rets)
			rets = mei_ioctl_connect_vtag(file, cl_uuid, props,
						      vtag);
		else
			rets = mei_ioctl_connect_client(file, cl_uuid, props);
		if (rets)
			goto out;

		/* if all is ok, copying the data back to user. */
		if (copy_to_user((char __user *)data, &conn, sizeof(conn))) {
			dev_dbg(dev->dev, "failed to copy data to userland\n");
			rets = -EFAULT;
			goto out;
		}

		break;

	case IOCTL_MEI_CONNECT_CLIENT_VTAG:
		dev_dbg(dev->dev, "IOCTL_MEI_CONNECT_CLIENT_VTAG\n");
		if (copy_from_user(&conn_vtag, (char __user *)data,
				   sizeof(conn_vtag))) {
			dev_dbg(dev->dev, "failed to copy data from userland\n");
			rets = -EFAULT;
			goto out;
		}

		cl_uuid = &conn_vtag.connect.in_client_uuid;
		props = &conn_vtag.out_client_properties;
		vtag = conn_vtag.connect.vtag;

		rets = mei_vt_support_check(dev, cl_uuid);
		if (rets == -EOPNOTSUPP)
			dev_dbg(dev->dev, "FW Client %pUl does not support vtags\n",
				cl_uuid);
		if (rets)
			goto out;

		/* vtag 0 is reserved for the non-vtag connect path */
		if (!vtag) {
			dev_dbg(dev->dev, "vtag can't be zero\n");
			rets = -EINVAL;
			goto out;
		}

		rets = mei_ioctl_connect_vtag(file, cl_uuid, props, vtag);
		if (rets)
			goto out;

		/* if all is ok, copying the data back to user. */
		if (copy_to_user((char __user *)data, &conn_vtag,
				 sizeof(conn_vtag))) {
			dev_dbg(dev->dev, "failed to copy data to userland\n");
			rets = -EFAULT;
			goto out;
		}

		break;

	case IOCTL_MEI_NOTIFY_SET:
		dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_SET.\n");
		if (copy_from_user(&notify_req,
				   (char __user *)data, sizeof(notify_req))) {
			dev_dbg(dev->dev, "failed to copy data from userland\n");
			rets = -EFAULT;
			goto out;
		}
		rets = mei_ioctl_client_notify_request(file, notify_req);
		break;

	case IOCTL_MEI_NOTIFY_GET:
		dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_GET.\n");
		rets = mei_ioctl_client_notify_get(file, &notify_get);
		if (rets)
			goto out;

		dev_dbg(dev->dev, "copy connect data to user\n");
		if (copy_to_user((char __user *)data,
				 &notify_get, sizeof(notify_get))) {
			dev_dbg(dev->dev, "failed to copy data to userland\n");
			rets = -EFAULT;
			goto out;

		}
		break;

	default:
		rets = -ENOIOCTLCMD;
	}

out:
	mutex_unlock(&dev->device_lock);
	return rets;
}
0772
0773
0774
0775
0776
0777
0778
0779
0780
/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * Return: poll mask
 */
static __poll_t mei_poll(struct file *file, poll_table *wait)
{
	__poll_t req_events = poll_requested_events(wait);
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	__poll_t mask = 0;
	bool notify_en;

	if (WARN_ON(!cl || !cl->dev))
		return EPOLLERR;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	/* EPOLLPRI is used for FW event notifications */
	notify_en = cl->notify_en && (req_events & EPOLLPRI);

	if (dev->dev_state != MEI_DEV_ENABLED ||
	    !mei_cl_is_connected(cl)) {
		mask = EPOLLERR;
		goto out;
	}

	if (notify_en) {
		poll_wait(file, &cl->ev_wait, wait);
		if (cl->notify_ev)
			mask |= EPOLLPRI;
	}

	if (req_events & (EPOLLIN | EPOLLRDNORM)) {
		poll_wait(file, &cl->rx_wait, wait);

		if (mei_cl_read_cb(cl, file))
			mask |= EPOLLIN | EPOLLRDNORM;
		else
			/* no data yet: kick off a read so a later wakeup
			 * has something to deliver */
			mei_cl_read_start(cl, mei_cl_mtu(cl), file);
	}

	if (req_events & (EPOLLOUT | EPOLLWRNORM)) {
		poll_wait(file, &cl->tx_wait, wait);
		/* writable while the tx queue has room */
		if (cl->tx_cb_queued < dev->tx_queue_limit)
			mask |= EPOLLOUT | EPOLLWRNORM;
	}

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}
0829
0830
0831
0832
0833
0834
0835
0836
0837 static bool mei_cl_is_write_queued(struct mei_cl *cl)
0838 {
0839 struct mei_device *dev = cl->dev;
0840 struct mei_cl_cb *cb;
0841
0842 list_for_each_entry(cb, &dev->write_list, list)
0843 if (cb->cl == cl)
0844 return true;
0845 list_for_each_entry(cb, &dev->write_waiting_list, list)
0846 if (cb->cl == cl)
0847 return true;
0848 return false;
0849 }
0850
0851
0852
0853
0854
0855
0856
0857
0858
0859
0860
/**
 * mei_fsync - the fsync handler
 *
 * @fp:       pointer to file structure
 * @start:    unused
 * @end:      unused
 * @datasync: unused
 *
 * Waits until all of this client's queued writes have been sent.
 *
 * Return: 0 on success, -ENODEV if the device or connection is gone,
 *         -EINTR if interrupted by a signal
 */
static int mei_fsync(struct file *fp, loff_t start, loff_t end, int datasync)
{
	struct mei_cl *cl = fp->private_data;
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED || !mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	while (mei_cl_is_write_queued(cl)) {
		/* drop the lock while sleeping; wake on tx completion
		 * or disconnect */
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				!mei_cl_is_connected(cl));
		mutex_lock(&dev->device_lock);
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto out;
		}
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}
	rets = 0;
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}
0900
0901
0902
0903
0904
0905
0906
0907
0908
0909
0910
0911
0912 static int mei_fasync(int fd, struct file *file, int band)
0913 {
0914
0915 struct mei_cl *cl = file->private_data;
0916
0917 if (!mei_cl_is_connected(cl))
0918 return -ENODEV;
0919
0920 return fasync_helper(fd, file, band, &cl->ev_async);
0921 }
0922
0923
0924
0925
0926
0927
0928
0929
0930
0931
0932 static ssize_t trc_show(struct device *device,
0933 struct device_attribute *attr, char *buf)
0934 {
0935 struct mei_device *dev = dev_get_drvdata(device);
0936 u32 trc;
0937 int ret;
0938
0939 ret = mei_trc_status(dev, &trc);
0940 if (ret)
0941 return ret;
0942 return sprintf(buf, "%08X\n", trc);
0943 }
0944 static DEVICE_ATTR_RO(trc);
0945
0946
0947
0948
0949
0950
0951
0952
0953
0954
0955 static ssize_t fw_status_show(struct device *device,
0956 struct device_attribute *attr, char *buf)
0957 {
0958 struct mei_device *dev = dev_get_drvdata(device);
0959 struct mei_fw_status fw_status;
0960 int err, i;
0961 ssize_t cnt = 0;
0962
0963 mutex_lock(&dev->device_lock);
0964 err = mei_fw_status(dev, &fw_status);
0965 mutex_unlock(&dev->device_lock);
0966 if (err) {
0967 dev_err(device, "read fw_status error = %d\n", err);
0968 return err;
0969 }
0970
0971 for (i = 0; i < fw_status.count; i++)
0972 cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%08X\n",
0973 fw_status.status[i]);
0974 return cnt;
0975 }
0976 static DEVICE_ATTR_RO(fw_status);
0977
0978
0979
0980
0981
0982
0983
0984
0985
0986
0987 static ssize_t hbm_ver_show(struct device *device,
0988 struct device_attribute *attr, char *buf)
0989 {
0990 struct mei_device *dev = dev_get_drvdata(device);
0991 struct hbm_version ver;
0992
0993 mutex_lock(&dev->device_lock);
0994 ver = dev->version;
0995 mutex_unlock(&dev->device_lock);
0996
0997 return sprintf(buf, "%u.%u\n", ver.major_version, ver.minor_version);
0998 }
0999 static DEVICE_ATTR_RO(hbm_ver);
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010 static ssize_t hbm_ver_drv_show(struct device *device,
1011 struct device_attribute *attr, char *buf)
1012 {
1013 return sprintf(buf, "%u.%u\n", HBM_MAJOR_VERSION, HBM_MINOR_VERSION);
1014 }
1015 static DEVICE_ATTR_RO(hbm_ver_drv);
1016
1017 static ssize_t tx_queue_limit_show(struct device *device,
1018 struct device_attribute *attr, char *buf)
1019 {
1020 struct mei_device *dev = dev_get_drvdata(device);
1021 u8 size = 0;
1022
1023 mutex_lock(&dev->device_lock);
1024 size = dev->tx_queue_limit;
1025 mutex_unlock(&dev->device_lock);
1026
1027 return sysfs_emit(buf, "%u\n", size);
1028 }
1029
/**
 * tx_queue_limit_store - set the per-client tx queue limit via sysfs
 *
 * @device: the device structure
 * @attr: sysfs attribute (unused)
 * @buf: user-supplied decimal value
 * @count: length of @buf
 *
 * Return: @count on success, -EINVAL if out of
 *         [MEI_TX_QUEUE_LIMIT_MIN, MEI_TX_QUEUE_LIMIT_MAX], or the
 *         kstrtouint() error on parse failure
 */
static ssize_t tx_queue_limit_store(struct device *device,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct mei_device *dev = dev_get_drvdata(device);
	u8 limit;
	unsigned int inp;
	int err;

	err = kstrtouint(buf, 10, &inp);
	if (err)
		return err;
	if (inp > MEI_TX_QUEUE_LIMIT_MAX || inp < MEI_TX_QUEUE_LIMIT_MIN)
		return -EINVAL;
	/* range-checked above, fits in u8 */
	limit = inp;

	mutex_lock(&dev->device_lock);
	dev->tx_queue_limit = limit;
	mutex_unlock(&dev->device_lock);

	return count;
}
static DEVICE_ATTR_RW(tx_queue_limit);
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063 static ssize_t fw_ver_show(struct device *device,
1064 struct device_attribute *attr, char *buf)
1065 {
1066 struct mei_device *dev = dev_get_drvdata(device);
1067 struct mei_fw_version *ver;
1068 ssize_t cnt = 0;
1069 int i;
1070
1071 ver = dev->fw_ver;
1072
1073 for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++)
1074 cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u:%u.%u.%u.%u\n",
1075 ver[i].platform, ver[i].major, ver[i].minor,
1076 ver[i].hotfix, ver[i].buildno);
1077 return cnt;
1078 }
1079 static DEVICE_ATTR_RO(fw_ver);
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090 static ssize_t dev_state_show(struct device *device,
1091 struct device_attribute *attr, char *buf)
1092 {
1093 struct mei_device *dev = dev_get_drvdata(device);
1094 enum mei_dev_state dev_state;
1095
1096 mutex_lock(&dev->device_lock);
1097 dev_state = dev->dev_state;
1098 mutex_unlock(&dev->device_lock);
1099
1100 return sprintf(buf, "%s", mei_dev_state_str(dev_state));
1101 }
1102 static DEVICE_ATTR_RO(dev_state);
1103
1104
1105
1106
1107
1108
1109
/**
 * mei_set_devstate - set the device state and notify sysfs watchers
 *
 * @dev: mei device
 * @state: new device state
 *
 * Pokes the "dev_state" sysfs attribute so userspace poll()ers see
 * the transition. No-op if the state is unchanged.
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state)
{
	struct device *clsdev;

	if (dev->dev_state == state)
		return;

	dev->dev_state = state;

	/* class device may not exist yet (before mei_register) */
	clsdev = class_find_device_by_devt(mei_class, dev->cdev.dev);
	if (clsdev) {
		sysfs_notify(&clsdev->kobj, NULL, "dev_state");
		/* balance class_find_device_by_devt()'s reference */
		put_device(clsdev);
	}
}
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135 static ssize_t kind_show(struct device *device,
1136 struct device_attribute *attr, char *buf)
1137 {
1138 struct mei_device *dev = dev_get_drvdata(device);
1139 ssize_t ret;
1140
1141 if (dev->kind)
1142 ret = sprintf(buf, "%s\n", dev->kind);
1143 else
1144 ret = sprintf(buf, "%s\n", "mei");
1145
1146 return ret;
1147 }
1148 static DEVICE_ATTR_RO(kind);
1149
/* sysfs attributes exposed on the mei class device */
static struct attribute *mei_attrs[] = {
	&dev_attr_fw_status.attr,
	&dev_attr_hbm_ver.attr,
	&dev_attr_hbm_ver_drv.attr,
	&dev_attr_tx_queue_limit.attr,
	&dev_attr_fw_ver.attr,
	&dev_attr_dev_state.attr,
	&dev_attr_trc.attr,
	&dev_attr_kind.attr,
	NULL
};
ATTRIBUTE_GROUPS(mei);
1162
1163
1164
1165
/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.fsync = mei_fsync,
	.fasync = mei_fasync,
	.llseek = no_llseek
};
1179
1180
1181
1182
1183
1184
1185
1186
1187 static int mei_minor_get(struct mei_device *dev)
1188 {
1189 int ret;
1190
1191 mutex_lock(&mei_minor_lock);
1192 ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL);
1193 if (ret >= 0)
1194 dev->minor = ret;
1195 else if (ret == -ENOSPC)
1196 dev_err(dev->dev, "too many mei devices\n");
1197
1198 mutex_unlock(&mei_minor_lock);
1199 return ret;
1200 }
1201
1202
1203
1204
1205
1206
/**
 * mei_minor_free - return the device minor number to the free pool
 *
 * @dev: device pointer
 */
static void mei_minor_free(struct mei_device *dev)
{
	mutex_lock(&mei_minor_lock);
	idr_remove(&mei_idr, dev->minor);
	mutex_unlock(&mei_minor_lock);
}
1213
/**
 * mei_register - register the mei char device and its sysfs attributes
 *
 * @dev: mei device
 * @parent: parent (bus) device
 *
 * Allocates a minor, adds the cdev, creates the class device with the
 * mei attribute groups and registers the debugfs entries.
 *
 * Return: 0 on success, <0 on error (all partial setup undone)
 */
int mei_register(struct mei_device *dev, struct device *parent)
{
	struct device *clsdev;
	int ret, devno;

	ret = mei_minor_get(dev);
	if (ret < 0)
		return ret;

	/* Fill in the data structures */
	devno = MKDEV(MAJOR(mei_devt), dev->minor);
	cdev_init(&dev->cdev, &mei_fops);
	/* pin the parent driver's module while the cdev is open */
	dev->cdev.owner = parent->driver->owner;

	/* Add the device */
	ret = cdev_add(&dev->cdev, devno, 1);
	if (ret) {
		dev_err(parent, "unable to add device %d:%d\n",
			MAJOR(mei_devt), dev->minor);
		goto err_dev_add;
	}

	clsdev = device_create_with_groups(mei_class, parent, devno,
					   dev, mei_groups,
					   "mei%d", dev->minor);

	if (IS_ERR(clsdev)) {
		dev_err(parent, "unable to create device %d:%d\n",
			MAJOR(mei_devt), dev->minor);
		ret = PTR_ERR(clsdev);
		goto err_dev_create;
	}

	mei_dbgfs_register(dev, dev_name(clsdev));

	return 0;

err_dev_create:
	cdev_del(&dev->cdev);
err_dev_add:
	mei_minor_free(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_register);
1257 EXPORT_SYMBOL_GPL(mei_register);
1258
/**
 * mei_deregister - unwind mei_register()
 *
 * @dev: mei device
 *
 * Removes the cdev, debugfs entries and class device, then releases
 * the minor number.
 */
void mei_deregister(struct mei_device *dev)
{
	int devno;

	/* save devno before cdev_del() invalidates the cdev */
	devno = dev->cdev.dev;
	cdev_del(&dev->cdev);

	mei_dbgfs_deregister(dev);

	device_destroy(mei_class, devno);

	mei_minor_free(dev);
}
EXPORT_SYMBOL_GPL(mei_deregister);
1273
/* module init: create the class, the char region and the mei bus */
static int __init mei_init(void)
{
	int ret;

	mei_class = class_create(THIS_MODULE, "mei");
	if (IS_ERR(mei_class)) {
		pr_err("couldn't create class\n");
		ret = PTR_ERR(mei_class);
		goto err;
	}

	ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei");
	if (ret < 0) {
		pr_err("unable to allocate char dev region\n");
		goto err_class;
	}

	ret = mei_cl_bus_init();
	if (ret < 0) {
		pr_err("unable to initialize bus\n");
		goto err_chrdev;
	}

	return 0;

err_chrdev:
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
err_class:
	class_destroy(mei_class);
err:
	return ret;
}
1306
/* module exit: tear down what mei_init() set up */
static void __exit mei_exit(void)
{
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
	class_destroy(mei_class);
	mei_cl_bus_exit();
}
1313
1314 module_init(mei_init);
1315 module_exit(mei_exit);
1316
1317 MODULE_AUTHOR("Intel Corporation");
1318 MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
1319 MODULE_LICENSE("GPL v2");
1320