/*
 * Driver giving user-space access to the kernel's xenbus connection
 * to xenstore.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/uio.h>
#include <linux/notifier.h>
#include <linux/wait.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/workqueue.h>

#include <xen/xenbus.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

#include "xenbus.h"

unsigned int xb_dev_generation_id;
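/*
 * An element of a list of outstanding transactions, for which we're
 * still waiting a reply.
 */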
struct xenbus_transaction_holder {
	struct list_head list;
	struct xenbus_transaction handle;
	unsigned int generation_id;
};
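/*
 * A buffer of data on the read queue.
 */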
struct read_buffer {
	struct list_head list;
	unsigned int cons;
	unsigned int len;
	char msg[];
};

struct xenbus_file_priv {
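	/*
	 * msgbuffer_mutex is held while partial requests are built up
	 * and complete requests are acted on.  It therefore protects
	 * the "transactions" and "watches" lists, and the partial
	 * request buffer (len/u below).
	 */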
	struct mutex msgbuffer_mutex;

	/* In-progress transactions. */
	struct list_head transactions;

	/* Active watches. */
	struct list_head watches;

	/* Partial request. */
	unsigned int len;
	union {
		struct xsd_sockmsg msg;
		char buffer[XENSTORE_PAYLOAD_MAX];
	} u;

	/* Response queue. */
	struct mutex reply_mutex;
	struct list_head read_buffers;
	wait_queue_head_t read_waitq;

	struct kref kref;

	struct work_struct wq;
};
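/* Read out any raw xenbus messages queued up. */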
static ssize_t xenbus_file_read(struct file *filp,
				char __user *ubuf,
				size_t len, loff_t *ppos)
{
	struct xenbus_file_priv *u = filp->private_data;
	struct read_buffer *rb;
	ssize_t i;
	int ret;

	mutex_lock(&u->reply_mutex);
again:
	while (list_empty(&u->read_buffers)) {
		mutex_unlock(&u->reply_mutex);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		ret = wait_event_interruptible(u->read_waitq,
					       !list_empty(&u->read_buffers));
		if (ret)
			return ret;
		mutex_lock(&u->reply_mutex);
	}

	rb = list_entry(u->read_buffers.next, struct read_buffer, list);
	i = 0;
	while (i < len) {
		size_t sz = min_t(size_t, len - i, rb->len - rb->cons);

		ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);

		i += sz - ret;
		rb->cons += sz - ret;

		if (ret != 0) {
			if (i == 0)
				i = -EFAULT;
			goto out;
		}

		/* Clear out buffer if it has been consumed. */
		if (rb->cons == rb->len) {
			list_del(&rb->list);
			kfree(rb);
			if (list_empty(&u->read_buffers))
				break;
			rb = list_entry(u->read_buffers.next,
					struct read_buffer, list);
		}
	}
	if (i == 0)
		goto again;

out:
	mutex_unlock(&u->reply_mutex);
	return i;
}
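/*
 * Add a buffer to the queue.  Callers must hold the appropriate lock
 * if the queue is not local.  (Commonly the caller will build up
 * multiple queued buffers on a temporary local list, and then splice
 * that list onto the shared read queue later.)
 */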
static int queue_reply(struct list_head *queue, const void *data, size_t len)
{
	struct read_buffer *rb;

	if (len == 0)
		return 0;
	if (len > XENSTORE_PAYLOAD_MAX)
		return -EINVAL;

	rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
	if (rb == NULL)
		return -ENOMEM;

	rb->cons = 0;
	rb->len = len;

	memcpy(rb->msg, data, len);

	list_add_tail(&rb->list, queue);
	return 0;
}
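/*
 * Free all of the read buffers on a queue.
 */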
static void queue_cleanup(struct list_head *list)
{
	struct read_buffer *rb;

	while (!list_empty(list)) {
		rb = list_entry(list->next, struct read_buffer, list);
		list_del(list->next);
		kfree(rb);
	}
}

struct watch_adapter {
	struct list_head list;
	struct xenbus_watch watch;
	struct xenbus_file_priv *dev_data;
	char *token;
};

static void free_watch_adapter(struct watch_adapter *watch)
{
	kfree(watch->watch.node);
	kfree(watch->token);
	kfree(watch);
}

static struct watch_adapter *alloc_watch_adapter(const char *path,
						 const char *token)
{
	struct watch_adapter *watch;

	watch = kzalloc(sizeof(*watch), GFP_KERNEL);
	if (watch == NULL)
		goto out_fail;

	watch->watch.node = kstrdup(path, GFP_KERNEL);
	if (watch->watch.node == NULL)
		goto out_free;

	watch->token = kstrdup(token, GFP_KERNEL);
	if (watch->token == NULL)
		goto out_free;

	return watch;

out_free:
	free_watch_adapter(watch);

out_fail:
	return NULL;
}

static void watch_fired(struct xenbus_watch *watch,
			const char *path,
			const char *token)
{
	struct watch_adapter *adap;
	struct xsd_sockmsg hdr;
	const char *token_caller;
	int path_len, tok_len, body_len;
	int ret;
	LIST_HEAD(staging_q);

	adap = container_of(watch, struct watch_adapter, watch);

	token_caller = adap->token;

	path_len = strlen(path) + 1;
	tok_len = strlen(token_caller) + 1;
	body_len = path_len + tok_len;

	hdr.type = XS_WATCH_EVENT;
	hdr.len = body_len;

	mutex_lock(&adap->dev_data->reply_mutex);

	ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
	if (!ret)
		ret = queue_reply(&staging_q, path, path_len);
	if (!ret)
		ret = queue_reply(&staging_q, token_caller, tok_len);

	if (!ret) {
		/* Success: pass the event on to the reader. */
		list_splice_tail(&staging_q, &adap->dev_data->read_buffers);
		wake_up(&adap->dev_data->read_waitq);
	} else
		queue_cleanup(&staging_q);

	mutex_unlock(&adap->dev_data->reply_mutex);
}

static void xenbus_worker(struct work_struct *wq)
{
	struct xenbus_file_priv *u;
	struct xenbus_transaction_holder *trans, *tmp;
	struct watch_adapter *watch, *tmp_watch;
	struct read_buffer *rb, *tmp_rb;

	u = container_of(wq, struct xenbus_file_priv, wq);
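	/*
	 * No need for locking here because there are no other users,
	 * by definition.
	 */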
	list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
		xenbus_transaction_end(trans->handle, 1);
		list_del(&trans->list);
		kfree(trans);
	}

	list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
		unregister_xenbus_watch(&watch->watch);
		list_del(&watch->list);
		free_watch_adapter(watch);
	}

	list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
		list_del(&rb->list);
		kfree(rb);
	}
	kfree(u);
}

static void xenbus_file_free(struct kref *kref)
{
	struct xenbus_file_priv *u;
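	/*
	 * We might be called from the xenbus thread; defer the cleanup
	 * to a workqueue to avoid deadlock.
	 */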
	u = container_of(kref, struct xenbus_file_priv, kref);
	schedule_work(&u->wq);
}

static struct xenbus_transaction_holder *xenbus_get_transaction(
	struct xenbus_file_priv *u, uint32_t tx_id)
{
	struct xenbus_transaction_holder *trans;

	list_for_each_entry(trans, &u->transactions, list)
		if (trans->handle.id == tx_id)
			return trans;

	return NULL;
}

void xenbus_dev_queue_reply(struct xb_req_data *req)
{
	struct xenbus_file_priv *u = req->par;
	struct xenbus_transaction_holder *trans = NULL;
	int rc;
	LIST_HEAD(staging_q);

	xs_request_exit(req);

	mutex_lock(&u->msgbuffer_mutex);

	if (req->type == XS_TRANSACTION_START) {
		trans = xenbus_get_transaction(u, 0);
		if (WARN_ON(!trans))
			goto out;
		if (req->msg.type == XS_ERROR) {
			list_del(&trans->list);
			kfree(trans);
		} else {
			rc = kstrtou32(req->body, 10, &trans->handle.id);
			if (WARN_ON(rc))
				goto out;
		}
	} else if (req->type == XS_TRANSACTION_END) {
		trans = xenbus_get_transaction(u, req->msg.tx_id);
		if (WARN_ON(!trans))
			goto out;
		list_del(&trans->list);
		kfree(trans);
	}

	mutex_unlock(&u->msgbuffer_mutex);

	mutex_lock(&u->reply_mutex);
	rc = queue_reply(&staging_q, &req->msg, sizeof(req->msg));
	if (!rc)
		rc = queue_reply(&staging_q, req->body, req->msg.len);
	if (!rc) {
		list_splice_tail(&staging_q, &u->read_buffers);
		wake_up(&u->read_waitq);
	} else {
		queue_cleanup(&staging_q);
	}
	mutex_unlock(&u->reply_mutex);

	kfree(req->body);
	kfree(req);

	kref_put(&u->kref, xenbus_file_free);

	return;

out:
	mutex_unlock(&u->msgbuffer_mutex);
}

static int xenbus_command_reply(struct xenbus_file_priv *u,
				unsigned int msg_type, const char *reply)
{
	struct {
		struct xsd_sockmsg hdr;
		char body[16];
	} msg;
	int rc;

	msg.hdr = u->u.msg;
	msg.hdr.type = msg_type;
	msg.hdr.len = strlen(reply) + 1;
	if (msg.hdr.len > sizeof(msg.body))
		return -E2BIG;
	memcpy(&msg.body, reply, msg.hdr.len);

	mutex_lock(&u->reply_mutex);
	rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
	wake_up(&u->read_waitq);
	mutex_unlock(&u->reply_mutex);

	if (!rc)
		kref_put(&u->kref, xenbus_file_free);

	return rc;
}

static int xenbus_write_transaction(unsigned msg_type,
				    struct xenbus_file_priv *u)
{
	int rc;
	struct xenbus_transaction_holder *trans = NULL;
	struct {
		struct xsd_sockmsg hdr;
		char body[];
	} *msg = (void *)u->u.buffer;

	if (msg_type == XS_TRANSACTION_START) {
		trans = kzalloc(sizeof(*trans), GFP_KERNEL);
		if (!trans) {
			rc = -ENOMEM;
			goto out;
		}
		trans->generation_id = xb_dev_generation_id;
		list_add(&trans->list, &u->transactions);
	} else if (msg->hdr.tx_id != 0 &&
		   !xenbus_get_transaction(u, msg->hdr.tx_id))
		return xenbus_command_reply(u, XS_ERROR, "ENOENT");
	else if (msg_type == XS_TRANSACTION_END &&
		 !(msg->hdr.len == 2 &&
		   (!strcmp(msg->body, "T") || !strcmp(msg->body, "F"))))
		return xenbus_command_reply(u, XS_ERROR, "EINVAL");
	else if (msg_type == XS_TRANSACTION_END) {
		trans = xenbus_get_transaction(u, msg->hdr.tx_id);
		if (trans && trans->generation_id != xb_dev_generation_id) {
			list_del(&trans->list);
			kfree(trans);
			if (!strcmp(msg->body, "T"))
				return xenbus_command_reply(u, XS_ERROR,
							    "EAGAIN");
			else
				return xenbus_command_reply(u,
							    XS_TRANSACTION_END,
							    "OK");
		}
	}

	rc = xenbus_dev_request_and_reply(&msg->hdr, u);
	if (rc && trans) {
		list_del(&trans->list);
		kfree(trans);
	}

out:
	return rc;
}

static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
{
	struct watch_adapter *watch;
	char *path, *token;
	int err, rc;

	path = u->u.buffer + sizeof(u->u.msg);
	token = memchr(path, 0, u->u.msg.len);
	if (token == NULL) {
		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
		goto out;
	}
	token++;
	if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) {
		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
		goto out;
	}

	if (msg_type == XS_WATCH) {
		watch = alloc_watch_adapter(path, token);
		if (watch == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		watch->watch.callback = watch_fired;
		watch->dev_data = u;

		err = register_xenbus_watch(&watch->watch);
		if (err) {
			free_watch_adapter(watch);
			rc = err;
			goto out;
		}
		list_add(&watch->list, &u->watches);
	} else {
		list_for_each_entry(watch, &u->watches, list) {
			if (!strcmp(watch->token, token) &&
			    !strcmp(watch->watch.node, path)) {
				unregister_xenbus_watch(&watch->watch);
				list_del(&watch->list);
				free_watch_adapter(watch);
				break;
			}
		}
	}

	/* Success.  Synthesize a reply to say all is OK. */
	rc = xenbus_command_reply(u, msg_type, "OK");

out:
	return rc;
}

static ssize_t xenbus_file_write(struct file *filp,
				 const char __user *ubuf,
				 size_t len, loff_t *ppos)
{
	struct xenbus_file_priv *u = filp->private_data;
	uint32_t msg_type;
	int rc = len;
	int ret;
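	/*
	 * We're expecting usermode to be writing properly formed
	 * xenbus messages.  If they write an incomplete message we
	 * buffer it up.  Once it is complete, we act on it.
	 */

	/*
	 * Make sure concurrent writers can't stomp all over each
	 * other's messages and make a mess of our partial message
	 * buffer.  We don't make any attempt to stop multiple
	 * writers from making a mess of each other's incomplete
	 * messages; we're just trying to guarantee our own internal
	 * consistency and make sure that single writes are handled
	 * atomically.
	 */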
	mutex_lock(&u->msgbuffer_mutex);

	/* Get this out of the way early to avoid confusion. */
	if (len == 0)
		goto out;

	/* Can't write a xenbus message larger than we can buffer. */
	if (len > sizeof(u->u.buffer) - u->len) {
		/* On error, dump the existing buffer. */
		u->len = 0;
		rc = -EINVAL;
		goto out;
	}

	ret = copy_from_user(u->u.buffer + u->len, ubuf, len);

	if (ret != 0) {
		rc = -EFAULT;
		goto out;
	}

	/* Deal with a partial copy. */
	len -= ret;
	rc = len;

	u->len += len;

	/* Return if we haven't got a full message yet. */
	if (u->len < sizeof(u->u.msg))
		goto out;	/* not even the header yet */

	/* If we're expecting a message that's larger than we can
	   possibly send, dump what we have and leave. */
	if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) {
		rc = -E2BIG;
		u->len = 0;
		goto out;
	}

	if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
		goto out;	/* incomplete data portion */
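	/*
	 * OK, now we have a complete message.  Do something with it.
	 */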
	kref_get(&u->kref);

	msg_type = u->u.msg.type;

	switch (msg_type) {
	case XS_WATCH:
	case XS_UNWATCH:
		/* (Un)Ask for some path to be watched for changes. */
		ret = xenbus_write_watch(msg_type, u);
		break;

	default:
		/* Send out a transaction. */
		ret = xenbus_write_transaction(msg_type, u);
		break;
	}
	if (ret != 0) {
		rc = ret;
		kref_put(&u->kref, xenbus_file_free);
	}

	/* Buffered message consumed. */
	u->len = 0;

out:
	mutex_unlock(&u->msgbuffer_mutex);
	return rc;
}

static int xenbus_file_open(struct inode *inode, struct file *filp)
{
	struct xenbus_file_priv *u;

	if (xen_store_evtchn == 0)
		return -ENOENT;

	stream_open(inode, filp);

	u = kzalloc(sizeof(*u), GFP_KERNEL);
	if (u == NULL)
		return -ENOMEM;

	kref_init(&u->kref);

	INIT_LIST_HEAD(&u->transactions);
	INIT_LIST_HEAD(&u->watches);
	INIT_LIST_HEAD(&u->read_buffers);
	init_waitqueue_head(&u->read_waitq);
	INIT_WORK(&u->wq, xenbus_worker);

	mutex_init(&u->reply_mutex);
	mutex_init(&u->msgbuffer_mutex);

	filp->private_data = u;

	return 0;
}

static int xenbus_file_release(struct inode *inode, struct file *filp)
{
	struct xenbus_file_priv *u = filp->private_data;

	kref_put(&u->kref, xenbus_file_free);

	return 0;
}

static __poll_t xenbus_file_poll(struct file *file, poll_table *wait)
{
	struct xenbus_file_priv *u = file->private_data;

	poll_wait(file, &u->read_waitq, wait);
	if (!list_empty(&u->read_buffers))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

const struct file_operations xen_xenbus_fops = {
	.read = xenbus_file_read,
	.write = xenbus_file_write,
	.open = xenbus_file_open,
	.release = xenbus_file_release,
	.poll = xenbus_file_poll,
	.llseek = no_llseek,
};
EXPORT_SYMBOL_GPL(xen_xenbus_fops);

static struct miscdevice xenbus_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/xenbus",
	.fops = &xen_xenbus_fops,
};

static int __init xenbus_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&xenbus_dev);
	if (err)
		pr_err("Could not register xenbus frontend device\n");
	return err;
}
device_initcall(xenbus_init);