0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/vmw_vmci_defs.h>
0009 #include <linux/vmw_vmci_api.h>
0010 #include <linux/list.h>
0011 #include <linux/module.h>
0012 #include <linux/sched.h>
0013 #include <linux/slab.h>
0014 #include <linux/rculist.h>
0015
0016 #include "vmci_driver.h"
0017 #include "vmci_event.h"
0018
0019 #define EVENT_MAGIC 0xEABE0000
0020 #define VMCI_EVENT_MAX_ATTEMPTS 10
0021
/*
 * One subscriber registration for a single VMCI event type.  Allocated in
 * vmci_event_subscribe(), linked into subscriber_array[event], and walked
 * under RCU by event_deliver().
 */
struct vmci_subscription {
	u32 id;			/* unique handle returned to the subscriber */
	u32 event;		/* event type this entry is registered for */
	vmci_event_cb callback;	/* invoked on each matching event dispatch */
	void *callback_data;	/* opaque cookie passed back to callback */
	struct list_head node;	/* link in subscriber_array[event] (RCU list) */
};
0029
0030 static struct list_head subscriber_array[VMCI_EVENT_MAX];
0031 static DEFINE_MUTEX(subscriber_mutex);
0032
0033 int __init vmci_event_init(void)
0034 {
0035 int i;
0036
0037 for (i = 0; i < VMCI_EVENT_MAX; i++)
0038 INIT_LIST_HEAD(&subscriber_array[i]);
0039
0040 return VMCI_SUCCESS;
0041 }
0042
0043 void vmci_event_exit(void)
0044 {
0045 int e;
0046
0047
0048 for (e = 0; e < VMCI_EVENT_MAX; e++) {
0049 struct vmci_subscription *cur, *p2;
0050 list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {
0051
0052
0053
0054
0055
0056
0057 pr_warn("Unexpected free events occurring\n");
0058 list_del(&cur->node);
0059 kfree(cur);
0060 }
0061 }
0062 }
0063
0064
0065
0066
0067 static struct vmci_subscription *event_find(u32 sub_id)
0068 {
0069 int e;
0070
0071 for (e = 0; e < VMCI_EVENT_MAX; e++) {
0072 struct vmci_subscription *cur;
0073 list_for_each_entry(cur, &subscriber_array[e], node) {
0074 if (cur->id == sub_id)
0075 return cur;
0076 }
0077 }
0078 return NULL;
0079 }
0080
0081
0082
0083
0084
0085 static void event_deliver(struct vmci_event_msg *event_msg)
0086 {
0087 struct vmci_subscription *cur;
0088 struct list_head *subscriber_list;
0089
0090 rcu_read_lock();
0091 subscriber_list = &subscriber_array[event_msg->event_data.event];
0092 list_for_each_entry_rcu(cur, subscriber_list, node) {
0093 cur->callback(cur->id, &event_msg->event_data,
0094 cur->callback_data);
0095 }
0096 rcu_read_unlock();
0097 }
0098
0099
0100
0101
0102
0103 int vmci_event_dispatch(struct vmci_datagram *msg)
0104 {
0105 struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;
0106
0107 if (msg->payload_size < sizeof(u32) ||
0108 msg->payload_size > sizeof(struct vmci_event_data_max))
0109 return VMCI_ERROR_INVALID_ARGS;
0110
0111 if (!VMCI_EVENT_VALID(event_msg->event_data.event))
0112 return VMCI_ERROR_EVENT_UNKNOWN;
0113
0114 event_deliver(event_msg);
0115 return VMCI_SUCCESS;
0116 }
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129 int vmci_event_subscribe(u32 event,
0130 vmci_event_cb callback,
0131 void *callback_data,
0132 u32 *new_subscription_id)
0133 {
0134 struct vmci_subscription *sub;
0135 int attempts;
0136 int retval;
0137 bool have_new_id = false;
0138
0139 if (!new_subscription_id) {
0140 pr_devel("%s: Invalid subscription (NULL)\n", __func__);
0141 return VMCI_ERROR_INVALID_ARGS;
0142 }
0143
0144 if (!VMCI_EVENT_VALID(event) || !callback) {
0145 pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
0146 __func__, event, callback, callback_data);
0147 return VMCI_ERROR_INVALID_ARGS;
0148 }
0149
0150 sub = kzalloc(sizeof(*sub), GFP_KERNEL);
0151 if (!sub)
0152 return VMCI_ERROR_NO_MEM;
0153
0154 sub->id = VMCI_EVENT_MAX;
0155 sub->event = event;
0156 sub->callback = callback;
0157 sub->callback_data = callback_data;
0158 INIT_LIST_HEAD(&sub->node);
0159
0160 mutex_lock(&subscriber_mutex);
0161
0162
0163 for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
0164 static u32 subscription_id;
0165
0166
0167
0168
0169
0170
0171 if (!event_find(++subscription_id)) {
0172 sub->id = subscription_id;
0173 have_new_id = true;
0174 break;
0175 }
0176 }
0177
0178 if (have_new_id) {
0179 list_add_rcu(&sub->node, &subscriber_array[event]);
0180 retval = VMCI_SUCCESS;
0181 } else {
0182 retval = VMCI_ERROR_NO_RESOURCES;
0183 }
0184
0185 mutex_unlock(&subscriber_mutex);
0186
0187 *new_subscription_id = sub->id;
0188 return retval;
0189 }
0190 EXPORT_SYMBOL_GPL(vmci_event_subscribe);
0191
0192
0193
0194
0195
0196
0197
0198
0199 int vmci_event_unsubscribe(u32 sub_id)
0200 {
0201 struct vmci_subscription *s;
0202
0203 mutex_lock(&subscriber_mutex);
0204 s = event_find(sub_id);
0205 if (s)
0206 list_del_rcu(&s->node);
0207 mutex_unlock(&subscriber_mutex);
0208
0209 if (!s)
0210 return VMCI_ERROR_NOT_FOUND;
0211
0212 kvfree_rcu(s);
0213
0214 return VMCI_SUCCESS;
0215 }
0216 EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);