// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/errno.h>
#include <linux/irq.h>

#include <drm/drm_print.h>

#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/grant_table.h>

#include "xen_drm_front.h"
#include "xen_drm_front_evtchnl.h"

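/*
 * Interrupt handler for the request (control) channel: consume all
 * pending responses on the shared ring and complete the waiter of the
 * matching request.
 */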
static irqreturn_t evtchnl_interrupt_ctrl(int irq, void *dev_id)
{
	struct xen_drm_front_evtchnl *evtchnl = dev_id;
	struct xen_drm_front_info *front_info = evtchnl->front_info;
	struct xendispl_resp *resp;
	RING_IDX i, rp;
	unsigned long flags;

	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	spin_lock_irqsave(&front_info->io_lock, flags);

again:
	rp = evtchnl->u.req.ring.sring->rsp_prod;
	/* ensure we see queued responses up to rp */
	virt_rmb();

	for (i = evtchnl->u.req.ring.rsp_cons; i != rp; i++) {
		resp = RING_GET_RESPONSE(&evtchnl->u.req.ring, i);
		if (unlikely(resp->id != evtchnl->evt_id))
			continue;

		switch (resp->operation) {
		case XENDISPL_OP_PG_FLIP:
		case XENDISPL_OP_FB_ATTACH:
		case XENDISPL_OP_FB_DETACH:
		case XENDISPL_OP_DBUF_CREATE:
		case XENDISPL_OP_DBUF_DESTROY:
		case XENDISPL_OP_SET_CONFIG:
			evtchnl->u.req.resp_status = resp->status;
			complete(&evtchnl->u.req.completion);
			break;

		default:
			DRM_ERROR("Operation %d is not supported\n",
				  resp->operation);
			break;
		}
	}

	evtchnl->u.req.ring.rsp_cons = i;

	if (i != evtchnl->u.req.ring.req_prod_pvt) {
		int more_to_do;

		RING_FINAL_CHECK_FOR_RESPONSES(&evtchnl->u.req.ring,
					       more_to_do);
		if (more_to_do)
			goto again;
	} else {
		evtchnl->u.req.ring.sring->rsp_event = i + 1;
	}

	spin_unlock_irqrestore(&front_info->io_lock, flags);
	return IRQ_HANDLED;
}

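/*
 * Interrupt handler for the in-event channel: consume events published
 * by the back-end on the event page, e.g. notify DRM core about
 * completed page flips.
 */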
static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
{
	struct xen_drm_front_evtchnl *evtchnl = dev_id;
	struct xen_drm_front_info *front_info = evtchnl->front_info;
	struct xendispl_event_page *page = evtchnl->u.evt.page;
	u32 cons, prod;
	unsigned long flags;

	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	spin_lock_irqsave(&front_info->io_lock, flags);

	prod = page->in_prod;
	/* ensure we see ring contents up to prod */
	virt_rmb();
	if (prod == page->in_cons)
		goto out;

	for (cons = page->in_cons; cons != prod; cons++) {
		struct xendispl_evt *event;

		event = &XENDISPL_IN_RING_REF(page, cons);
		if (unlikely(event->id != evtchnl->evt_id++))
			continue;

		switch (event->type) {
		case XENDISPL_EVT_PG_FLIP:
			xen_drm_front_on_frame_done(front_info, evtchnl->index,
						    event->op.pg_flip.fb_cookie);
			break;
		}
	}
	page->in_cons = cons;
	/* ensure ring contents */
	virt_wmb();

out:
	spin_unlock_irqrestore(&front_info->io_lock, flags);
	return IRQ_HANDLED;
}

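/*
 * Tear down a single event channel: wake up any waiter with -EIO,
 * unbind the IRQ, release the Xen event channel and free the shared
 * ring page.
 */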
static void evtchnl_free(struct xen_drm_front_info *front_info,
			 struct xen_drm_front_evtchnl *evtchnl)
{
	void *page = NULL;

	if (evtchnl->type == EVTCHNL_TYPE_REQ)
		page = evtchnl->u.req.ring.sring;
	else if (evtchnl->type == EVTCHNL_TYPE_EVT)
		page = evtchnl->u.evt.page;

	if (!page)
		return;

	evtchnl->state = EVTCHNL_STATE_DISCONNECTED;

	if (evtchnl->type == EVTCHNL_TYPE_REQ) {
		/* release all who still waits for the response if any */
		evtchnl->u.req.resp_status = -EIO;
		complete_all(&evtchnl->u.req.completion);
	}

	if (evtchnl->irq)
		unbind_from_irqhandler(evtchnl->irq, evtchnl);

	if (evtchnl->port)
		xenbus_free_evtchn(front_info->xb_dev, evtchnl->port);

	/* end access and free the page */
	xenbus_teardown_ring(&page, 1, &evtchnl->gref);

	memset(evtchnl, 0, sizeof(*evtchnl));
}

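/*
 * Allocate one event channel: grant a shared ring page to the back-end,
 * allocate a Xen event channel and bind it to the interrupt handler
 * matching the channel type.
 */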
static int evtchnl_alloc(struct xen_drm_front_info *front_info, int index,
			 struct xen_drm_front_evtchnl *evtchnl,
			 enum xen_drm_front_evtchnl_type type)
{
	struct xenbus_device *xb_dev = front_info->xb_dev;
	void *page;
	irq_handler_t handler;
	int ret;

	memset(evtchnl, 0, sizeof(*evtchnl));
	evtchnl->type = type;
	evtchnl->index = index;
	evtchnl->front_info = front_info;
	evtchnl->state = EVTCHNL_STATE_DISCONNECTED;

	ret = xenbus_setup_ring(xb_dev, GFP_NOIO | __GFP_HIGH, &page,
				1, &evtchnl->gref);
	if (ret)
		goto fail;

	if (type == EVTCHNL_TYPE_REQ) {
		struct xen_displif_sring *sring;

		init_completion(&evtchnl->u.req.completion);
		mutex_init(&evtchnl->u.req.req_io_lock);
		sring = page;
		XEN_FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);

		handler = evtchnl_interrupt_ctrl;
	} else {
		evtchnl->u.evt.page = page;
		handler = evtchnl_interrupt_evt;
	}

	ret = xenbus_alloc_evtchn(xb_dev, &evtchnl->port);
	if (ret < 0)
		goto fail;

	ret = bind_evtchn_to_irqhandler(evtchnl->port,
					handler, 0, xb_dev->devicetype,
					evtchnl);
	if (ret < 0)
		goto fail;

	evtchnl->irq = ret;
	return 0;

fail:
	DRM_ERROR("Failed to allocate ring: %d\n", ret);
	return ret;
}

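/*
 * Create a request/event channel pair for every configured connector.
 */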
int xen_drm_front_evtchnl_create_all(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg;
	int ret, conn;

	cfg = &front_info->cfg;

	front_info->evt_pairs =
			kcalloc(cfg->num_connectors,
				sizeof(struct xen_drm_front_evtchnl_pair),
				GFP_KERNEL);
	if (!front_info->evt_pairs) {
		ret = -ENOMEM;
		goto fail;
	}

	for (conn = 0; conn < cfg->num_connectors; conn++) {
		ret = evtchnl_alloc(front_info, conn,
				    &front_info->evt_pairs[conn].req,
				    EVTCHNL_TYPE_REQ);
		if (ret < 0) {
			DRM_ERROR("Error allocating control channel\n");
			goto fail;
		}

		ret = evtchnl_alloc(front_info, conn,
				    &front_info->evt_pairs[conn].evt,
				    EVTCHNL_TYPE_EVT);
		if (ret < 0) {
			DRM_ERROR("Error allocating in-event channel\n");
			goto fail;
		}
	}
	front_info->num_evt_pairs = cfg->num_connectors;
	return 0;

fail:
	xen_drm_front_evtchnl_free_all(front_info);
	return ret;
}

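/*
 * Publish a channel's ring reference and event channel number under the
 * given XenStore path as part of an open transaction.
 */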
static int evtchnl_publish(struct xenbus_transaction xbt,
			   struct xen_drm_front_evtchnl *evtchnl,
			   const char *path, const char *node_ring,
			   const char *node_chnl)
{
	struct xenbus_device *xb_dev = evtchnl->front_info->xb_dev;
	int ret;

	/* write ring reference */
	ret = xenbus_printf(xbt, path, node_ring, "%u", evtchnl->gref);
	if (ret < 0) {
		xenbus_dev_error(xb_dev, ret, "writing ring-ref");
		return ret;
	}

	/* add event channel used for msg exchange */
	ret = xenbus_printf(xbt, path, node_chnl, "%u", evtchnl->port);
	if (ret < 0) {
		xenbus_dev_error(xb_dev, ret, "writing event channel");
		return ret;
	}

	return 0;
}

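/*
 * Publish all channel parameters to XenStore in a single transaction,
 * retrying if the transaction ends with -EAGAIN.
 */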
int xen_drm_front_evtchnl_publish_all(struct xen_drm_front_info *front_info)
{
	struct xenbus_transaction xbt;
	struct xen_drm_front_cfg *plat_data;
	int ret, conn;

	plat_data = &front_info->cfg;

again:
	ret = xenbus_transaction_start(&xbt);
	if (ret < 0) {
		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "starting transaction");
		return ret;
	}

	for (conn = 0; conn < plat_data->num_connectors; conn++) {
		ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].req,
				      plat_data->connectors[conn].xenstore_path,
				      XENDISPL_FIELD_REQ_RING_REF,
				      XENDISPL_FIELD_REQ_CHANNEL);
		if (ret < 0)
			goto fail;

		ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].evt,
				      plat_data->connectors[conn].xenstore_path,
				      XENDISPL_FIELD_EVT_RING_REF,
				      XENDISPL_FIELD_EVT_CHANNEL);
		if (ret < 0)
			goto fail;
	}

	ret = xenbus_transaction_end(xbt, 0);
	if (ret < 0) {
		if (ret == -EAGAIN)
			goto again;

		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "completing transaction");
		goto fail_to_end;
	}

	return 0;

fail:
	xenbus_transaction_end(xbt, 1);

fail_to_end:
	xenbus_dev_fatal(front_info->xb_dev, ret, "writing Xen store");
	return ret;
}

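/*
 * Push the privately queued request onto the shared ring and kick the
 * back-end if it expects a notification.
 */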
void xen_drm_front_evtchnl_flush(struct xen_drm_front_evtchnl *evtchnl)
{
	int notify;

	evtchnl->u.req.ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&evtchnl->u.req.ring, notify);
	if (notify)
		notify_remote_via_irq(evtchnl->irq);
}

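/*
 * Set the state of all channels under io_lock, so the change is
 * serialized against the interrupt handlers.
 */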
void xen_drm_front_evtchnl_set_state(struct xen_drm_front_info *front_info,
				     enum xen_drm_front_evtchnl_state state)
{
	unsigned long flags;
	int i;

	if (!front_info->evt_pairs)
		return;

	spin_lock_irqsave(&front_info->io_lock, flags);
	for (i = 0; i < front_info->num_evt_pairs; i++) {
		front_info->evt_pairs[i].req.state = state;
		front_info->evt_pairs[i].evt.state = state;
	}
	spin_unlock_irqrestore(&front_info->io_lock, flags);
}

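/*
 * Free all allocated channel pairs and the pair array itself.
 */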
void xen_drm_front_evtchnl_free_all(struct xen_drm_front_info *front_info)
{
	int i;

	if (!front_info->evt_pairs)
		return;

	for (i = 0; i < front_info->num_evt_pairs; i++) {
		evtchnl_free(front_info, &front_info->evt_pairs[i].req);
		evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
	}

	kfree(front_info->evt_pairs);
	front_info->evt_pairs = NULL;
}