0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028 #include <linux/pci.h>
0029 #include <linux/sched/signal.h>
0030
0031 #include "vmwgfx_drv.h"
0032
0033 #define VMW_FENCE_WRAP (1 << 24)
0034
0035 static u32 vmw_irqflag_fence_goal(struct vmw_private *vmw)
0036 {
0037 if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
0038 return SVGA_IRQFLAG_REG_FENCE_GOAL;
0039 else
0040 return SVGA_IRQFLAG_FENCE_GOAL;
0041 }
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054 static irqreturn_t vmw_thread_fn(int irq, void *arg)
0055 {
0056 struct drm_device *dev = (struct drm_device *)arg;
0057 struct vmw_private *dev_priv = vmw_priv(dev);
0058 irqreturn_t ret = IRQ_NONE;
0059
0060 if (test_and_clear_bit(VMW_IRQTHREAD_FENCE,
0061 dev_priv->irqthread_pending)) {
0062 vmw_fences_update(dev_priv->fman);
0063 wake_up_all(&dev_priv->fence_queue);
0064 ret = IRQ_HANDLED;
0065 }
0066
0067 if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF,
0068 dev_priv->irqthread_pending)) {
0069 vmw_cmdbuf_irqthread(dev_priv->cman);
0070 ret = IRQ_HANDLED;
0071 }
0072
0073 return ret;
0074 }
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087 static irqreturn_t vmw_irq_handler(int irq, void *arg)
0088 {
0089 struct drm_device *dev = (struct drm_device *)arg;
0090 struct vmw_private *dev_priv = vmw_priv(dev);
0091 uint32_t status, masked_status;
0092 irqreturn_t ret = IRQ_HANDLED;
0093
0094 status = vmw_irq_status_read(dev_priv);
0095 masked_status = status & READ_ONCE(dev_priv->irq_mask);
0096
0097 if (likely(status))
0098 vmw_irq_status_write(dev_priv, status);
0099
0100 if (!status)
0101 return IRQ_NONE;
0102
0103 if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
0104 wake_up_all(&dev_priv->fifo_queue);
0105
0106 if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
0107 vmw_irqflag_fence_goal(dev_priv))) &&
0108 !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
0109 ret = IRQ_WAKE_THREAD;
0110
0111 if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
0112 SVGA_IRQFLAG_ERROR)) &&
0113 !test_and_set_bit(VMW_IRQTHREAD_CMDBUF,
0114 dev_priv->irqthread_pending))
0115 ret = IRQ_WAKE_THREAD;
0116
0117 return ret;
0118 }
0119
0120 static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
0121 {
0122
0123 return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
0124 }
0125
0126 void vmw_update_seqno(struct vmw_private *dev_priv)
0127 {
0128 uint32_t seqno = vmw_fence_read(dev_priv);
0129
0130 if (dev_priv->last_read_seqno != seqno) {
0131 dev_priv->last_read_seqno = seqno;
0132 vmw_fences_update(dev_priv->fman);
0133 }
0134 }
0135
0136 bool vmw_seqno_passed(struct vmw_private *dev_priv,
0137 uint32_t seqno)
0138 {
0139 bool ret;
0140
0141 if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
0142 return true;
0143
0144 vmw_update_seqno(dev_priv);
0145 if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
0146 return true;
0147
0148 if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
0149 return true;
0150
0151
0152
0153
0154
0155
0156 ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
0157 > VMW_FENCE_WRAP);
0158
0159 return ret;
0160 }
0161
/**
 * vmw_fallback_wait - Wait for a seqno (or device idle) without relying on
 * device interrupts, by polling from a waitqueue loop.
 * @dev_priv: Device private.
 * @lazy: If true, sleep one timer tick between polls instead of busy-waiting.
 * @fifo_idle: If true, wait for full device idle rather than a seqno, and
 * also quiesce the command-buffer manager / block fifo submission.
 * @seqno: Seqno to wait for (ignored by the idle condition).
 * @interruptible: Whether the wait may be interrupted by signals.
 * @timeout: Maximum wait, in jiffies.
 *
 * Return: 0 on success or timeout-with-lockup-warning, -ERESTARTSYS if
 * interrupted by a signal, or the error from vmw_cmdbuf_idle().
 */
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t seqno,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	bool fifo_down = false;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	/* Poll either "device idle" or "seqno passed", per @fifo_idle. */
	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_seqno_passed;

	/*
	 * When waiting for idle, first drain the command-buffer manager if
	 * present; otherwise take the fifo rwsem for reading to block
	 * concurrent fifo reservation while we wait.
	 */
	if (fifo_idle) {
		if (dev_priv->cman) {
			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
					      10*HZ);
			if (ret)
				goto out_err;
		} else if (fifo_state) {
			down_read(&fifo_state->rwsem);
			fifo_down = true;
		}
	}

	/* Remember the last emitted marker so we can re-sync it on exit. */
	signal_seq = atomic_read(&dev_priv->marker_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, seqno))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/*
			 * Busy-wait path: every 16 iterations, briefly go
			 * TASK_RUNNING and yield the CPU so we don't hog it,
			 * then restore the wait state for the next poll.
			 */
			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	/*
	 * On a successful idle wait, write back the last emitted seqno so
	 * the device fence register matches the software marker state.
	 */
	if (ret == 0 && fifo_idle && fifo_state)
		vmw_fence_write(dev_priv, signal_seq);

	wake_up_all(&dev_priv->fence_queue);
out_err:
	if (fifo_down)
		up_read(&fifo_state->rwsem);

	return ret;
}
0241
/**
 * vmw_generic_waiter_add - Account for a new waiter on an irq flag and
 * unmask the interrupt source when the first waiter arrives.
 * @dev_priv: Device private.
 * @flag: SVGA_IRQFLAG_* bit being waited on.
 * @waiter_count: Per-flag waiter counter, protected by waiter_lock.
 */
void vmw_generic_waiter_add(struct vmw_private *dev_priv,
			    u32 flag, int *waiter_count)
{
	spin_lock_bh(&dev_priv->waiter_lock);
	if ((*waiter_count)++ == 0) {
		/*
		 * Write the flag to the status register before unmasking;
		 * the irq handler acks pending bits the same way, so this
		 * presumably clears a stale pending instance — note order:
		 * ack first, then enable in SVGA_REG_IRQMASK.
		 */
		vmw_irq_status_write(dev_priv, flag);
		dev_priv->irq_mask |= flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_bh(&dev_priv->waiter_lock);
}
0253
/**
 * vmw_generic_waiter_remove - Drop a waiter on an irq flag and mask the
 * interrupt source again when the last waiter leaves.
 * @dev_priv: Device private.
 * @flag: SVGA_IRQFLAG_* bit no longer being waited on.
 * @waiter_count: Per-flag waiter counter, protected by waiter_lock.
 */
void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
			       u32 flag, int *waiter_count)
{
	spin_lock_bh(&dev_priv->waiter_lock);
	if (--(*waiter_count) == 0) {
		dev_priv->irq_mask &= ~flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_bh(&dev_priv->waiter_lock);
}
0264
/* Register a waiter for any-fence interrupts (see vmw_generic_waiter_add). */
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
			       &dev_priv->fence_queue_waiters);
}
0270
/* Unregister a waiter for any-fence interrupts. */
void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
				  &dev_priv->fence_queue_waiters);
}
0276
/* Register a waiter for fence-goal interrupts (flag is device-dependent). */
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
			       &dev_priv->goal_queue_waiters);
}
0282
/* Unregister a waiter for fence-goal interrupts. */
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
				  &dev_priv->goal_queue_waiters);
}
0288
/*
 * Acknowledge any interrupt status pending from before the driver took
 * over, so stale bits don't fire as soon as handlers are installed.
 */
static void vmw_irq_preinstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_irq_status_write(dev_priv, vmw_irq_status_read(dev_priv));
}
0297
/**
 * vmw_irq_uninstall - Tear down vmwgfx interrupt handling.
 * @dev: DRM device.
 *
 * Masks all interrupt sources, acks anything still pending, frees the
 * registered irq handlers and releases the PCI irq vectors. No-op on
 * devices without SVGA_CAP_IRQMASK.
 */
void vmw_irq_uninstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	uint32_t status;
	u32 i;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	/* Mask everything off before freeing the handlers. */
	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);

	/* Ack any still-pending status bits. */
	status = vmw_irq_status_read(dev_priv);
	vmw_irq_status_write(dev_priv, status);

	/* Only the vectors vmw_irq_install() successfully requested. */
	for (i = 0; i < dev_priv->num_irq_vectors; ++i)
		free_irq(dev_priv->irqs[i], dev);

	pci_free_irq_vectors(pdev);
	dev_priv->num_irq_vectors = 0;
}
0319
0320
0321
0322
0323
0324
0325
/**
 * vmw_irq_install - Allocate PCI irq vectors and install threaded handlers.
 * @dev_priv: Device private.
 *
 * Allocates up to VMWGFX_MAX_NUM_IRQS vectors (any type: MSI-X/MSI/legacy)
 * and requests a threaded irq (vmw_irq_handler / vmw_thread_fn) on each.
 * dev_priv->num_irq_vectors is set to the number of handlers successfully
 * requested on every exit path, so vmw_irq_uninstall() can free exactly
 * those on partial failure.
 *
 * Return: 0 on success, or a negative errno from vector allocation,
 * pci_irq_vector() or request_threaded_irq().
 */
int vmw_irq_install(struct vmw_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	int nvec;
	int i = 0;

	/* Sanity: VMWGFX_MAX_NUM_IRQS must match the SVGA irq-flag space. */
	BUILD_BUG_ON((SVGA_IRQFLAG_MAX >> VMWGFX_MAX_NUM_IRQS) != 1);
	BUG_ON(VMWGFX_MAX_NUM_IRQS != get_count_order(SVGA_IRQFLAG_MAX));

	nvec = pci_alloc_irq_vectors(pdev, 1, VMWGFX_MAX_NUM_IRQS,
				     PCI_IRQ_ALL_TYPES);

	if (nvec <= 0) {
		drm_err(&dev_priv->drm,
			"IRQ's are unavailable, nvec: %d\n", nvec);
		ret = nvec;
		goto done;
	}

	/* Ack any interrupts left pending from before we took over. */
	vmw_irq_preinstall(dev);

	for (i = 0; i < nvec; ++i) {
		ret = pci_irq_vector(pdev, i);
		if (ret < 0) {
			drm_err(&dev_priv->drm,
				"failed getting irq vector: %d\n", ret);
			goto done;
		}
		dev_priv->irqs[i] = ret;

		ret = request_threaded_irq(dev_priv->irqs[i], vmw_irq_handler, vmw_thread_fn,
					   IRQF_SHARED, VMWGFX_DRIVER_NAME, dev);
		if (ret != 0) {
			drm_err(&dev_priv->drm,
				"Failed installing irq(%d): %d\n",
				dev_priv->irqs[i], ret);
			goto done;
		}
	}

done:
	/* On failure at index i, handlers 0..i-1 were installed: record i. */
	dev_priv->num_irq_vectors = i;
	return ret;
}