/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/fb.h>
#include <linux/pci.h>

#include <drm/drm_fourcc.h>
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"

#define VMW_DIRTY_DELAY (HZ / 30)

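/*
 * Per-device fbdev state. Holds the vmalloc shadow buffer that fbdev
 * clients draw into, the buffer object and KMS framebuffer the contents
 * are flushed to, the coalesced dirty rectangle protected by dirty.lock,
 * and the delayed work that performs the actual flush.
 */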
struct vmw_fb_par {
    struct vmw_private *vmw_priv;

    void *vmalloc;

    struct mutex bo_mutex;
    struct vmw_buffer_object *vmw_bo;
    unsigned bo_size;
    struct drm_framebuffer *set_fb;
    struct drm_display_mode *set_mode;
    u32 fb_x;
    u32 fb_y;
    bool bo_iowrite;

    u32 pseudo_palette[17];

    unsigned max_width;
    unsigned max_height;

    struct {
        spinlock_t lock;
        bool active;
        unsigned x1;
        unsigned y1;
        unsigned x2;
        unsigned y2;
    } dirty;

    struct drm_crtc *crtc;
    struct drm_connector *con;
    struct delayed_work local_work;
};

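/*
 * fbdev .fb_setcolreg callback. The console runs on a truecolor visual,
 * so the color is only packed into the pseudo palette; the 24- and 32-bit
 * depths set up by this driver are the only supported formats.
 */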
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
                unsigned blue, unsigned transp,
                struct fb_info *info)
{
    struct vmw_fb_par *par = info->par;
    u32 *pal = par->pseudo_palette;

    if (regno > 15) {
        DRM_ERROR("Bad regno %u.\n", regno);
        return 1;
    }

    switch (par->set_fb->format->depth) {
    case 24:
    case 32:
        pal[regno] = ((red & 0xff00) << 8) |
                  (green & 0xff00) |
                 ((blue  & 0xff00) >> 8);
        break;
    default:
        DRM_ERROR("Bad depth %u, bpp %u.\n",
              par->set_fb->format->depth,
              par->set_fb->format->cpp[0] * 8);
        return 1;
    }

    return 0;
}

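/*
 * fbdev .fb_check_var callback. Only 32 bpp is accepted (depth 24 or 32
 * depending on whether an alpha channel was requested), the color field
 * layout is filled in accordingly, and the requested geometry is checked
 * against the fbdev limits and the available VRAM.
 */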
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
                struct fb_info *info)
{
    int depth = var->bits_per_pixel;
    struct vmw_fb_par *par = info->par;
    struct vmw_private *vmw_priv = par->vmw_priv;

    switch (var->bits_per_pixel) {
    case 32:
        depth = (var->transp.length > 0) ? 32 : 24;
        break;
    default:
        DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
        return -EINVAL;
    }

    switch (depth) {
    case 24:
        var->red.offset = 16;
        var->green.offset = 8;
        var->blue.offset = 0;
        var->red.length = 8;
        var->green.length = 8;
        var->blue.length = 8;
        var->transp.length = 0;
        var->transp.offset = 0;
        break;
    case 32:
        var->red.offset = 16;
        var->green.offset = 8;
        var->blue.offset = 0;
        var->red.length = 8;
        var->green.length = 8;
        var->blue.length = 8;
        var->transp.length = 8;
        var->transp.offset = 24;
        break;
    default:
        DRM_ERROR("Bad depth %u.\n", depth);
        return -EINVAL;
    }

    if ((var->xoffset + var->xres) > par->max_width ||
        (var->yoffset + var->yres) > par->max_height) {
        DRM_ERROR("Requested geom can not fit in framebuffer\n");
        return -EINVAL;
    }

    if (!vmw_kms_validate_mode_vram(vmw_priv,
                    var->xres * var->bits_per_pixel/8,
                    var->yoffset + var->yres)) {
        DRM_ERROR("Requested geom can not fit in framebuffer\n");
        return -EINVAL;
    }

    return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
    return 0;
}

/**
 * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
 *
 * @work: The struct work_struct associated with this task.
 *
 * This function flushes the dirty regions of the vmalloc framebuffer to the
 * kms framebuffer, and if the kms framebuffer is visible, also updates the
 * corresponding displays. Note that this function runs even if the kms
 * framebuffer is not bound to a crtc and thus not visible, but it's turned
 * off during hibernation using the par->dirty.active bool.
 */
static void vmw_fb_dirty_flush(struct work_struct *work)
{
    struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
                          local_work.work);
    struct vmw_private *vmw_priv = par->vmw_priv;
    struct fb_info *info = vmw_priv->fb_info;
    unsigned long irq_flags;
    s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
    u32 cpp, max_x, max_y;
    struct drm_clip_rect clip;
    struct drm_framebuffer *cur_fb;
    u8 *src_ptr, *dst_ptr;
    struct vmw_buffer_object *vbo = par->vmw_bo;
    void *virtual;

    if (!READ_ONCE(par->dirty.active))
        return;

    mutex_lock(&par->bo_mutex);
    cur_fb = par->set_fb;
    if (!cur_fb)
        goto out_unlock;

    (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
    virtual = vmw_bo_map_and_cache(vbo);
    if (!virtual)
        goto out_unreserve;

    spin_lock_irqsave(&par->dirty.lock, irq_flags);
    if (!par->dirty.active) {
        spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
        goto out_unreserve;
    }

    /*
     * Handle panning when copying from vmalloc to framebuffer.
     * Clip dirty area to framebuffer.
     */
    cpp = cur_fb->format->cpp[0];
    max_x = par->fb_x + cur_fb->width;
    max_y = par->fb_y + cur_fb->height;

    dst_x1 = par->dirty.x1 - par->fb_x;
    dst_y1 = par->dirty.y1 - par->fb_y;
    dst_x1 = max_t(s32, dst_x1, 0);
    dst_y1 = max_t(s32, dst_y1, 0);

    dst_x2 = par->dirty.x2 - par->fb_x;
    dst_y2 = par->dirty.y2 - par->fb_y;
    dst_x2 = min_t(s32, dst_x2, max_x);
    dst_y2 = min_t(s32, dst_y2, max_y);
    w = dst_x2 - dst_x1;
    h = dst_y2 - dst_y1;
    w = max_t(s32, 0, w);
    h = max_t(s32, 0, h);

    par->dirty.x1 = par->dirty.x2 = 0;
    par->dirty.y1 = par->dirty.y2 = 0;
    spin_unlock_irqrestore(&par->dirty.lock, irq_flags);

    if (w && h) {
        dst_ptr = (u8 *)virtual +
            (dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
        src_ptr = (u8 *)par->vmalloc +
            ((dst_y1 + par->fb_y) * info->fix.line_length +
             (dst_x1 + par->fb_x) * cpp);

        while (h-- > 0) {
            memcpy(dst_ptr, src_ptr, w*cpp);
            dst_ptr += par->set_fb->pitches[0];
            src_ptr += info->fix.line_length;
        }

        clip.x1 = dst_x1;
        clip.x2 = dst_x2;
        clip.y1 = dst_y1;
        clip.y2 = dst_y2;
    }

out_unreserve:
    ttm_bo_unreserve(&vbo->base);
    if (w && h) {
        WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
                               &clip, 1));
        vmw_cmd_flush(vmw_priv, false);
    }
out_unlock:
    mutex_unlock(&par->bo_mutex);
}

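/*
 * Add the rectangle (x1, y1) - (x1 + width, y1 + height) to the coalesced
 * dirty region. If the region was previously empty and flushing is active,
 * the flush work is scheduled (deferred by VMW_DIRTY_DELAY and shared with
 * the defio path); otherwise the existing region is extended to cover the
 * new rectangle.
 */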
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
                  unsigned x1, unsigned y1,
                  unsigned width, unsigned height)
{
    unsigned long flags;
    unsigned x2 = x1 + width;
    unsigned y2 = y1 + height;

    spin_lock_irqsave(&par->dirty.lock, flags);
    if (par->dirty.x1 == par->dirty.x2) {
        par->dirty.x1 = x1;
        par->dirty.y1 = y1;
        par->dirty.x2 = x2;
        par->dirty.y2 = y2;
        /*
         * If flushing is active, start the dirty work now; the work is
         * shared with the defio system.
         */
        if (par->dirty.active)
            schedule_delayed_work(&par->local_work,
                          VMW_DIRTY_DELAY);
    } else {
        if (x1 < par->dirty.x1)
            par->dirty.x1 = x1;
        if (y1 < par->dirty.y1)
            par->dirty.y1 = y1;
        if (x2 > par->dirty.x2)
            par->dirty.x2 = x2;
        if (y2 > par->dirty.y2)
            par->dirty.y2 = y2;
    }
    spin_unlock_irqrestore(&par->dirty.lock, flags);
}

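/*
 * fbdev .fb_pan_display callback. Records the new panning offsets and
 * marks the whole visible framebuffer dirty so the next flush repaints it
 * from the new origin.
 */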
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
                  struct fb_info *info)
{
    struct vmw_fb_par *par = info->par;

    if ((var->xoffset + var->xres) > var->xres_virtual ||
        (var->yoffset + var->yres) > var->yres_virtual) {
        DRM_ERROR("Requested panning can not fit in framebuffer\n");
        return -EINVAL;
    }

    mutex_lock(&par->bo_mutex);
    par->fb_x = var->xoffset;
    par->fb_y = var->yoffset;
    if (par->set_fb)
        vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
                  par->set_fb->height);
    mutex_unlock(&par->bo_mutex);

    return 0;
}

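/*
 * Deferred-io callback, invoked by the fbdev core with the list of shadow
 * buffer pages that have been written to through mmap. The touched page
 * range is converted to a span of scanlines, the dirty region is set to
 * cover it, and the flush work is scheduled to run immediately.
 */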
static void vmw_deferred_io(struct fb_info *info, struct list_head *pagereflist)
{
    struct vmw_fb_par *par = info->par;
    unsigned long start, end, min, max;
    unsigned long flags;
    struct fb_deferred_io_pageref *pageref;
    int y1, y2;

    min = ULONG_MAX;
    max = 0;
    list_for_each_entry(pageref, pagereflist, list) {
        start = pageref->offset;
        end = start + PAGE_SIZE - 1;
        min = min(min, start);
        max = max(max, end);
    }

    if (min < max) {
        y1 = min / info->fix.line_length;
        y2 = (max / info->fix.line_length) + 1;

        spin_lock_irqsave(&par->dirty.lock, flags);
        par->dirty.x1 = 0;
        par->dirty.y1 = y1;
        par->dirty.x2 = info->var.xres;
        par->dirty.y2 = y2;
        spin_unlock_irqrestore(&par->dirty.lock, flags);

        /*
         * Since we've already waited on this work once, try to
         * execute asap.
         */
        cancel_delayed_work(&par->local_work);
        schedule_delayed_work(&par->local_work, 0);
    }
}

static struct fb_deferred_io vmw_defio = {
    .delay      = VMW_DIRTY_DELAY,
    .deferred_io    = vmw_deferred_io,
};

/*
 * Draw code
 */

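/*
 * The drawing hooks simply wrap the generic cfb_* helpers and then mark
 * the affected rectangle dirty so it gets flushed to the device
 * framebuffer.
 */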
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
    cfb_fillrect(info, rect);
    vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
              rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
    cfb_copyarea(info, region);
    vmw_fb_dirty_mark(info->par, region->dx, region->dy,
              region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
    cfb_imageblit(info, image);
    vmw_fb_dirty_mark(info->par, image->dx, image->dy,
              image->width, image->height);
}

/*
 * Bring up code
 */

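/*
 * Allocate a buffer object of the given size in system memory to back the
 * fbdev framebuffer.
 */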
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
                size_t size, struct vmw_buffer_object **out)
{
    struct vmw_buffer_object *vmw_bo;
    int ret;

    ret = vmw_bo_create(vmw_priv, size,
                  &vmw_sys_placement,
                  false, false,
                  &vmw_bo_bo_free, &vmw_bo);
    if (unlikely(ret != 0))
        return ret;

    *out = vmw_bo;

    return ret;
}

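/*
 * Derive the color depth from the requested bits per pixel: 32 bpp maps to
 * depth 32 when an alpha channel is present and to depth 24 otherwise.
 * Anything else is rejected.
 */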
static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
                int *depth)
{
    switch (var->bits_per_pixel) {
    case 32:
        *depth = (var->transp.length > 0) ? 32 : 24;
        break;
    default:
        DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
        return -EINVAL;
    }

    return 0;
}

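/*
 * Perform a legacy modeset through the crtc's set_config hook, acquiring
 * the modeset locks with a local acquire context and retrying on deadlock.
 */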
static int vmwgfx_set_config_internal(struct drm_mode_set *set)
{
    struct drm_crtc *crtc = set->crtc;
    struct drm_modeset_acquire_ctx ctx;
    int ret;

    drm_modeset_acquire_init(&ctx, 0);

restart:
    ret = crtc->funcs->set_config(set, &ctx);

    if (ret == -EDEADLK) {
        drm_modeset_backoff(&ctx);
        goto restart;
    }

    drm_modeset_drop_locks(&ctx);
    drm_modeset_acquire_fini(&ctx);

    return ret;
}

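/*
 * Tear down the current fbdev KMS state: unset the mode if one is set,
 * drop the reference to the KMS framebuffer, and optionally unreference
 * the backing buffer object.
 */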
static int vmw_fb_kms_detach(struct vmw_fb_par *par,
                 bool detach_bo,
                 bool unref_bo)
{
    struct drm_framebuffer *cur_fb = par->set_fb;
    int ret;

    /* Detach the KMS framebuffer from crtcs */
    if (par->set_mode) {
        struct drm_mode_set set;

        set.crtc = par->crtc;
        set.x = 0;
        set.y = 0;
        set.mode = NULL;
        set.fb = NULL;
        set.num_connectors = 0;
        set.connectors = &par->con;
        ret = vmwgfx_set_config_internal(&set);
        if (ret) {
            DRM_ERROR("Could not unset a mode.\n");
            return ret;
        }
        drm_mode_destroy(&par->vmw_priv->drm, par->set_mode);
        par->set_mode = NULL;
    }

    if (cur_fb) {
        drm_framebuffer_put(cur_fb);
        par->set_fb = NULL;
    }

    if (par->vmw_bo && detach_bo && unref_bo)
        vmw_bo_unreference(&par->vmw_bo);

    return 0;
}

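/*
 * (Re)create the KMS framebuffer that fbdev scans out from, based on the
 * current var. If the existing framebuffer already matches the requested
 * size and format it is kept; otherwise the old state is detached, the
 * backing buffer object is reallocated if it is too small (or more than
 * twice the required size), and a new framebuffer is created around it.
 */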
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
    struct drm_mode_fb_cmd2 mode_cmd = {0};
    struct vmw_fb_par *par = info->par;
    struct fb_var_screeninfo *var = &info->var;
    struct drm_framebuffer *cur_fb;
    struct vmw_framebuffer *vfb;
    int ret = 0, depth;
    size_t new_bo_size;

    ret = vmw_fb_compute_depth(var, &depth);
    if (ret)
        return ret;

    mode_cmd.width = var->xres;
    mode_cmd.height = var->yres;
    mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
    mode_cmd.pixel_format =
        drm_mode_legacy_fb_format(var->bits_per_pixel, depth);

    cur_fb = par->set_fb;
    if (cur_fb && cur_fb->width == mode_cmd.width &&
        cur_fb->height == mode_cmd.height &&
        cur_fb->format->format == mode_cmd.pixel_format &&
        cur_fb->pitches[0] == mode_cmd.pitches[0])
        return 0;

    /* Need new buffer object ? */
    new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
    ret = vmw_fb_kms_detach(par,
                par->bo_size < new_bo_size ||
                par->bo_size > 2*new_bo_size,
                true);
    if (ret)
        return ret;

    if (!par->vmw_bo) {
        ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
                       &par->vmw_bo);
        if (ret) {
            DRM_ERROR("Failed creating a buffer object for "
                  "fbdev.\n");
            return ret;
        }
        par->bo_size = new_bo_size;
    }

    vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
                      true, &mode_cmd);
    if (IS_ERR(vfb))
        return PTR_ERR(vfb);

    par->set_fb = &vfb->base;

    return 0;
}

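/*
 * fbdev .fb_set_par callback. Builds a display mode matching the requested
 * resolution, validates it against the available VRAM, (re)creates the KMS
 * framebuffer, sets the mode on the fbdev crtc/connector pair and kicks a
 * full dirty flush.
 */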
static int vmw_fb_set_par(struct fb_info *info)
{
    struct vmw_fb_par *par = info->par;
    struct vmw_private *vmw_priv = par->vmw_priv;
    struct drm_mode_set set;
    struct fb_var_screeninfo *var = &info->var;
    struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
        DRM_MODE_TYPE_DRIVER,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
    };
    struct drm_display_mode *mode;
    int ret;

    mode = drm_mode_duplicate(&vmw_priv->drm, &new_mode);
    if (!mode) {
        DRM_ERROR("Could not create new fb mode.\n");
        return -ENOMEM;
    }

    mode->hdisplay = var->xres;
    mode->vdisplay = var->yres;
    vmw_guess_mode_timing(mode);

    if (!vmw_kms_validate_mode_vram(vmw_priv,
                    mode->hdisplay *
                    DIV_ROUND_UP(var->bits_per_pixel, 8),
                    mode->vdisplay)) {
        drm_mode_destroy(&vmw_priv->drm, mode);
        return -EINVAL;
    }

    mutex_lock(&par->bo_mutex);
    ret = vmw_fb_kms_framebuffer(info);
    if (ret)
        goto out_unlock;

    par->fb_x = var->xoffset;
    par->fb_y = var->yoffset;

    set.crtc = par->crtc;
    set.x = 0;
    set.y = 0;
    set.mode = mode;
    set.fb = par->set_fb;
    set.num_connectors = 1;
    set.connectors = &par->con;

    ret = vmwgfx_set_config_internal(&set);
    if (ret)
        goto out_unlock;

    vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
              par->set_fb->width, par->set_fb->height);
    /*
     * If something was already dirty, dirty_mark() won't schedule new
     * work, so let's do it here.
     */
    schedule_delayed_work(&par->local_work, 0);

out_unlock:
    if (par->set_mode)
        drm_mode_destroy(&vmw_priv->drm, par->set_mode);
    par->set_mode = mode;

    mutex_unlock(&par->bo_mutex);

    return ret;
}

static const struct fb_ops vmw_fb_ops = {
    .owner = THIS_MODULE,
    .fb_check_var = vmw_fb_check_var,
    .fb_set_par = vmw_fb_set_par,
    .fb_setcolreg = vmw_fb_setcolreg,
    .fb_fillrect = vmw_fb_fillrect,
    .fb_copyarea = vmw_fb_copyarea,
    .fb_imageblit = vmw_fb_imageblit,
    .fb_pan_display = vmw_fb_pan_display,
    .fb_blank = vmw_fb_blank,
    .fb_mmap = fb_deferred_io_mmap,
};

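/**
 * vmw_fb_init - set up the fbdev emulation
 *
 * @vmw_priv: Pointer to the device private structure.
 *
 * Allocates the fb_info and the vmalloc shadow buffer, looks up an initial
 * connector/crtc/mode via vmw_kms_fbdev_init_data(), fills in the fixed and
 * variable screen info, hooks up deferred io and registers the framebuffer.
 * Finally calls vmw_fb_set_par() to set the initial mode.
 */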
int vmw_fb_init(struct vmw_private *vmw_priv)
{
    struct device *device = vmw_priv->drm.dev;
    struct vmw_fb_par *par;
    struct fb_info *info;
    unsigned fb_width, fb_height;
    unsigned int fb_bpp, fb_pitch, fb_size;
    struct drm_display_mode *init_mode;
    int ret;

    fb_bpp = 32;

    /* XXX As shouldn't these be as well. */
    fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
    fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

    fb_pitch = fb_width * fb_bpp / 8;
    fb_size = fb_pitch * fb_height;

    info = framebuffer_alloc(sizeof(*par), device);
    if (!info)
        return -ENOMEM;

    /*
     * Par
     */
    vmw_priv->fb_info = info;
    par = info->par;
    memset(par, 0, sizeof(*par));
    INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
    par->vmw_priv = vmw_priv;
    par->vmalloc = NULL;
    par->max_width = fb_width;
    par->max_height = fb_height;

    ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
                      par->max_height, &par->con,
                      &par->crtc, &init_mode);
    if (ret)
        goto err_kms;

    info->var.xres = init_mode->hdisplay;
    info->var.yres = init_mode->vdisplay;

    /*
     * Create buffers and alloc memory
     */
    par->vmalloc = vzalloc(fb_size);
    if (unlikely(par->vmalloc == NULL)) {
        ret = -ENOMEM;
        goto err_free;
    }

    /*
     * Fixed and var
     */
    strcpy(info->fix.id, "svgadrmfb");
    info->fix.type = FB_TYPE_PACKED_PIXELS;
    info->fix.visual = FB_VISUAL_TRUECOLOR;
    info->fix.type_aux = 0;
    info->fix.xpanstep = 1; /* doing it in hw */
    info->fix.ypanstep = 1; /* doing it in hw */
    info->fix.ywrapstep = 0;
    info->fix.accel = FB_ACCEL_NONE;
    info->fix.line_length = fb_pitch;

    info->fix.smem_start = 0;
    info->fix.smem_len = fb_size;

    info->pseudo_palette = par->pseudo_palette;
    info->screen_base = (char __iomem *)par->vmalloc;
    info->screen_size = fb_size;

    info->fbops = &vmw_fb_ops;

    /* 24 depth per default */
    info->var.red.offset = 16;
    info->var.green.offset = 8;
    info->var.blue.offset = 0;
    info->var.red.length = 8;
    info->var.green.length = 8;
    info->var.blue.length = 8;
    info->var.transp.offset = 0;
    info->var.transp.length = 0;

    info->var.xres_virtual = fb_width;
    info->var.yres_virtual = fb_height;
    info->var.bits_per_pixel = fb_bpp;
    info->var.xoffset = 0;
    info->var.yoffset = 0;
    info->var.activate = FB_ACTIVATE_NOW;
    info->var.height = -1;
    info->var.width = -1;

    /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
    info->apertures = alloc_apertures(1);
    if (!info->apertures) {
        ret = -ENOMEM;
        goto err_aper;
    }
    info->apertures->ranges[0].base = vmw_priv->vram_start;
    info->apertures->ranges[0].size = vmw_priv->vram_size;

    /*
     * Dirty & Deferred IO
     */
    par->dirty.x1 = par->dirty.x2 = 0;
    par->dirty.y1 = par->dirty.y2 = 0;
    par->dirty.active = true;
    spin_lock_init(&par->dirty.lock);
    mutex_init(&par->bo_mutex);
    info->fbdefio = &vmw_defio;
    fb_deferred_io_init(info);

    ret = register_framebuffer(info);
    if (unlikely(ret != 0))
        goto err_defio;

    vmw_fb_set_par(info);

    return 0;

err_defio:
    fb_deferred_io_cleanup(info);
err_aper:
err_free:
    vfree(par->vmalloc);
err_kms:
    framebuffer_release(info);
    vmw_priv->fb_info = NULL;

    return ret;
}

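/**
 * vmw_fb_close - tear down the fbdev emulation
 *
 * @vmw_priv: Pointer to the device private structure.
 *
 * Stops deferred io and the dirty flush work, unregisters the framebuffer
 * and releases the KMS state, buffer object and shadow buffer set up by
 * vmw_fb_init().
 */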
int vmw_fb_close(struct vmw_private *vmw_priv)
{
    struct fb_info *info;
    struct vmw_fb_par *par;

    if (!vmw_priv->fb_info)
        return 0;

    info = vmw_priv->fb_info;
    par = info->par;

    /* ??? order */
    fb_deferred_io_cleanup(info);
    cancel_delayed_work_sync(&par->local_work);
    unregister_framebuffer(info);

    mutex_lock(&par->bo_mutex);
    (void) vmw_fb_kms_detach(par, true, true);
    mutex_unlock(&par->bo_mutex);

    vfree(par->vmalloc);
    framebuffer_release(info);

    return 0;
}

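/**
 * vmw_fb_off - disable fbdev dirty flushing, e.g. during hibernation
 *
 * @vmw_priv: Pointer to the device private structure.
 *
 * Clears par->dirty.active and waits for any pending deferred-io and dirty
 * flush work to finish, so no further device access happens on behalf of
 * fbdev until vmw_fb_on() is called.
 */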
int vmw_fb_off(struct vmw_private *vmw_priv)
{
    struct fb_info *info;
    struct vmw_fb_par *par;
    unsigned long flags;

    if (!vmw_priv->fb_info)
        return -EINVAL;

    info = vmw_priv->fb_info;
    par = info->par;

    spin_lock_irqsave(&par->dirty.lock, flags);
    par->dirty.active = false;
    spin_unlock_irqrestore(&par->dirty.lock, flags);

    flush_delayed_work(&info->deferred_work);
    flush_delayed_work(&par->local_work);

    return 0;
}

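/**
 * vmw_fb_on - re-enable fbdev dirty flushing
 *
 * @vmw_priv: Pointer to the device private structure.
 *
 * Sets par->dirty.active again and schedules an immediate flush to bring
 * the device framebuffer back in sync with the shadow buffer.
 */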
int vmw_fb_on(struct vmw_private *vmw_priv)
{
    struct fb_info *info;
    struct vmw_fb_par *par;
    unsigned long flags;

    if (!vmw_priv->fb_info)
        return -EINVAL;

    info = vmw_priv->fb_info;
    par = info->par;

    spin_lock_irqsave(&par->dirty.lock, flags);
    par->dirty.active = true;
    spin_unlock_irqrestore(&par->dirty.lock, flags);

    /*
     * Need to reschedule a dirty update, because otherwise that's
     * only done in dirty_mark() if the previous coalesced
     * dirty region was empty.
     */
    schedule_delayed_work(&par->local_work, 0);

    return 0;
}