0001 // SPDX-License-Identifier: GPL-2.0 OR MIT
0002 /*
0003  * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
0004  *
0005  * Permission is hereby granted, free of charge, to any person obtaining a
0006  * copy of this software and associated documentation files (the
0007  * "Software"), to deal in the Software without restriction, including
0008  * without limitation the rights to use, copy, modify, merge, publish,
0009  * distribute, sub license, and/or sell copies of the Software, and to
0010  * permit persons to whom the Software is furnished to do so, subject to
0011  * the following conditions:
0012  *
0013  * The above copyright notice and this permission notice (including the
0014  * next paragraph) shall be included in all copies or substantial portions
0015  * of the Software.
0016  *
0017  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0018  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0019  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
0020  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
0021  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
0022  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
0023  * USE OR OTHER DEALINGS IN THE SOFTWARE.
0024  *
0025  */
0026 
0027 #include <linux/objtool.h>
0028 #include <linux/kernel.h>
0029 #include <linux/module.h>
0030 #include <linux/slab.h>
0031 #include <linux/cc_platform.h>
0032 
0033 #include <asm/hypervisor.h>
0034 #include <drm/drm_ioctl.h>
0035 
0036 #include "vmwgfx_drv.h"
0037 #include "vmwgfx_msg_x86.h"
0038 #include "vmwgfx_msg_arm64.h"
0039 #include "vmwgfx_mksstat.h"
0040 
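/*
 * Guest/host "backdoor" RPC constants. VMW_HYPERVISOR_MAGIC is ASCII "VMXh"
 * and RPCI_PROTOCOL_NUM is little-endian ASCII "RPCI". The hypervisor reports
 * the MESSAGE_STATUS_* bits in the high word of %ecx. A complete message
 * exchange is: open channel, SENDSIZE, SENDPAYLOAD (repeated as needed),
 * then RECVSIZE/RECVPAYLOAD/RECVSTATUS for the reply, and close channel.
 */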
0041 #define MESSAGE_STATUS_SUCCESS  0x0001
0042 #define MESSAGE_STATUS_DORECV   0x0002
0043 #define MESSAGE_STATUS_CPT      0x0010
0044 #define MESSAGE_STATUS_HB       0x0080
0045 
0046 #define RPCI_PROTOCOL_NUM       0x49435052
0047 #define GUESTMSG_FLAG_COOKIE    0x80000000
0048 
0049 #define RETRIES                 3
0050 
0051 #define VMW_HYPERVISOR_MAGIC    0x564D5868
0052 
0053 #define VMW_PORT_CMD_MSG        30
0054 #define VMW_PORT_CMD_HB_MSG     0
0055 #define VMW_PORT_CMD_OPEN_CHANNEL  (MSG_TYPE_OPEN << 16 | VMW_PORT_CMD_MSG)
0056 #define VMW_PORT_CMD_CLOSE_CHANNEL (MSG_TYPE_CLOSE << 16 | VMW_PORT_CMD_MSG)
0057 #define VMW_PORT_CMD_SENDSIZE   (MSG_TYPE_SENDSIZE << 16 | VMW_PORT_CMD_MSG)
0058 #define VMW_PORT_CMD_RECVSIZE   (MSG_TYPE_RECVSIZE << 16 | VMW_PORT_CMD_MSG)
0059 #define VMW_PORT_CMD_RECVSTATUS (MSG_TYPE_RECVSTATUS << 16 | VMW_PORT_CMD_MSG)
0060 
0061 #define VMW_PORT_CMD_MKS_GUEST_STATS   85
0062 #define VMW_PORT_CMD_MKSGS_RESET       (0 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
0063 #define VMW_PORT_CMD_MKSGS_ADD_PPN     (1 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
0064 #define VMW_PORT_CMD_MKSGS_REMOVE_PPN  (2 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
0065 
0066 #define HIGH_WORD(X) (((X) & 0xFFFF0000) >> 16)
0067 
0068 #define MAX_USER_MSG_LENGTH PAGE_SIZE
0069 
0070 static u32 vmw_msg_enabled = 1;
0071 
0072 enum rpc_msg_type {
0073     MSG_TYPE_OPEN,
0074     MSG_TYPE_SENDSIZE,
0075     MSG_TYPE_SENDPAYLOAD,
0076     MSG_TYPE_RECVSIZE,
0077     MSG_TYPE_RECVPAYLOAD,
0078     MSG_TYPE_RECVSTATUS,
0079     MSG_TYPE_CLOSE,
0080 };
0081 
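/*
 * Per-channel RPC state. The channel id and the two cookie words are handed
 * back by the hypervisor when the channel is opened and must be passed back
 * on every subsequent backdoor call made on that channel.
 */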
0082 struct rpc_channel {
0083     u16 channel_id;
0084     u32 cookie_high;
0085     u32 cookie_low;
0086 };
0087 
0088 
0089 
0090 /**
0091  * vmw_open_channel: Opens an RPC channel to the hypervisor
0092  *
0093  * @channel: RPC channel
0094  * @protocol: Protocol number to open the channel with, e.g. RPCI_PROTOCOL_NUM
0095  *
0096  * Returns: 0 on success
0097  */
0098 static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol)
0099 {
0100     unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
0101 
0102     VMW_PORT(VMW_PORT_CMD_OPEN_CHANNEL,
0103         (protocol | GUESTMSG_FLAG_COOKIE), si, di,
0104         0,
0105         VMW_HYPERVISOR_MAGIC,
0106         eax, ebx, ecx, edx, si, di);
0107 
0108     if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
0109         return -EINVAL;
0110 
0111     channel->channel_id  = HIGH_WORD(edx);
0112     channel->cookie_high = si;
0113     channel->cookie_low  = di;
0114 
0115     return 0;
0116 }
0117 
0118 
0119 
0120 /**
0121  * vmw_close_channel: Closes an RPC channel opened with vmw_open_channel()
0122  *
0123  * @channel: RPC channel
0124  *
0125  * Returns: 0 on success
0126  */
0127 static int vmw_close_channel(struct rpc_channel *channel)
0128 {
0129     unsigned long eax, ebx, ecx, edx, si, di;
0130 
0131     /* Set up additional parameters */
0132     si  = channel->cookie_high;
0133     di  = channel->cookie_low;
0134 
0135     VMW_PORT(VMW_PORT_CMD_CLOSE_CHANNEL,
0136         0, si, di,
0137         channel->channel_id << 16,
0138         VMW_HYPERVISOR_MAGIC,
0139         eax, ebx, ecx, edx, si, di);
0140 
0141     if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
0142         return -EINVAL;
0143 
0144     return 0;
0145 }
0146 
0147 /**
0148  * vmw_port_hb_out - Send the message payload either through the
0149  * high-bandwidth port if available, or through the backdoor otherwise.
0150  * @channel: The rpc channel.
0151  * @msg: NULL-terminated message.
0152  * @hb: Whether the high-bandwidth port is available.
0153  *
0154  * Return: The port status.
0155  */
0156 static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
0157                      const char *msg, bool hb)
0158 {
0159     unsigned long si, di, eax, ebx, ecx, edx;
0160     unsigned long msg_len = strlen(msg);
0161 
0162     /* HB port can't access encrypted memory. */
0163     if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
0164         unsigned long bp = channel->cookie_high;
0165         u32 channel_id = (channel->channel_id << 16);
0166 
0167         si = (uintptr_t) msg;
0168         di = channel->cookie_low;
0169 
0170         VMW_PORT_HB_OUT(
0171             (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
0172             msg_len, si, di,
0173             VMWARE_HYPERVISOR_HB | channel_id |
0174             VMWARE_HYPERVISOR_OUT,
0175             VMW_HYPERVISOR_MAGIC, bp,
0176             eax, ebx, ecx, edx, si, di);
0177 
0178         return ebx;
0179     }
0180 
0181     /* HB port not available. Send the message 4 bytes at a time. */
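    /* Seed ecx so the status check passes on the first loop iteration. */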
0182     ecx = MESSAGE_STATUS_SUCCESS << 16;
0183     while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) {
0184         unsigned int bytes = min_t(size_t, msg_len, 4);
0185         unsigned long word = 0;
0186 
0187         memcpy(&word, msg, bytes);
0188         msg_len -= bytes;
0189         msg += bytes;
0190         si = channel->cookie_high;
0191         di = channel->cookie_low;
0192 
0193         VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16),
0194              word, si, di,
0195              channel->channel_id << 16,
0196              VMW_HYPERVISOR_MAGIC,
0197              eax, ebx, ecx, edx, si, di);
0198     }
0199 
0200     return ecx;
0201 }
0202 
0203 /**
0204  * vmw_port_hb_in - Receive the message payload either through the
0205  * high-bandwidth port if available, or through the backdoor otherwise.
0206  * @channel: The rpc channel.
0207  * @reply: Pointer to buffer holding reply.
0208  * @reply_len: Length of the reply.
0209  * @hb: Whether the high-bandwidth port is available.
0210  *
0211  * Return: The port status.
0212  */
0213 static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
0214                     unsigned long reply_len, bool hb)
0215 {
0216     unsigned long si, di, eax, ebx, ecx, edx;
0217 
0218     /* HB port can't access encrypted memory */
0219     if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
0220         unsigned long bp = channel->cookie_low;
0221         u32 channel_id = (channel->channel_id << 16);
0222 
0223         si = channel->cookie_high;
0224         di = (uintptr_t) reply;
0225 
0226         VMW_PORT_HB_IN(
0227             (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
0228             reply_len, si, di,
0229             VMWARE_HYPERVISOR_HB | channel_id,
0230             VMW_HYPERVISOR_MAGIC, bp,
0231             eax, ebx, ecx, edx, si, di);
0232 
0233         return ebx;
0234     }
0235 
0236     /* HB port not available. Retrieve the message 4 bytes at a time. */
0237     ecx = MESSAGE_STATUS_SUCCESS << 16;
0238     while (reply_len) {
0239         unsigned int bytes = min_t(unsigned long, reply_len, 4);
0240 
0241         si = channel->cookie_high;
0242         di = channel->cookie_low;
0243 
0244         VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16),
0245              MESSAGE_STATUS_SUCCESS, si, di,
0246              channel->channel_id << 16,
0247              VMW_HYPERVISOR_MAGIC,
0248              eax, ebx, ecx, edx, si, di);
0249 
0250         if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
0251             break;
0252 
0253         memcpy(reply, &ebx, bytes);
0254         reply_len -= bytes;
0255         reply += bytes;
0256     }
0257 
0258     return ecx;
0259 }
0260 
0261 
0262 /**
0263  * vmw_send_msg: Sends a message to the host
0264  *
0265  * @channel: RPC channel
0266  * @msg: NULL-terminated message to send
0267  *
0268  * Returns: 0 on success
0269  */
0270 static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
0271 {
0272     unsigned long eax, ebx, ecx, edx, si, di;
0273     size_t msg_len = strlen(msg);
0274     int retries = 0;
0275 
0276     while (retries < RETRIES) {
0277         retries++;
0278 
0279         /* Set up additional parameters */
0280         si  = channel->cookie_high;
0281         di  = channel->cookie_low;
0282 
0283         VMW_PORT(VMW_PORT_CMD_SENDSIZE,
0284             msg_len, si, di,
0285             channel->channel_id << 16,
0286             VMW_HYPERVISOR_MAGIC,
0287             eax, ebx, ecx, edx, si, di);
0288 
0289         if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
0290             /* Expected success. Give up. */
0291             return -EINVAL;
0292         }
0293 
0294         /* Send msg */
0295         ebx = vmw_port_hb_out(channel, msg,
0296                       !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
0297 
0298         if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) {
0299             return 0;
0300         } else if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
0301             /* A checkpoint occurred. Retry. */
0302             continue;
0303         } else {
0304             break;
0305         }
0306     }
0307 
0308     return -EINVAL;
0309 }
0310 STACK_FRAME_NON_STANDARD(vmw_send_msg);
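/*
 * The VMW_PORT_HB_* macros contain inline assembly that uses %rbp, which
 * objtool cannot validate; hence this annotation (vmw_recv_msg() below is
 * annotated for the same reason).
 */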
0311 
0312 
0313 /**
0314  * vmw_recv_msg: Receives a message from the host
0315  *
0316  * Note: It is the caller's responsibility to call kfree() on *msg.
0317  *
0318  * @channel: Channel opened by vmw_open_channel()
0319  * @msg: [OUT] Message received from the host
0320  * @msg_len: [OUT] Length of the received message
0321  */
0322 static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
0323             size_t *msg_len)
0324 {
0325     unsigned long eax, ebx, ecx, edx, si, di;
0326     char *reply;
0327     size_t reply_len;
0328     int retries = 0;
0329 
0330 
0331     *msg_len = 0;
0332     *msg = NULL;
0333 
0334     while (retries < RETRIES) {
0335         retries++;
0336 
0337         /* Set up additional parameters */
0338         si  = channel->cookie_high;
0339         di  = channel->cookie_low;
0340 
0341         VMW_PORT(VMW_PORT_CMD_RECVSIZE,
0342             0, si, di,
0343             channel->channel_id << 16,
0344             VMW_HYPERVISOR_MAGIC,
0345             eax, ebx, ecx, edx, si, di);
0346 
0347         if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
0348             DRM_ERROR("Failed to get reply size for host message.\n");
0349             return -EINVAL;
0350         }
0351 
0352         /* No reply available.  This is okay. */
0353         if ((HIGH_WORD(ecx) & MESSAGE_STATUS_DORECV) == 0)
0354             return 0;
0355 
0356         reply_len = ebx;
0357         reply     = kzalloc(reply_len + 1, GFP_KERNEL);
0358         if (!reply) {
0359             DRM_ERROR("Cannot allocate memory for host message reply.\n");
0360             return -ENOMEM;
0361         }
0362 
0363 
0364         /* Receive buffer */
0365         ebx = vmw_port_hb_in(channel, reply, reply_len,
0366                      !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
0367         if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
0368             kfree(reply);
0369             reply = NULL;
0370             if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
0371                 /* A checkpoint occurred. Retry. */
0372                 continue;
0373             }
0374 
0375             return -EINVAL;
0376         }
0377 
0378         reply[reply_len] = '\0';
0379 
0380 
0381         /* Ack buffer */
0382         si  = channel->cookie_high;
0383         di  = channel->cookie_low;
0384 
0385         VMW_PORT(VMW_PORT_CMD_RECVSTATUS,
0386             MESSAGE_STATUS_SUCCESS, si, di,
0387             channel->channel_id << 16,
0388             VMW_HYPERVISOR_MAGIC,
0389             eax, ebx, ecx, edx, si, di);
0390 
0391         if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
0392             kfree(reply);
0393             reply = NULL;
0394             if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
0395                 /* A checkpoint occurred. Retry. */
0396                 continue;
0397             }
0398 
0399             return -EINVAL;
0400         }
0401 
0402         break;
0403     }
0404 
0405     if (!reply)
0406         return -EINVAL;
0407 
0408     *msg_len = reply_len;
0409     *msg     = reply;
0410 
0411     return 0;
0412 }
0413 STACK_FRAME_NON_STANDARD(vmw_recv_msg);
0414 
0415 
0416 /**
0417  * vmw_host_get_guestinfo: Gets a GuestInfo parameter
0418  *
0419  * Gets the value of a GuestInfo.* parameter. The value returned will be in
0420  * a string, and it is up to the caller to post-process.
0421  *
0422  * @guest_info_param: Parameter to get, e.g. GuestInfo.svga.gl3
0423  * @buffer: If NULL, *length will contain the reply size.
0424  * @length: Size of @buffer. Set to the size of the reply upon return.
0425  *
0426  * Returns: 0 on success
0427  */
0428 int vmw_host_get_guestinfo(const char *guest_info_param,
0429                char *buffer, size_t *length)
0430 {
0431     struct rpc_channel channel;
0432     char *msg, *reply = NULL;
0433     size_t reply_len = 0;
0434 
0435     if (!vmw_msg_enabled)
0436         return -ENODEV;
0437 
0438     if (!guest_info_param || !length)
0439         return -EINVAL;
0440 
0441     msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
0442     if (!msg) {
0443         DRM_ERROR("Cannot allocate memory to get guest info \"%s\".",
0444               guest_info_param);
0445         return -ENOMEM;
0446     }
0447 
0448     if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
0449         goto out_open;
0450 
0451     if (vmw_send_msg(&channel, msg) ||
0452         vmw_recv_msg(&channel, (void *) &reply, &reply_len))
0453         goto out_msg;
0454 
0455     vmw_close_channel(&channel);
0456     if (buffer && reply && reply_len > 0) {
0457         /* Remove the reply code, which is the first 2 characters of
0458          * the reply
0459          */
0460         reply_len = max(reply_len - 2, (size_t) 0);
0461         reply_len = min(reply_len, *length);
0462 
0463         if (reply_len > 0)
0464             memcpy(buffer, reply + 2, reply_len);
0465     }
0466 
0467     *length = reply_len;
0468 
0469     kfree(reply);
0470     kfree(msg);
0471 
0472     return 0;
0473 
0474 out_msg:
0475     vmw_close_channel(&channel);
0476     kfree(reply);
0477 out_open:
0478     *length = 0;
0479     kfree(msg);
0480     DRM_ERROR("Failed to get guest info \"%s\".", guest_info_param);
0481 
0482     return -EINVAL;
0483 }
0484 
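/*
 * Example usage (illustrative only, reusing the parameter name from the
 * kernel-doc above):
 *
 *	char value[64];
 *	size_t len = sizeof(value);
 *
 *	if (!vmw_host_get_guestinfo("GuestInfo.svga.gl3", value, &len))
 *		... value now holds up to len bytes of the reply, not NUL-terminated.
 */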
0485 
0486 /**
0487  * vmw_host_printf: Sends a log message to the host
0488  *
0489  * @fmt: Regular printf format string and arguments
0490  *
0491  * Returns: 0 on success
0492  */
0493 __printf(1, 2)
0494 int vmw_host_printf(const char *fmt, ...)
0495 {
0496     va_list ap;
0497     struct rpc_channel channel;
0498     char *msg;
0499     char *log;
0500     int ret = 0;
0501 
0502     if (!vmw_msg_enabled)
0503         return -ENODEV;
0504 
0505     if (!fmt)
0506         return ret;
0507 
0508     va_start(ap, fmt);
0509     log = kvasprintf(GFP_KERNEL, fmt, ap);
0510     va_end(ap);
0511     if (!log) {
0512         DRM_ERROR("Cannot allocate memory for the log message.\n");
0513         return -ENOMEM;
0514     }
0515 
0516     msg = kasprintf(GFP_KERNEL, "log %s", log);
0517     if (!msg) {
0518         DRM_ERROR("Cannot allocate memory for host log message.\n");
0519         kfree(log);
0520         return -ENOMEM;
0521     }
0522 
0523     if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
0524         goto out_open;
0525 
0526     if (vmw_send_msg(&channel, msg))
0527         goto out_msg;
0528 
0529     vmw_close_channel(&channel);
0530     kfree(msg);
0531     kfree(log);
0532 
0533     return 0;
0534 
0535 out_msg:
0536     vmw_close_channel(&channel);
0537 out_open:
0538     kfree(msg);
0539     kfree(log);
0540     DRM_ERROR("Failed to send host log message.\n");
0541 
0542     return -EINVAL;
0543 }
0544 
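/*
 * Example usage (illustrative only; the format string is not taken from this
 * file):
 *
 *	vmw_host_printf("vmwgfx: %s: unexpected device state %u", __func__, state);
 *
 * The resulting "log ..." RPC is recorded by the host, typically in vmware.log.
 */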
0545 
0546 /**
0547  * vmw_msg_ioctl: Sends and receives a message to/from the host
0548  *
0549  * Sends a message from user-space to the host and, unless send_only is set,
0550  * also receives the host's reply and copies it back to user-space.
0551  *
0552  * @dev: Identifies the drm device.
0553  * @data: Pointer to the ioctl argument.
0554  * @file_priv: Identifies the caller.
0555  * Return: Zero on success, negative error code on error.
0556  */
0557 
0558 int vmw_msg_ioctl(struct drm_device *dev, void *data,
0559           struct drm_file *file_priv)
0560 {
0561     struct drm_vmw_msg_arg *arg =
0562             (struct drm_vmw_msg_arg *)data;
0563     struct rpc_channel channel;
0564     char *msg;
0565     int length;
0566 
0567     msg = kmalloc(MAX_USER_MSG_LENGTH, GFP_KERNEL);
0568     if (!msg) {
0569         DRM_ERROR("Cannot allocate memory for log message.\n");
0570         return -ENOMEM;
0571     }
0572 
0573     length = strncpy_from_user(msg, (void __user *)((unsigned long)arg->send),
0574                    MAX_USER_MSG_LENGTH);
0575     if (length < 0 || length >= MAX_USER_MSG_LENGTH) {
0576         DRM_ERROR("Userspace message access failure.\n");
0577         kfree(msg);
0578         return -EINVAL;
0579     }
0580 
0581 
0582     if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) {
0583         DRM_ERROR("Failed to open channel.\n");
0584         goto out_open;
0585     }
0586 
0587     if (vmw_send_msg(&channel, msg)) {
0588         DRM_ERROR("Failed to send message to host.\n");
0589         goto out_msg;
0590     }
0591 
0592     if (!arg->send_only) {
0593         char *reply = NULL;
0594         size_t reply_len = 0;
0595 
0596         if (vmw_recv_msg(&channel, (void *) &reply, &reply_len)) {
0597             DRM_ERROR("Failed to receive message from host.\n");
0598             goto out_msg;
0599         }
0600         if (reply && reply_len > 0) {
0601             if (copy_to_user((void __user *)((unsigned long)arg->receive),
0602                      reply, reply_len)) {
0603                 DRM_ERROR("Failed to copy message to userspace.\n");
0604                 kfree(reply);
0605                 goto out_msg;
0606             }
0607             arg->receive_len = (__u32)reply_len;
0608         }
0609         kfree(reply);
0610     }
0611 
0612     vmw_close_channel(&channel);
0613     kfree(msg);
0614 
0615     return 0;
0616 
0617 out_msg:
0618     vmw_close_channel(&channel);
0619 out_open:
0620     kfree(msg);
0621 
0622     return -EINVAL;
0623 }
0624 
0625 /**
0626  * reset_ppn_array: Resets a PPN64 array to INVALID_PPN64 content
0627  *
0628  * @arr: Array to reset.
0629  * @size: Array length.
0630  */
0631 static inline void reset_ppn_array(PPN64 *arr, size_t size)
0632 {
0633     size_t i;
0634 
0635     BUG_ON(!arr || size == 0);
0636 
0637     for (i = 0; i < size; ++i)
0638         arr[i] = INVALID_PPN64;
0639 }
0640 
0641 /**
0642  * hypervisor_ppn_reset_all: Removes all mksGuestStat instance descriptors from
0643  * the hypervisor. All related pages should be subsequently unpinned or freed.
0644  *
0645  */
0646 static inline void hypervisor_ppn_reset_all(void)
0647 {
0648     unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
0649 
0650     VMW_PORT(VMW_PORT_CMD_MKSGS_RESET,
0651         0, si, di,
0652         0,
0653         VMW_HYPERVISOR_MAGIC,
0654         eax, ebx, ecx, edx, si, di);
0655 }
0656 
0657 /**
0658  * hypervisor_ppn_add: Adds a single mksGuestStat instance descriptor to the
0659  * hypervisor. Any related userspace pages should be pinned in advance.
0660  *
0661  * @pfn: Physical page number of the instance descriptor
0662  */
0663 static inline void hypervisor_ppn_add(PPN64 pfn)
0664 {
0665     unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
0666 
0667     VMW_PORT(VMW_PORT_CMD_MKSGS_ADD_PPN,
0668         (unsigned long)pfn, si, di,
0669         0,
0670         VMW_HYPERVISOR_MAGIC,
0671         eax, ebx, ecx, edx, si, di);
0672 }
0673 
0674 /**
0675  * hypervisor_ppn_remove: Removes a single mksGuestStat instance descriptor from
0676  * the hypervisor. All related pages should be subsequently unpinned or freed.
0677  *
0678  * @pfn: Physical page number of the instance descriptor
0679  */
0680 static inline void hypervisor_ppn_remove(PPN64 pfn)
0681 {
0682     unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
0683 
0684     VMW_PORT(VMW_PORT_CMD_MKSGS_REMOVE_PPN,
0685         (unsigned long)pfn, si, di,
0686         0,
0687         VMW_HYPERVISOR_MAGIC,
0688         eax, ebx, ecx, edx, si, di);
0689 }
0690 
0691 #if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
0692 
0693 /* Order of the total number of pages used for kernel-internal mksGuestStat; at least 2 */
0694 #define MKSSTAT_KERNEL_PAGES_ORDER 2
0695 /* Header to the text description of mksGuestStat instance descriptor */
0696 #define MKSSTAT_KERNEL_DESCRIPTION "vmwgfx"
0697 
0698 /* Kernel mksGuestStats counter names and descriptions; same order as enum mksstat_kern_stats_t */
0699 static const char *const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] =
0700 {
0701     { "vmw_execbuf_ioctl", "vmw_execbuf_ioctl" },
0702 };
0703 
0704 /**
0705  * mksstat_init_record: Initializes an MKSGuestStatCounter-based record
0706  * for the respective mksGuestStat index.
0707  *
0708  * @stat_idx: Index of the MKSGuestStatCounter-based mksGuestStat record.
0709  * @pstat: Pointer to array of MKSGuestStatCounterTime.
0710  * @pinfo: Pointer to array of MKSGuestStatInfoEntry.
0711  * @pstrs: Pointer to current end of the name/description sequence.
0712  * Return: Pointer to the new end of the names/description sequence.
0713  */
0714 
0715 static inline char *mksstat_init_record(mksstat_kern_stats_t stat_idx,
0716     MKSGuestStatCounterTime *pstat, MKSGuestStatInfoEntry *pinfo, char *pstrs)
0717 {
0718     char *const pstrd = pstrs + strlen(mksstat_kern_name_desc[stat_idx][0]) + 1;
0719     strcpy(pstrs, mksstat_kern_name_desc[stat_idx][0]);
0720     strcpy(pstrd, mksstat_kern_name_desc[stat_idx][1]);
0721 
0722     pinfo[stat_idx].name.s = pstrs;
0723     pinfo[stat_idx].description.s = pstrd;
0724     pinfo[stat_idx].flags = MKS_GUEST_STAT_FLAG_NONE;
0725     pinfo[stat_idx].stat.counter = (MKSGuestStatCounter *)&pstat[stat_idx];
0726 
0727     return pstrd + strlen(mksstat_kern_name_desc[stat_idx][1]) + 1;
0728 }
0729 
0730 /**
0731  * mksstat_init_record_time: Initializes an MKSGuestStatCounterTime-based record
0732  * for the respective mksGuestStat index.
0733  *
0734  * @stat_idx: Index of the MKSGuestStatCounterTime-based mksGuestStat record.
0735  * @pstat: Pointer to array of MKSGuestStatCounterTime.
0736  * @pinfo: Pointer to array of MKSGuestStatInfoEntry.
0737  * @pstrs: Pointer to current end of the name/description sequence.
0738  * Return: Pointer to the new end of the names/description sequence.
0739  */
0740 
0741 static inline char *mksstat_init_record_time(mksstat_kern_stats_t stat_idx,
0742     MKSGuestStatCounterTime *pstat, MKSGuestStatInfoEntry *pinfo, char *pstrs)
0743 {
0744     char *const pstrd = pstrs + strlen(mksstat_kern_name_desc[stat_idx][0]) + 1;
0745     strcpy(pstrs, mksstat_kern_name_desc[stat_idx][0]);
0746     strcpy(pstrd, mksstat_kern_name_desc[stat_idx][1]);
0747 
0748     pinfo[stat_idx].name.s = pstrs;
0749     pinfo[stat_idx].description.s = pstrd;
0750     pinfo[stat_idx].flags = MKS_GUEST_STAT_FLAG_TIME;
0751     pinfo[stat_idx].stat.counterTime = &pstat[stat_idx];
0752 
0753     return pstrd + strlen(mksstat_kern_name_desc[stat_idx][1]) + 1;
0754 }
0755 
0756 /**
0757  * mksstat_init_kern_id: Creates a single mksGuestStat instance descriptor and
0758  * kernel-internal counters. Adds PFN mapping to the hypervisor.
0759  *
0760  * Create a single mksGuestStat instance descriptor and corresponding structures
0761  * for all kernel-internal counters. The corresponding PFNs are mapped with the
0762  * hypervisor.
0763  *
0764  * @ppage: Output pointer to page containing the instance descriptor.
0765  * Return: Zero on success, negative error code on error.
0766  */
0767 
0768 static int mksstat_init_kern_id(struct page **ppage)
0769 {
0770     MKSGuestStatInstanceDescriptor *pdesc;
0771     MKSGuestStatCounterTime *pstat;
0772     MKSGuestStatInfoEntry *pinfo;
0773     char *pstrs, *pstrs_acc;
0774 
0775     /* Allocate pages for the kernel-internal instance descriptor */
0776     struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, MKSSTAT_KERNEL_PAGES_ORDER);
0777 
0778     if (!page)
0779         return -ENOMEM;
0780 
0781     pdesc = page_address(page);
0782     pstat = vmw_mksstat_get_kern_pstat(pdesc);
0783     pinfo = vmw_mksstat_get_kern_pinfo(pdesc);
0784     pstrs = vmw_mksstat_get_kern_pstrs(pdesc);
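    /*
     * The vmw_mksstat_get_kern_* helpers (vmwgfx_mksstat.h) carve the counter,
     * info and string areas out of the same MKSSTAT_KERNEL_PAGES_ORDER
     * allocation; only the first PPN of each area is reported to the
     * hypervisor below, so each area is assumed to fit within one page.
     */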
0785 
0786     /* Set up all kernel-internal counters and corresponding structures */
0787     pstrs_acc = pstrs;
0788     pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_EXECBUF, pstat, pinfo, pstrs_acc);
0789 
0790     /* Add new counters above, in their order of appearance in mksstat_kern_stats_t */
0791 
0792     BUG_ON(pstrs_acc - pstrs > PAGE_SIZE);
0793 
0794     /* Set up the kernel-internal instance descriptor */
0795     pdesc->reservedMBZ = 0;
0796     pdesc->statStartVA = (uintptr_t)pstat;
0797     pdesc->strsStartVA = (uintptr_t)pstrs;
0798     pdesc->statLength = sizeof(*pstat) * MKSSTAT_KERN_COUNT;
0799     pdesc->infoLength = sizeof(*pinfo) * MKSSTAT_KERN_COUNT;
0800     pdesc->strsLength = pstrs_acc - pstrs;
0801     snprintf(pdesc->description, ARRAY_SIZE(pdesc->description) - 1, "%s pid=%d",
0802         MKSSTAT_KERNEL_DESCRIPTION, current->pid);
0803 
0804     pdesc->statPPNs[0] = page_to_pfn(virt_to_page(pstat));
0805     reset_ppn_array(pdesc->statPPNs + 1, ARRAY_SIZE(pdesc->statPPNs) - 1);
0806 
0807     pdesc->infoPPNs[0] = page_to_pfn(virt_to_page(pinfo));
0808     reset_ppn_array(pdesc->infoPPNs + 1, ARRAY_SIZE(pdesc->infoPPNs) - 1);
0809 
0810     pdesc->strsPPNs[0] = page_to_pfn(virt_to_page(pstrs));
0811     reset_ppn_array(pdesc->strsPPNs + 1, ARRAY_SIZE(pdesc->strsPPNs) - 1);
0812 
0813     *ppage = page;
0814 
0815     hypervisor_ppn_add((PPN64)page_to_pfn(page));
0816 
0817     return 0;
0818 }
0819 
0820 /**
0821  * vmw_mksstat_get_kern_slot: Acquires a slot for a single kernel-internal
0822  * mksGuestStat instance descriptor.
0823  *
0824  * Find a slot for a single kernel-internal mksGuestStat instance descriptor.
0825  * In case none is already present, allocate a new one and set up a kernel-
0826  * internal mksGuestStat instance descriptor in that slot.
0827  *
0828  * @pid: Process for which a slot is sought.
0829  * @dev_priv: Identifies the drm private device.
0830  * Return: Non-negative slot on success, negative error code on error.
0831  */
0832 
0833 int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv)
0834 {
0835     const size_t base = (u32)hash_32(pid, MKSSTAT_CAPACITY_LOG2);
0836     size_t i;
0837 
0838     for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
0839         const size_t slot = (i + base) % ARRAY_SIZE(dev_priv->mksstat_kern_pids);
0840 
0841         /* Check if an instance descriptor for this pid is already present */
0842         if (pid == (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[slot]))
0843             return (int)slot;
0844 
0845         /* Set up a new instance descriptor for this pid */
0846         if (!atomic_cmpxchg(&dev_priv->mksstat_kern_pids[slot], 0, MKSSTAT_PID_RESERVED)) {
0847             const int ret = mksstat_init_kern_id(&dev_priv->mksstat_kern_pages[slot]);
0848 
0849             if (!ret) {
0850                 /* Reset top-timer tracking for this slot */
0851                 dev_priv->mksstat_kern_top_timer[slot] = MKSSTAT_KERN_COUNT;
0852 
0853                 atomic_set(&dev_priv->mksstat_kern_pids[slot], pid);
0854                 return (int)slot;
0855             }
0856 
0857             atomic_set(&dev_priv->mksstat_kern_pids[slot], 0);
0858             return ret;
0859         }
0860     }
0861 
0862     return -ENOSPC;
0863 }
0864 
0865 #endif
0866 
0867 /**
0868  * vmw_mksstat_cleanup_descriptor: Frees a single userspace-originating
0869  * mksGuestStat instance-descriptor page and unpins all related user pages.
0870  *
0871  * Unpin all user pages related to this instance descriptor and free
0872  * the instance-descriptor page itself.
0873  *
0874  * @page: Page of the instance descriptor.
0875  */
0876 
0877 static void vmw_mksstat_cleanup_descriptor(struct page *page)
0878 {
0879     MKSGuestStatInstanceDescriptor *pdesc = page_address(page);
0880     size_t i;
0881 
0882     for (i = 0; i < ARRAY_SIZE(pdesc->statPPNs) && pdesc->statPPNs[i] != INVALID_PPN64; ++i)
0883         unpin_user_page(pfn_to_page(pdesc->statPPNs[i]));
0884 
0885     for (i = 0; i < ARRAY_SIZE(pdesc->infoPPNs) && pdesc->infoPPNs[i] != INVALID_PPN64; ++i)
0886         unpin_user_page(pfn_to_page(pdesc->infoPPNs[i]));
0887 
0888     for (i = 0; i < ARRAY_SIZE(pdesc->strsPPNs) && pdesc->strsPPNs[i] != INVALID_PPN64; ++i)
0889         unpin_user_page(pfn_to_page(pdesc->strsPPNs[i]));
0890 
0891     __free_page(page);
0892 }
0893 
0894 /**
0895  * vmw_mksstat_remove_all: Resets all mksGuestStat instance descriptors
0896  * from the hypervisor.
0897  *
0898  * Discard all hypervisor PFN mappings, containing active mksGuestStat instance
0899  * descriptors, unpin the related userspace pages and free the related kernel pages.
0900  *
0901  * @dev_priv: Identifies the drm private device.
0902  * Return: Zero on success, negative error code on error.
0903  */
0904 
0905 int vmw_mksstat_remove_all(struct vmw_private *dev_priv)
0906 {
0907     int ret = 0;
0908     size_t i;
0909 
0910     /* Discard all PFN mappings with the hypervisor */
0911     hypervisor_ppn_reset_all();
0912 
0913     /* Discard all userspace-originating instance descriptors and unpin all related pages */
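    /* Slots caught in MKSSTAT_PID_RESERVED state are being set up or torn
     * down concurrently; those are reported back as -EAGAIN.
     */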
0914     for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++i) {
0915         const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_user_pids[i]);
0916 
0917         if (!pid0)
0918             continue;
0919 
0920         if (pid0 != MKSSTAT_PID_RESERVED) {
0921             const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_user_pids[i], pid0, MKSSTAT_PID_RESERVED);
0922 
0923             if (!pid1)
0924                 continue;
0925 
0926             if (pid1 == pid0) {
0927                 struct page *const page = dev_priv->mksstat_user_pages[i];
0928 
0929                 BUG_ON(!page);
0930 
0931                 dev_priv->mksstat_user_pages[i] = NULL;
0932                 atomic_set(&dev_priv->mksstat_user_pids[i], 0);
0933 
0934                 vmw_mksstat_cleanup_descriptor(page);
0935                 continue;
0936             }
0937         }
0938 
0939         ret = -EAGAIN;
0940     }
0941 
0942 #if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
0943     /* Discard all kernel-internal instance descriptors and free all related pages */
0944     for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
0945         const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[i]);
0946 
0947         if (!pid0)
0948             continue;
0949 
0950         if (pid0 != MKSSTAT_PID_RESERVED) {
0951             const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_kern_pids[i], pid0, MKSSTAT_PID_RESERVED);
0952 
0953             if (!pid1)
0954                 continue;
0955 
0956             if (pid1 == pid0) {
0957                 struct page *const page = dev_priv->mksstat_kern_pages[i];
0958 
0959                 BUG_ON(!page);
0960 
0961                 dev_priv->mksstat_kern_pages[i] = NULL;
0962                 atomic_set(&dev_priv->mksstat_kern_pids[i], 0);
0963 
0964                 __free_pages(page, MKSSTAT_KERNEL_PAGES_ORDER);
0965                 continue;
0966             }
0967         }
0968 
0969         ret = -EAGAIN;
0970     }
0971 
0972 #endif
0973     return ret;
0974 }
0975 
0976 /**
0977  * vmw_mksstat_reset_ioctl: Resets all mksGuestStat instance descriptors
0978  * from the hypervisor.
0979  *
0980  * Discard all hypervisor PFN mappings, containing active mksGuestStat instance
0981  * descriptors, unpin the related userspace pages and free the related kernel pages.
0982  *
0983  * @dev: Identifies the drm device.
0984  * @data: Pointer to the ioctl argument.
0985  * @file_priv: Identifies the caller; unused.
0986  * Return: Zero on success, negative error code on error.
0987  */
0988 
0989 int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data,
0990                 struct drm_file *file_priv)
0991 {
0992     struct vmw_private *const dev_priv = vmw_priv(dev);
0993     return vmw_mksstat_remove_all(dev_priv);
0994 }
0995 
0996 /**
0997  * vmw_mksstat_add_ioctl: Creates a single userspace-originating mksGuestStat
0998  * instance descriptor and registers that with the hypervisor.
0999  *
1000  * Create a hypervisor PFN mapping, containing a single mksGuestStat instance
1001  * descriptor and pin the corresponding userspace pages.
1002  *
1003  * @dev: Identifies the drm device.
1004  * @data: Pointer to the ioctl argument.
1005  * @file_priv: Identifies the caller; unused.
1006  * Return: Zero on success, negative error code on error.
1007  */
1008 
1009 int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
1010                 struct drm_file *file_priv)
1011 {
1012     struct drm_vmw_mksstat_add_arg *arg =
1013         (struct drm_vmw_mksstat_add_arg *) data;
1014 
1015     struct vmw_private *const dev_priv = vmw_priv(dev);
1016 
1017     struct page *page;
1018     MKSGuestStatInstanceDescriptor *pdesc;
1019     const size_t num_pages_stat = PFN_UP(arg->stat_len);
1020     const size_t num_pages_info = PFN_UP(arg->info_len);
1021     const size_t num_pages_strs = PFN_UP(arg->strs_len);
1022     long desc_len;
1023     long nr_pinned_stat;
1024     long nr_pinned_info;
1025     long nr_pinned_strs;
1026     struct page *pages_stat[ARRAY_SIZE(pdesc->statPPNs)];
1027     struct page *pages_info[ARRAY_SIZE(pdesc->infoPPNs)];
1028     struct page *pages_strs[ARRAY_SIZE(pdesc->strsPPNs)];
1029     size_t i, slot;
1030 
1031     arg->id = -1;
1032 
1033     if (!arg->stat || !arg->info || !arg->strs)
1034         return -EINVAL;
1035 
1036     if (!arg->stat_len || !arg->info_len || !arg->strs_len)
1037         return -EINVAL;
1038 
1039     if (!arg->description)
1040         return -EINVAL;
1041 
1042     if (num_pages_stat > ARRAY_SIZE(pdesc->statPPNs) ||
1043         num_pages_info > ARRAY_SIZE(pdesc->infoPPNs) ||
1044         num_pages_strs > ARRAY_SIZE(pdesc->strsPPNs))
1045         return -EINVAL;
1046 
1047     /* Find an available slot in the mksGuestStats user array and reserve it */
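    /* MKSSTAT_PID_RESERVED marks a slot that is claimed but not yet populated;
     * the owning process group id is stored only once set-up succeeds.
     */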
1048     for (slot = 0; slot < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++slot)
1049         if (!atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], 0, MKSSTAT_PID_RESERVED))
1050             break;
1051 
1052     if (slot == ARRAY_SIZE(dev_priv->mksstat_user_pids))
1053         return -ENOSPC;
1054 
1055     BUG_ON(dev_priv->mksstat_user_pages[slot]);
1056 
1057     /* Allocate a page for the instance descriptor */
1058     page = alloc_page(GFP_KERNEL | __GFP_ZERO);
1059 
1060     if (!page) {
1061         atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
1062         return -ENOMEM;
1063     }
1064 
1065     /* Set up the instance descriptor */
1066     pdesc = page_address(page);
1067 
1068     pdesc->reservedMBZ = 0;
1069     pdesc->statStartVA = arg->stat;
1070     pdesc->strsStartVA = arg->strs;
1071     pdesc->statLength = arg->stat_len;
1072     pdesc->infoLength = arg->info_len;
1073     pdesc->strsLength = arg->strs_len;
1074     desc_len = strncpy_from_user(pdesc->description, u64_to_user_ptr(arg->description),
1075         ARRAY_SIZE(pdesc->description) - 1);
1076 
1077     if (desc_len < 0) {
1078         atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
1079         return -EFAULT;
1080     }
1081 
1082     reset_ppn_array(pdesc->statPPNs, ARRAY_SIZE(pdesc->statPPNs));
1083     reset_ppn_array(pdesc->infoPPNs, ARRAY_SIZE(pdesc->infoPPNs));
1084     reset_ppn_array(pdesc->strsPPNs, ARRAY_SIZE(pdesc->strsPPNs));
1085 
1086     /* Pin mksGuestStat user pages and store those in the instance descriptor */
1087     nr_pinned_stat = pin_user_pages(arg->stat, num_pages_stat, FOLL_LONGTERM, pages_stat, NULL);
1088     if (num_pages_stat != nr_pinned_stat)
1089         goto err_pin_stat;
1090 
1091     for (i = 0; i < num_pages_stat; ++i)
1092         pdesc->statPPNs[i] = page_to_pfn(pages_stat[i]);
1093 
1094     nr_pinned_info = pin_user_pages(arg->info, num_pages_info, FOLL_LONGTERM, pages_info, NULL);
1095     if (num_pages_info != nr_pinned_info)
1096         goto err_pin_info;
1097 
1098     for (i = 0; i < num_pages_info; ++i)
1099         pdesc->infoPPNs[i] = page_to_pfn(pages_info[i]);
1100 
1101     nr_pinned_strs = pin_user_pages(arg->strs, num_pages_strs, FOLL_LONGTERM, pages_strs, NULL);
1102     if (num_pages_strs != nr_pinned_strs)
1103         goto err_pin_strs;
1104 
1105     for (i = 0; i < num_pages_strs; ++i)
1106         pdesc->strsPPNs[i] = page_to_pfn(pages_strs[i]);
1107 
1108     /* Send the descriptor to the host via a hypervisor call. The mksGuestStat
1109      * pages will remain in use until the user requests a matching remove stats
1110      * or a stats reset occurs. */
1111     hypervisor_ppn_add((PPN64)page_to_pfn(page));
1112 
1113     dev_priv->mksstat_user_pages[slot] = page;
1114     atomic_set(&dev_priv->mksstat_user_pids[slot], task_pgrp_vnr(current));
1115 
1116     arg->id = slot;
1117 
1118     DRM_DEV_INFO(dev->dev, "pid=%d arg.description='%.*s' id=%zu\n", current->pid, (int)desc_len, pdesc->description, slot);
1119 
1120     return 0;
1121 
1122 err_pin_strs:
1123     if (nr_pinned_strs > 0)
1124         unpin_user_pages(pages_strs, nr_pinned_strs);
1125 
1126 err_pin_info:
1127     if (nr_pinned_info > 0)
1128         unpin_user_pages(pages_info, nr_pinned_info);
1129 
1130 err_pin_stat:
1131     if (nr_pinned_stat > 0)
1132         unpin_user_pages(pages_stat, nr_pinned_stat);
1133 
1134     atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
1135     __free_page(page);
1136     return -ENOMEM;
1137 }
1138 
1139 /**
1140  * vmw_mksstat_remove_ioctl: Removes a single userspace-originating mksGuestStat
1141  * instance descriptor from the hypervisor.
1142  *
1143  * Discard a hypervisor PFN mapping, containing a single mksGuestStat instance
1144  * descriptor and unpin the corresponding userspace pages.
1145  *
1146  * @dev: Identifies the drm device.
1147  * @data: Pointer to the ioctl argument.
1148  * @file_priv: Identifies the caller; unused.
1149  * Return: Zero on success, negative error code on error.
1150  */
1151 
1152 int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data,
1153                 struct drm_file *file_priv)
1154 {
1155     struct drm_vmw_mksstat_remove_arg *arg =
1156         (struct drm_vmw_mksstat_remove_arg *) data;
1157 
1158     struct vmw_private *const dev_priv = vmw_priv(dev);
1159 
1160     const size_t slot = arg->id;
1161     pid_t pgid, pid;
1162 
1163     if (slot >= ARRAY_SIZE(dev_priv->mksstat_user_pids))
1164         return -EINVAL;
1165 
1166     DRM_DEV_INFO(dev->dev, "pid=%d arg.id=%zu\n", current->pid, slot);
1167 
1168     pgid = task_pgrp_vnr(current);
1169     pid = atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], pgid, MKSSTAT_PID_RESERVED);
1170 
1171     if (!pid)
1172         return 0;
1173 
1174     if (pid == pgid) {
1175         struct page *const page = dev_priv->mksstat_user_pages[slot];
1176 
1177         BUG_ON(!page);
1178 
1179         dev_priv->mksstat_user_pages[slot] = NULL;
1180         atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
1181 
1182         hypervisor_ppn_remove((PPN64)page_to_pfn(page));
1183 
1184         vmw_mksstat_cleanup_descriptor(page);
1185         return 0;
1186     }
1187 
1188     return -EAGAIN;
1189 }