// SPDX-License-Identifier: MIT
/* Copyright (C) 2006-2017 Oracle Corporation */

0004 #include <linux/vbox_err.h>
0005 #include "vbox_drv.h"
0006 #include "vboxvideo_guest.h"
0007 #include "hgsmi_channels.h"
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022 static u32 vbva_buffer_available(const struct vbva_buffer *vbva)
0023 {
0024 s32 diff = vbva->data_offset - vbva->free_offset;
0025
0026 return diff > 0 ? diff : vbva->data_len + diff;
0027 }
0028
0029 static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx,
0030 const void *p, u32 len, u32 offset)
0031 {
0032 struct vbva_buffer *vbva = vbva_ctx->vbva;
0033 u32 bytes_till_boundary = vbva->data_len - offset;
0034 u8 *dst = &vbva->data[offset];
0035 s32 diff = len - bytes_till_boundary;
0036
0037 if (diff <= 0) {
0038
0039 memcpy(dst, p, len);
0040 } else {
0041
0042 memcpy(dst, p, bytes_till_boundary);
0043 memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
0044 }
0045 }
0046
0047 static void vbva_buffer_flush(struct gen_pool *ctx)
0048 {
0049 struct vbva_flush *p;
0050
0051 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH);
0052 if (!p)
0053 return;
0054
0055 p->reserved = 0;
0056
0057 hgsmi_buffer_submit(ctx, p);
0058 hgsmi_buffer_free(ctx, p);
0059 }
0060
0061 bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
0062 const void *p, u32 len)
0063 {
0064 struct vbva_record *record;
0065 struct vbva_buffer *vbva;
0066 u32 available;
0067
0068 vbva = vbva_ctx->vbva;
0069 record = vbva_ctx->record;
0070
0071 if (!vbva || vbva_ctx->buffer_overflow ||
0072 !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL))
0073 return false;
0074
0075 available = vbva_buffer_available(vbva);
0076
0077 while (len > 0) {
0078 u32 chunk = len;
0079
0080 if (chunk >= available) {
0081 vbva_buffer_flush(ctx);
0082 available = vbva_buffer_available(vbva);
0083 }
0084
0085 if (chunk >= available) {
0086 if (WARN_ON(available <= vbva->partial_write_tresh)) {
0087 vbva_ctx->buffer_overflow = true;
0088 return false;
0089 }
0090 chunk = available - vbva->partial_write_tresh;
0091 }
0092
0093 vbva_buffer_place_data_at(vbva_ctx, p, chunk,
0094 vbva->free_offset);
0095
0096 vbva->free_offset = (vbva->free_offset + chunk) %
0097 vbva->data_len;
0098 record->len_and_flags += chunk;
0099 available -= chunk;
0100 len -= chunk;
0101 p += chunk;
0102 }
0103
0104 return true;
0105 }
0106
0107 static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx,
0108 struct gen_pool *ctx, s32 screen, bool enable)
0109 {
0110 struct vbva_enable_ex *p;
0111 bool ret;
0112
0113 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE);
0114 if (!p)
0115 return false;
0116
0117 p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
0118 p->base.offset = vbva_ctx->buffer_offset;
0119 p->base.result = VERR_NOT_SUPPORTED;
0120 if (screen >= 0) {
0121 p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
0122 p->screen_id = screen;
0123 }
0124
0125 hgsmi_buffer_submit(ctx, p);
0126
0127 if (enable)
0128 ret = p->base.result >= 0;
0129 else
0130 ret = true;
0131
0132 hgsmi_buffer_free(ctx, p);
0133
0134 return ret;
0135 }
0136
0137 bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
0138 struct vbva_buffer *vbva, s32 screen)
0139 {
0140 bool ret = false;
0141
0142 memset(vbva, 0, sizeof(*vbva));
0143 vbva->partial_write_tresh = 256;
0144 vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer);
0145 vbva_ctx->vbva = vbva;
0146
0147 ret = vbva_inform_host(vbva_ctx, ctx, screen, true);
0148 if (!ret)
0149 vbva_disable(vbva_ctx, ctx, screen);
0150
0151 return ret;
0152 }
0153
0154 void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
0155 s32 screen)
0156 {
0157 vbva_ctx->buffer_overflow = false;
0158 vbva_ctx->record = NULL;
0159 vbva_ctx->vbva = NULL;
0160
0161 vbva_inform_host(vbva_ctx, ctx, screen, false);
0162 }
0163
0164 bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
0165 struct gen_pool *ctx)
0166 {
0167 struct vbva_record *record;
0168 u32 next;
0169
0170 if (!vbva_ctx->vbva ||
0171 !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED))
0172 return false;
0173
0174 WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record);
0175
0176 next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS;
0177
0178
0179 if (next == vbva_ctx->vbva->record_first_index)
0180 vbva_buffer_flush(ctx);
0181
0182
0183 if (next == vbva_ctx->vbva->record_first_index)
0184 return false;
0185
0186 record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index];
0187 record->len_and_flags = VBVA_F_RECORD_PARTIAL;
0188 vbva_ctx->vbva->record_free_index = next;
0189
0190 vbva_ctx->record = record;
0191
0192 return true;
0193 }
0194
0195 void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx)
0196 {
0197 struct vbva_record *record = vbva_ctx->record;
0198
0199 WARN_ON(!vbva_ctx->vbva || !record ||
0200 !(record->len_and_flags & VBVA_F_RECORD_PARTIAL));
0201
0202
0203 record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;
0204
0205 vbva_ctx->buffer_overflow = false;
0206 vbva_ctx->record = NULL;
0207 }
0208
0209 void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
0210 u32 buffer_offset, u32 buffer_length)
0211 {
0212 vbva_ctx->buffer_offset = buffer_offset;
0213 vbva_ctx->buffer_length = buffer_length;
0214 }