0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023 #include <linux/percpu.h>
0024 #include <linux/hardirq.h>
0025 #include <linux/debugfs.h>
0026
0027 #include <asm/xen/hypercall.h>
0028
0029 #include "multicalls.h"
0030 #include "debugfs.h"
0031
/* Maximum number of multicall entries (and completion callbacks) batched per cpu. */
#define MC_BATCH 32

/* Set to 1 to keep a pristine copy of each issued batch plus caller addresses. */
#define MC_DEBUG 0

/* Bytes of per-cpu scratch space for multicall arguments (16 per entry). */
#define MC_ARGS (MC_BATCH * 16)
0037
0038
/*
 * Per-cpu batch buffer: queued multicall entries, the argument scratch
 * space they point into, and callbacks to run once the batch is flushed.
 */
struct mc_buffer {
	/* Counts of used slots in entries[], args[] and callbacks[]. */
	unsigned mcidx, argidx, cbidx;
	struct multicall_entry entries[MC_BATCH];
#if MC_DEBUG
	/* Copy of entries[] taken before issue (the hypercall may modify them). */
	struct multicall_entry debug[MC_BATCH];
	/* Return address of the __xen_mc_entry() caller for each slot. */
	void *caller[MC_BATCH];
#endif
	/* Scratch space handed out by __xen_mc_entry()/xen_mc_extend_args(). */
	unsigned char args[MC_ARGS];
	/* Deferred completion callbacks, run by xen_mc_flush(). */
	struct callback {
		void (*fn)(void *);
		void *data;
	} callbacks[MC_BATCH];
};
0052
/* One batch buffer per cpu; all accessors BUG_ON(preemptible()). */
static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
/* NOTE(review): presumably holds irq state across the batch open/issue window
 * managed by helpers in multicalls.h — not visible here, confirm there. */
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
0055
/*
 * Issue this cpu's pending batch of multicalls to the hypervisor.
 *
 * A batch of exactly one entry is issued directly via xen_single_call();
 * larger batches go through HYPERVISOR_multicall().  Failed entries are
 * counted and reported, the buffer is reset, and only then are the
 * queued completion callbacks run.  Must be called with preemption
 * disabled; the body runs with interrupts off.
 */
void xen_mc_flush(void)
{
	struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
	struct multicall_entry *mc;
	int ret = 0;		/* number of entries that failed */
	unsigned long flags;
	int i;

	BUG_ON(preemptible());

	/* Disable interrupts so nothing can modify the batch under us. */
	local_irq_save(flags);

	trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);

#if MC_DEBUG
	/* Snapshot the entries before issue so failures can be reported
	 * with their original contents. */
	memcpy(b->debug, b->entries,
	       b->mcidx * sizeof(struct multicall_entry));
#endif

	switch (b->mcidx) {
	case 0:
		/* Empty batch: there must be no argument space in use either. */
		BUG_ON(b->argidx != 0);
		break;

	case 1:
		/* Single entry: a direct hypercall is cheaper than a
		 * multicall of one. */
		mc = &b->entries[0];

		mc->result = xen_single_call(mc->op, mc->args[0], mc->args[1],
					     mc->args[2], mc->args[3],
					     mc->args[4]);
		ret = mc->result < 0;
		break;

	default:
		if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
			BUG();
		/* Count per-entry failures; the multicall itself succeeded. */
		for (i = 0; i < b->mcidx; i++)
			if (b->entries[i].result < 0)
				ret++;
	}

	if (WARN_ON(ret)) {
		pr_err("%d of %d multicall(s) failed: cpu %d\n",
		       ret, b->mcidx, smp_processor_id());
		for (i = 0; i < b->mcidx; i++) {
			if (b->entries[i].result < 0) {
#if MC_DEBUG
				/* Report from the pristine snapshot, plus
				 * who queued the entry. */
				pr_err("  call %2d: op=%lu arg=[%lx] result=%ld\t%pS\n",
				       i + 1,
				       b->debug[i].op,
				       b->debug[i].args[0],
				       b->entries[i].result,
				       b->caller[i]);
#else
				pr_err("  call %2d: op=%lu arg=[%lx] result=%ld\n",
				       i + 1,
				       b->entries[i].op,
				       b->entries[i].args[0],
				       b->entries[i].result);
#endif
			}
		}
	}

	/* Reset the batch before running callbacks: a callback may queue
	 * new work. */
	b->mcidx = 0;
	b->argidx = 0;

	for (i = 0; i < b->cbidx; i++) {
		struct callback *cb = &b->callbacks[i];

		(*cb->fn)(cb->data);
	}
	b->cbidx = 0;

	local_irq_restore(flags);
}
0137
0138 struct multicall_space __xen_mc_entry(size_t args)
0139 {
0140 struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
0141 struct multicall_space ret;
0142 unsigned argidx = roundup(b->argidx, sizeof(u64));
0143
0144 trace_xen_mc_entry_alloc(args);
0145
0146 BUG_ON(preemptible());
0147 BUG_ON(b->argidx >= MC_ARGS);
0148
0149 if (unlikely(b->mcidx == MC_BATCH ||
0150 (argidx + args) >= MC_ARGS)) {
0151 trace_xen_mc_flush_reason((b->mcidx == MC_BATCH) ?
0152 XEN_MC_FL_BATCH : XEN_MC_FL_ARGS);
0153 xen_mc_flush();
0154 argidx = roundup(b->argidx, sizeof(u64));
0155 }
0156
0157 ret.mc = &b->entries[b->mcidx];
0158 #if MC_DEBUG
0159 b->caller[b->mcidx] = __builtin_return_address(0);
0160 #endif
0161 b->mcidx++;
0162 ret.args = &b->args[argidx];
0163 b->argidx = argidx + args;
0164
0165 BUG_ON(b->argidx >= MC_ARGS);
0166 return ret;
0167 }
0168
0169 struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
0170 {
0171 struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
0172 struct multicall_space ret = { NULL, NULL };
0173
0174 BUG_ON(preemptible());
0175 BUG_ON(b->argidx >= MC_ARGS);
0176
0177 if (unlikely(b->mcidx == 0 ||
0178 b->entries[b->mcidx - 1].op != op)) {
0179 trace_xen_mc_extend_args(op, size, XEN_MC_XE_BAD_OP);
0180 goto out;
0181 }
0182
0183 if (unlikely((b->argidx + size) >= MC_ARGS)) {
0184 trace_xen_mc_extend_args(op, size, XEN_MC_XE_NO_SPACE);
0185 goto out;
0186 }
0187
0188 ret.mc = &b->entries[b->mcidx - 1];
0189 ret.args = &b->args[b->argidx];
0190 b->argidx += size;
0191
0192 BUG_ON(b->argidx >= MC_ARGS);
0193
0194 trace_xen_mc_extend_args(op, size, XEN_MC_XE_OK);
0195 out:
0196 return ret;
0197 }
0198
0199 void xen_mc_callback(void (*fn)(void *), void *data)
0200 {
0201 struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
0202 struct callback *cb;
0203
0204 if (b->cbidx == MC_BATCH) {
0205 trace_xen_mc_flush_reason(XEN_MC_FL_CALLBACK);
0206 xen_mc_flush();
0207 }
0208
0209 trace_xen_mc_callback(fn, data);
0210
0211 cb = &b->callbacks[b->cbidx++];
0212 cb->fn = fn;
0213 cb->data = data;
0214 }