// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Transport Layer
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt

#include "rtrs-clt.h"

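/*
 * A work completion was processed on a CPU other than the one the
 * connection is bound to: bump "to" on the current CPU's bucket and
 * "from" on the connection CPU's bucket.  "from" is atomic because it
 * is written from a foreign CPU.
 */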
void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
{
	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
	struct rtrs_clt_stats *stats = clt_path->stats;
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	cpu = raw_smp_processor_id();
	s = get_cpu_ptr(stats->pcpu_stats);
	if (con->cpu != cpu) {
		s->cpu_migr.to++;

		/* Careful here: s is re-pointed at the connection CPU's bucket */
		s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
		atomic_inc(&s->cpu_migr.from);
	}
	put_cpu_ptr(stats->pcpu_stats);
}

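/* One more I/O had to fail over to another path; counted per CPU. */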
void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
{
	this_cpu_inc(stats->pcpu_stats->rdma.failover_cnt);
}

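/*
 * sysfs show helper: emit one "migrated from" counter per possible CPU,
 * space separated and terminated by a newline.
 */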
int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
{
	struct rtrs_clt_stats_pcpu *s;
	size_t used;
	int cpu;

	used = 0;
	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		used += sysfs_emit_at(buf, used, "%d ",
				      atomic_read(&s->cpu_migr.from));
	}

	used += sysfs_emit_at(buf, used, "\n");

	return used;
}

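/* Same as above, but for the per-CPU "migrated to" counters. */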
int rtrs_clt_stats_migration_to_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
{
	struct rtrs_clt_stats_pcpu *s;
	size_t used;
	int cpu;

	used = 0;
	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		used += sysfs_emit_at(buf, used, "%d ", s->cpu_migr.to);
	}

	used += sysfs_emit_at(buf, used, "\n");

	return used;
}

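/* Show "<successful reconnects> <failed reconnects>". */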
int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf)
{
	return sysfs_emit(buf, "%d %d\n", stats->reconnects.successful_cnt,
			  stats->reconnects.fail_cnt);
}

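/*
 * Sum the per-CPU RDMA statistics and emit them as:
 * <read cnt> <read bytes> <write cnt> <write bytes> <inflight> <failovers>
 */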
ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats, char *page)
{
	struct rtrs_clt_stats_rdma sum;
	struct rtrs_clt_stats_rdma *r;
	int cpu;

	memset(&sum, 0, sizeof(sum));

	for_each_possible_cpu(cpu) {
		r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;

		sum.dir[READ].cnt += r->dir[READ].cnt;
		sum.dir[READ].size_total += r->dir[READ].size_total;
		sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
		sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
		sum.failover_cnt += r->failover_cnt;
	}

	return sysfs_emit(page, "%llu %llu %llu %llu %u %llu\n",
			  sum.dir[READ].cnt, sum.dir[READ].size_total,
			  sum.dir[WRITE].cnt, sum.dir[WRITE].size_total,
			  atomic_read(&stats->inflight), sum.failover_cnt);
}

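/* Hint shown when the reset attribute is read. */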
ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s, char *page)
{
	return sysfs_emit(page, "echo 1 to reset all statistics\n");
}

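/*
 * Reset handlers below: only a write of 1 (enable == true) is accepted;
 * anything else is rejected with -EINVAL.
 */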
int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable)
{
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	if (!enable)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		memset(&s->rdma, 0, sizeof(s->rdma));
	}

	return 0;
}

int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable)
{
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	if (!enable)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		memset(&s->cpu_migr, 0, sizeof(s->cpu_migr));
	}

	return 0;
}

int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable)
{
	if (!enable)
		return -EINVAL;

	memset(&stats->reconnects, 0, sizeof(stats->reconnects));

	return 0;
}

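/* Reset every counter, including the shared inflight atomic. */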
int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *s, bool enable)
{
	if (enable) {
		rtrs_clt_reset_rdma_stats(s, enable);
		rtrs_clt_reset_cpu_migr_stats(s, enable);
		rtrs_clt_reset_reconnects_stat(s, enable);
		atomic_set(&s->inflight, 0);
		return 0;
	}

	return -EINVAL;
}

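/* Account one RDMA I/O of @size bytes in direction @d on the local CPU. */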
static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
					      size_t size, int d)
{
	this_cpu_inc(stats->pcpu_stats->rdma.dir[d].cnt);
	this_cpu_add(stats->pcpu_stats->rdma.dir[d].size_total, size);
}

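/*
 * Called when an I/O request is started: update the per-CPU RDMA
 * counters and, for the min-inflight multipath policy, the shared
 * inflight counter.
 */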
void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
{
	struct rtrs_clt_con *con = req->con;
	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
	struct rtrs_clt_stats *stats = clt_path->stats;
	unsigned int len;

	len = req->usr_len + req->data_len;
	rtrs_clt_update_rdma_stats(stats, len, dir);
	if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
		atomic_inc(&stats->inflight);
}

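/* Allocate the per-CPU statistics; returns 0 or -ENOMEM. */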
int rtrs_clt_init_stats(struct rtrs_clt_stats *stats)
{
	stats->pcpu_stats = alloc_percpu(typeof(*stats->pcpu_stats));
	if (!stats->pcpu_stats)
		return -ENOMEM;

	/*
	 * successful_cnt will be set to 0 after the session is
	 * established for the first time, so only real reconnects
	 * are reported.
	 */
	stats->reconnects.successful_cnt = -1;

	return 0;
}