0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/smc.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include "smc_netlink.h"
#include "smc_stats.h"
0020
0021 int smc_stats_init(struct net *net)
0022 {
0023 net->smc.fback_rsn = kzalloc(sizeof(*net->smc.fback_rsn), GFP_KERNEL);
0024 if (!net->smc.fback_rsn)
0025 goto err_fback;
0026 net->smc.smc_stats = alloc_percpu(struct smc_stats);
0027 if (!net->smc.smc_stats)
0028 goto err_stats;
0029 mutex_init(&net->smc.mutex_fback_rsn);
0030 return 0;
0031
0032 err_stats:
0033 kfree(net->smc.fback_rsn);
0034 err_fback:
0035 return -ENOMEM;
0036 }
0037
0038 void smc_stats_exit(struct net *net)
0039 {
0040 kfree(net->smc.fback_rsn);
0041 if (net->smc.smc_stats)
0042 free_percpu(net->smc.smc_stats);
0043 }
0044
0045 static int smc_nl_fill_stats_rmb_data(struct sk_buff *skb,
0046 struct smc_stats *stats, int tech,
0047 int type)
0048 {
0049 struct smc_stats_rmbcnt *stats_rmb_cnt;
0050 struct nlattr *attrs;
0051
0052 if (type == SMC_NLA_STATS_T_TX_RMB_STATS)
0053 stats_rmb_cnt = &stats->smc[tech].rmb_tx;
0054 else
0055 stats_rmb_cnt = &stats->smc[tech].rmb_rx;
0056
0057 attrs = nla_nest_start(skb, type);
0058 if (!attrs)
0059 goto errout;
0060 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_REUSE_CNT,
0061 stats_rmb_cnt->reuse_cnt,
0062 SMC_NLA_STATS_RMB_PAD))
0063 goto errattr;
0064 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_PEER_CNT,
0065 stats_rmb_cnt->buf_size_small_peer_cnt,
0066 SMC_NLA_STATS_RMB_PAD))
0067 goto errattr;
0068 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_CNT,
0069 stats_rmb_cnt->buf_size_small_cnt,
0070 SMC_NLA_STATS_RMB_PAD))
0071 goto errattr;
0072 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_PEER_CNT,
0073 stats_rmb_cnt->buf_full_peer_cnt,
0074 SMC_NLA_STATS_RMB_PAD))
0075 goto errattr;
0076 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_CNT,
0077 stats_rmb_cnt->buf_full_cnt,
0078 SMC_NLA_STATS_RMB_PAD))
0079 goto errattr;
0080 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_ALLOC_CNT,
0081 stats_rmb_cnt->alloc_cnt,
0082 SMC_NLA_STATS_RMB_PAD))
0083 goto errattr;
0084 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_DGRADE_CNT,
0085 stats_rmb_cnt->dgrade_cnt,
0086 SMC_NLA_STATS_RMB_PAD))
0087 goto errattr;
0088
0089 nla_nest_end(skb, attrs);
0090 return 0;
0091
0092 errattr:
0093 nla_nest_cancel(skb, attrs);
0094 errout:
0095 return -EMSGSIZE;
0096 }
0097
0098 static int smc_nl_fill_stats_bufsize_data(struct sk_buff *skb,
0099 struct smc_stats *stats, int tech,
0100 int type)
0101 {
0102 struct smc_stats_memsize *stats_pload;
0103 struct nlattr *attrs;
0104
0105 if (type == SMC_NLA_STATS_T_TXPLOAD_SIZE)
0106 stats_pload = &stats->smc[tech].tx_pd;
0107 else if (type == SMC_NLA_STATS_T_RXPLOAD_SIZE)
0108 stats_pload = &stats->smc[tech].rx_pd;
0109 else if (type == SMC_NLA_STATS_T_TX_RMB_SIZE)
0110 stats_pload = &stats->smc[tech].tx_rmbsize;
0111 else if (type == SMC_NLA_STATS_T_RX_RMB_SIZE)
0112 stats_pload = &stats->smc[tech].rx_rmbsize;
0113 else
0114 goto errout;
0115
0116 attrs = nla_nest_start(skb, type);
0117 if (!attrs)
0118 goto errout;
0119 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_8K,
0120 stats_pload->buf[SMC_BUF_8K],
0121 SMC_NLA_STATS_PLOAD_PAD))
0122 goto errattr;
0123 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_16K,
0124 stats_pload->buf[SMC_BUF_16K],
0125 SMC_NLA_STATS_PLOAD_PAD))
0126 goto errattr;
0127 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_32K,
0128 stats_pload->buf[SMC_BUF_32K],
0129 SMC_NLA_STATS_PLOAD_PAD))
0130 goto errattr;
0131 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_64K,
0132 stats_pload->buf[SMC_BUF_64K],
0133 SMC_NLA_STATS_PLOAD_PAD))
0134 goto errattr;
0135 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_128K,
0136 stats_pload->buf[SMC_BUF_128K],
0137 SMC_NLA_STATS_PLOAD_PAD))
0138 goto errattr;
0139 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_256K,
0140 stats_pload->buf[SMC_BUF_256K],
0141 SMC_NLA_STATS_PLOAD_PAD))
0142 goto errattr;
0143 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_512K,
0144 stats_pload->buf[SMC_BUF_512K],
0145 SMC_NLA_STATS_PLOAD_PAD))
0146 goto errattr;
0147 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_1024K,
0148 stats_pload->buf[SMC_BUF_1024K],
0149 SMC_NLA_STATS_PLOAD_PAD))
0150 goto errattr;
0151 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_G_1024K,
0152 stats_pload->buf[SMC_BUF_G_1024K],
0153 SMC_NLA_STATS_PLOAD_PAD))
0154 goto errattr;
0155
0156 nla_nest_end(skb, attrs);
0157 return 0;
0158
0159 errattr:
0160 nla_nest_cancel(skb, attrs);
0161 errout:
0162 return -EMSGSIZE;
0163 }
0164
0165 static int smc_nl_fill_stats_tech_data(struct sk_buff *skb,
0166 struct smc_stats *stats, int tech)
0167 {
0168 struct smc_stats_tech *smc_tech;
0169 struct nlattr *attrs;
0170
0171 smc_tech = &stats->smc[tech];
0172 if (tech == SMC_TYPE_D)
0173 attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCD_TECH);
0174 else
0175 attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCR_TECH);
0176
0177 if (!attrs)
0178 goto errout;
0179 if (smc_nl_fill_stats_rmb_data(skb, stats, tech,
0180 SMC_NLA_STATS_T_TX_RMB_STATS))
0181 goto errattr;
0182 if (smc_nl_fill_stats_rmb_data(skb, stats, tech,
0183 SMC_NLA_STATS_T_RX_RMB_STATS))
0184 goto errattr;
0185 if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
0186 SMC_NLA_STATS_T_TXPLOAD_SIZE))
0187 goto errattr;
0188 if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
0189 SMC_NLA_STATS_T_RXPLOAD_SIZE))
0190 goto errattr;
0191 if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
0192 SMC_NLA_STATS_T_TX_RMB_SIZE))
0193 goto errattr;
0194 if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
0195 SMC_NLA_STATS_T_RX_RMB_SIZE))
0196 goto errattr;
0197 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V1_SUCC,
0198 smc_tech->clnt_v1_succ_cnt,
0199 SMC_NLA_STATS_PAD))
0200 goto errattr;
0201 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V2_SUCC,
0202 smc_tech->clnt_v2_succ_cnt,
0203 SMC_NLA_STATS_PAD))
0204 goto errattr;
0205 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V1_SUCC,
0206 smc_tech->srv_v1_succ_cnt,
0207 SMC_NLA_STATS_PAD))
0208 goto errattr;
0209 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V2_SUCC,
0210 smc_tech->srv_v2_succ_cnt,
0211 SMC_NLA_STATS_PAD))
0212 goto errattr;
0213 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_BYTES,
0214 smc_tech->rx_bytes,
0215 SMC_NLA_STATS_PAD))
0216 goto errattr;
0217 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_BYTES,
0218 smc_tech->tx_bytes,
0219 SMC_NLA_STATS_PAD))
0220 goto errattr;
0221 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_CNT,
0222 smc_tech->rx_cnt,
0223 SMC_NLA_STATS_PAD))
0224 goto errattr;
0225 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_CNT,
0226 smc_tech->tx_cnt,
0227 SMC_NLA_STATS_PAD))
0228 goto errattr;
0229 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SENDPAGE_CNT,
0230 smc_tech->sendpage_cnt,
0231 SMC_NLA_STATS_PAD))
0232 goto errattr;
0233 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CORK_CNT,
0234 smc_tech->cork_cnt,
0235 SMC_NLA_STATS_PAD))
0236 goto errattr;
0237 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_NDLY_CNT,
0238 smc_tech->ndly_cnt,
0239 SMC_NLA_STATS_PAD))
0240 goto errattr;
0241 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SPLICE_CNT,
0242 smc_tech->splice_cnt,
0243 SMC_NLA_STATS_PAD))
0244 goto errattr;
0245 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_URG_DATA_CNT,
0246 smc_tech->urg_data_cnt,
0247 SMC_NLA_STATS_PAD))
0248 goto errattr;
0249
0250 nla_nest_end(skb, attrs);
0251 return 0;
0252
0253 errattr:
0254 nla_nest_cancel(skb, attrs);
0255 errout:
0256 return -EMSGSIZE;
0257 }
0258
0259 int smc_nl_get_stats(struct sk_buff *skb,
0260 struct netlink_callback *cb)
0261 {
0262 struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
0263 struct net *net = sock_net(skb->sk);
0264 struct smc_stats *stats;
0265 struct nlattr *attrs;
0266 int cpu, i, size;
0267 void *nlh;
0268 u64 *src;
0269 u64 *sum;
0270
0271 if (cb_ctx->pos[0])
0272 goto errmsg;
0273 nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
0274 &smc_gen_nl_family, NLM_F_MULTI,
0275 SMC_NETLINK_GET_STATS);
0276 if (!nlh)
0277 goto errmsg;
0278
0279 attrs = nla_nest_start(skb, SMC_GEN_STATS);
0280 if (!attrs)
0281 goto errnest;
0282 stats = kzalloc(sizeof(*stats), GFP_KERNEL);
0283 if (!stats)
0284 goto erralloc;
0285 size = sizeof(*stats) / sizeof(u64);
0286 for_each_possible_cpu(cpu) {
0287 src = (u64 *)per_cpu_ptr(net->smc.smc_stats, cpu);
0288 sum = (u64 *)stats;
0289 for (i = 0; i < size; i++)
0290 *(sum++) += *(src++);
0291 }
0292 if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_D))
0293 goto errattr;
0294 if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_R))
0295 goto errattr;
0296 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_CLNT_HS_ERR_CNT,
0297 stats->clnt_hshake_err_cnt,
0298 SMC_NLA_STATS_PAD))
0299 goto errattr;
0300 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_SRV_HS_ERR_CNT,
0301 stats->srv_hshake_err_cnt,
0302 SMC_NLA_STATS_PAD))
0303 goto errattr;
0304
0305 nla_nest_end(skb, attrs);
0306 genlmsg_end(skb, nlh);
0307 cb_ctx->pos[0] = 1;
0308 kfree(stats);
0309 return skb->len;
0310
0311 errattr:
0312 kfree(stats);
0313 erralloc:
0314 nla_nest_cancel(skb, attrs);
0315 errnest:
0316 genlmsg_cancel(skb, nlh);
0317 errmsg:
0318 return skb->len;
0319 }
0320
0321 static int smc_nl_get_fback_details(struct sk_buff *skb,
0322 struct netlink_callback *cb, int pos,
0323 bool is_srv)
0324 {
0325 struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
0326 struct net *net = sock_net(skb->sk);
0327 int cnt_reported = cb_ctx->pos[2];
0328 struct smc_stats_fback *trgt_arr;
0329 struct nlattr *attrs;
0330 int rc = 0;
0331 void *nlh;
0332
0333 if (is_srv)
0334 trgt_arr = &net->smc.fback_rsn->srv[0];
0335 else
0336 trgt_arr = &net->smc.fback_rsn->clnt[0];
0337 if (!trgt_arr[pos].fback_code)
0338 return -ENODATA;
0339 nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
0340 &smc_gen_nl_family, NLM_F_MULTI,
0341 SMC_NETLINK_GET_FBACK_STATS);
0342 if (!nlh)
0343 goto errmsg;
0344 attrs = nla_nest_start(skb, SMC_GEN_FBACK_STATS);
0345 if (!attrs)
0346 goto errout;
0347 if (nla_put_u8(skb, SMC_NLA_FBACK_STATS_TYPE, is_srv))
0348 goto errattr;
0349 if (!cnt_reported) {
0350 if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_SRV_CNT,
0351 net->smc.fback_rsn->srv_fback_cnt,
0352 SMC_NLA_FBACK_STATS_PAD))
0353 goto errattr;
0354 if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_CLNT_CNT,
0355 net->smc.fback_rsn->clnt_fback_cnt,
0356 SMC_NLA_FBACK_STATS_PAD))
0357 goto errattr;
0358 cnt_reported = 1;
0359 }
0360
0361 if (nla_put_u32(skb, SMC_NLA_FBACK_STATS_RSN_CODE,
0362 trgt_arr[pos].fback_code))
0363 goto errattr;
0364 if (nla_put_u16(skb, SMC_NLA_FBACK_STATS_RSN_CNT,
0365 trgt_arr[pos].count))
0366 goto errattr;
0367
0368 cb_ctx->pos[2] = cnt_reported;
0369 nla_nest_end(skb, attrs);
0370 genlmsg_end(skb, nlh);
0371 return rc;
0372
0373 errattr:
0374 nla_nest_cancel(skb, attrs);
0375 errout:
0376 genlmsg_cancel(skb, nlh);
0377 errmsg:
0378 return -EMSGSIZE;
0379 }
0380
0381 int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb)
0382 {
0383 struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
0384 struct net *net = sock_net(skb->sk);
0385 int rc_srv = 0, rc_clnt = 0, k;
0386 int skip_serv = cb_ctx->pos[1];
0387 int snum = cb_ctx->pos[0];
0388 bool is_srv = true;
0389
0390 mutex_lock(&net->smc.mutex_fback_rsn);
0391 for (k = 0; k < SMC_MAX_FBACK_RSN_CNT; k++) {
0392 if (k < snum)
0393 continue;
0394 if (!skip_serv) {
0395 rc_srv = smc_nl_get_fback_details(skb, cb, k, is_srv);
0396 if (rc_srv && rc_srv != -ENODATA)
0397 break;
0398 } else {
0399 skip_serv = 0;
0400 }
0401 rc_clnt = smc_nl_get_fback_details(skb, cb, k, !is_srv);
0402 if (rc_clnt && rc_clnt != -ENODATA) {
0403 skip_serv = 1;
0404 break;
0405 }
0406 if (rc_clnt == -ENODATA && rc_srv == -ENODATA)
0407 break;
0408 }
0409 mutex_unlock(&net->smc.mutex_fback_rsn);
0410 cb_ctx->pos[1] = skip_serv;
0411 cb_ctx->pos[0] = k;
0412 return skb->len;
0413 }