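/*
 * zfcp device driver
 *
 * Debug trace records for the zfcp device driver.
 */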
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "zfcp_dbf.h"
#include "zfcp_ext.h"
#include "zfcp_fc.h"

static u32 dbfsize = 4;

module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
		 "number of pages for each debug feature area (default 4)");

static u32 dbflevel = 3;

module_param(dbflevel, uint, 0400);
MODULE_PARM_DESC(dbflevel,
		 "log level for each debug feature area "
		 "(default 3, range 0..6)");
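
/* length of a payload trace record carrying @offset bytes of data */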
static inline unsigned int zfcp_dbf_plen(unsigned int offset)
{
	return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
}
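
/*
 * Copy @length bytes at @data into the payload trace area in chunks of
 * ZFCP_DBF_PAY_MAX_REC bytes, emitting one trace record per chunk.
 */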
static inline
void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
		       u64 req_id)
{
	struct zfcp_dbf_pay *pl = &dbf->pay_buf;
	u16 offset = 0, rec_length;

	spin_lock(&dbf->pay_lock);
	memset(pl, 0, sizeof(*pl));
	pl->fsf_req_id = req_id;
	memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);

	while (offset < length) {
		rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
				 (u16) (length - offset));
		memcpy(pl->data, data + offset, rec_length);
		debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));

		offset += rec_length;
		pl->counter++;
	}

	spin_unlock(&dbf->pay_lock);
}
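
/**
 * zfcp_dbf_hba_fsf_res - trace event for an FSF response
 * @tag: tag indicating the kind of FSF response
 * @level: trace level of the event
 * @req: request for which the response was received
 */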
void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
	struct fsf_qtcb_header *q_head = &req->qtcb->header;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_RES;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = q_head->fsf_command;
	rec->fsf_seq_no = q_pref->req_seq_no;
	rec->u.res.req_issued = req->issued;
	rec->u.res.prot_status = q_pref->prot_status;
	rec->u.res.fsf_status = q_head->fsf_status;
	rec->u.res.port_handle = q_head->port_handle;
	rec->u.res.lun_handle = q_head->lun_handle;

	memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
	       FSF_PROT_STATUS_QUAL_SIZE);
	memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
	       FSF_STATUS_QUALIFIER_SIZE);

	rec->pl_len = q_head->log_length;
	zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
			  rec->pl_len, "fsf_res", req->req_id);

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
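
/**
 * zfcp_dbf_hba_fsf_fces - trace event for an FC Endpoint Security change
 * @tag: tag indicating the reason for the event
 * @req: request for which the response was received
 * @wwpn: remote port name whose FC Endpoint Security setting changed
 * @fc_security_old: previous FC Endpoint Security setting
 * @fc_security_new: new FC Endpoint Security setting
 */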
void zfcp_dbf_hba_fsf_fces(char *tag, const struct zfcp_fsf_req *req, u64 wwpn,
			   u32 fc_security_old, u32 fc_security_new)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
	struct fsf_qtcb_header *q_head = &req->qtcb->header;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	static int const level = 3;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_FCES;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = q_head->fsf_command;
	rec->fsf_seq_no = q_pref->req_seq_no;
	rec->u.fces.req_issued = req->issued;
	rec->u.fces.fsf_status = q_head->fsf_status;
	rec->u.fces.port_handle = q_head->port_handle;
	rec->u.fces.wwpn = wwpn;
	rec->u.fces.fc_security_old = fc_security_old;
	rec->u.fces.fc_security_new = fc_security_new;

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
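
/**
 * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
 * @tag: tag indicating the kind of unsolicited status
 * @req: status-read request that returned the status buffer
 */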
void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_status_read_buffer *srb = req->data;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	static int const level = 2;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_USS;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;

	if (!srb)
		goto log;

	rec->u.uss.status_type = srb->status_type;
	rec->u.uss.status_subtype = srb->status_subtype;
	rec->u.uss.d_id = ntoh24(srb->d_id);
	rec->u.uss.lun = srb->fcp_lun;
	memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
	       sizeof(rec->u.uss.queue_designator));

	/* status read buffer payload length */
	rec->pl_len = (!srb->length) ? 0 : srb->length -
		      offsetof(struct fsf_status_read_buffer, payload);

	if (rec->pl_len)
		zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
				  "fsf_uss", req->req_id);
log:
	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
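
/**
 * zfcp_dbf_hba_bit_err - trace event for a bit error condition
 * @tag: tag indicating the reason for the event
 * @req: status-read request whose buffer carries the bit error payload
 */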
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	struct fsf_status_read_buffer *sr_buf = req->data;
	static int const level = 1;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_BIT;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;
	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
	       sizeof(struct fsf_bit_error_payload));

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
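
/**
 * zfcp_dbf_hba_def_err - trace event for a deferred error
 * @adapter: adapter on which the deferred error occurred
 * @req_id: request id of the request the error relates to
 * @scount: number of entries in @pl
 * @pl: array of pointers to the involved QDIO buffers
 */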
void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
			  void **pl)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	unsigned long flags;
	static int const level = 1;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->pay, level)))
		return;

	if (!pl)
		return;

	spin_lock_irqsave(&dbf->pay_lock, flags);
	memset(payload, 0, sizeof(*payload));

	memcpy(payload->area, "def_err", 7);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	length = min((u16)sizeof(struct qdio_buffer),
		     (u16)ZFCP_DBF_PAY_MAX_REC);

	while (payload->counter < scount && (char *)pl[payload->counter]) {
		memcpy(payload->data, (char *)pl[payload->counter], length);
		debug_event(dbf->pay, level, payload, zfcp_dbf_plen(length));
		payload->counter++;
	}

	spin_unlock_irqrestore(&dbf->pay_lock, flags);
}
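
/* fill in adapter/port/LUN status fields shared by all recovery trace records */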
static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
				struct zfcp_adapter *adapter,
				struct zfcp_port *port,
				struct scsi_device *sdev)
{
	rec->adapter_status = atomic_read(&adapter->status);
	if (port) {
		rec->port_status = atomic_read(&port->status);
		rec->wwpn = port->wwpn;
		rec->d_id = port->d_id;
	}
	if (sdev) {
		rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
		rec->lun = zfcp_scsi_dev_lun(sdev);
	} else
		rec->lun = ZFCP_DBF_INVALID_LUN;
}
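
/**
 * zfcp_dbf_rec_trig - trace event related to a triggered recovery
 * @tag: identifier for the event
 * @adapter: adapter on which the error recovery was triggered
 * @port: remote port involved in the error recovery, if any
 * @sdev: SCSI device involved in the error recovery, if any
 * @want: recovery action that was requested
 * @need: recovery action that is actually needed
 *
 * The caller must hold adapter->erp_lock.
 */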
void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
		       struct zfcp_port *port, struct scsi_device *sdev,
		       u8 want, u8 need)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	static int const level = 1;
	struct list_head *entry;
	unsigned long flags;

	lockdep_assert_held(&adapter->erp_lock);

	if (unlikely(!debug_level_enabled(dbf->rec, level)))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_TRIG;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, adapter, port, sdev);

	list_for_each(entry, &adapter->erp_ready_head)
		rec->u.trig.ready++;

	list_for_each(entry, &adapter->erp_running_head)
		rec->u.trig.running++;

	rec->u.trig.want = want;
	rec->u.trig.need = need;

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
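
/*
 * zfcp_dbf_rec_trig_lock - like zfcp_dbf_rec_trig(), but for callers that do
 * not already hold adapter->erp_lock; the lock is taken and released here.
 */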
void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
			    struct zfcp_port *port, struct scsi_device *sdev,
			    u8 want, u8 need)
{
	unsigned long flags;

	read_lock_irqsave(&adapter->erp_lock, flags);
	zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
	read_unlock_irqrestore(&adapter->erp_lock, flags);
}
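
/**
 * zfcp_dbf_rec_run_lvl - trace event related to a running recovery action
 * @level: trace level to be used for the event
 * @tag: identifier for the event
 * @erp: recovery action the event relates to
 */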
void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
	struct zfcp_dbf *dbf = erp->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	unsigned long flags;

	if (!debug_level_enabled(dbf->rec, level))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);

	rec->u.run.fsf_req_id = erp->fsf_req_id;
	rec->u.run.rec_status = erp->status;
	rec->u.run.rec_step = erp->step;
	rec->u.run.rec_action = erp->type;

	if (erp->sdev)
		rec->u.run.rec_count =
			atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
	else if (erp->port)
		rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
	else
		rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
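
/**
 * zfcp_dbf_rec_run - trace event related to a running recovery action
 * @tag: identifier for the event
 * @erp: recovery action the event relates to
 */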
void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
	zfcp_dbf_rec_run_lvl(1, tag, erp);
}
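
/**
 * zfcp_dbf_rec_run_wka - recovery trace event for a well-known-address port
 * @tag: identifier for the event
 * @wka_port: well-known-address port the event relates to
 * @req_id: request id of the associated FSF request
 */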
void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
			  u64 req_id)
{
	struct zfcp_dbf *dbf = wka_port->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	static int const level = 1;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->rec, level)))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->port_status = wka_port->status;
	rec->d_id = wka_port->d_id;
	rec->lun = ZFCP_DBF_INVALID_LUN;

	rec->u.run.fsf_req_id = req_id;
	rec->u.run.rec_status = ~0;
	rec->u.run.rec_step = ~0;
	rec->u.run.rec_action = ~0;
	rec->u.run.rec_count = ~0;

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

#define ZFCP_DBF_SAN_LEVEL 1
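
/*
 * Write a SAN trace record for a CT or ELS payload held in @sg. The leading
 * bytes go into the SAN record itself; anything beyond that is spilled into
 * payload trace records, capped at cap_len bytes.
 */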
static inline
void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
		  char *paytag, struct scatterlist *sg, u8 id, u16 len,
		  u64 req_id, u32 d_id, u16 cap_len)
{
	struct zfcp_dbf_san *rec = &dbf->san_buf;
	u16 rec_len;
	unsigned long flags;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	u16 pay_sum = 0;

	spin_lock_irqsave(&dbf->san_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = id;
	rec->fsf_req_id = req_id;
	rec->d_id = d_id;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->pl_len = len;
	if (!sg)
		goto out;
	rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
	memcpy(rec->payload, sg_virt(sg), rec_len);
	if (len <= rec_len)
		goto out;

	/* the payload did not fit into the SAN record: dump it into the
	 * payload trace area, up to cap_len bytes
	 */
	spin_lock(&dbf->pay_lock);
	memset(payload, 0, sizeof(*payload));
	memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
		u16 pay_len, offset = 0;

		while (offset < sg->length && pay_sum < cap_len) {
			pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
				      (u16)(sg->length - offset));
			memcpy(payload->data, sg_virt(sg) + offset, pay_len);
			debug_event(dbf->pay, ZFCP_DBF_SAN_LEVEL, payload,
				    zfcp_dbf_plen(pay_len));
			payload->counter++;
			offset += pay_len;
			pay_sum += pay_len;
		}
	}
	spin_unlock(&dbf->pay_lock);

out:
	debug_event(dbf->san, ZFCP_DBF_SAN_LEVEL, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->san_lock, flags);
}
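
/**
 * zfcp_dbf_san_req - trace event for an issued CT or ELS request
 * @tag: identifier for the event
 * @fsf: request containing the issued CT or ELS data
 * @d_id: destination id the request is sent to
 */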
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)zfcp_qdio_real_bytes(ct_els->req);
	zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
		     length, fsf->req_id, d_id, length);
}
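
/*
 * If the response in @fsf belongs to a GPN_FT name server request, cap the
 * traced payload length at the entries actually used (up to and including the
 * entry flagged FC_NS_FID_LAST); otherwise return @len unchanged.
 */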
static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
					      struct zfcp_fsf_req *fsf,
					      u16 len)
{
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
	struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
	struct scatterlist *resp_entry = ct_els->resp;
	struct fc_ct_hdr *resph;
	struct fc_gpn_ft_resp *acc;
	int max_entries, x, last = 0;

	if (!(memcmp(tag, "fsscth2", 7) == 0
	      && ct_els->d_id == FC_FID_DIR_SERV
	      && reqh->ct_rev == FC_CT_REV
	      && reqh->ct_in_id[0] == 0
	      && reqh->ct_in_id[1] == 0
	      && reqh->ct_in_id[2] == 0
	      && reqh->ct_fs_type == FC_FST_DIR
	      && reqh->ct_fs_subtype == FC_NS_SUBTYPE
	      && reqh->ct_options == 0
	      && reqh->_ct_resvd1 == 0
	      && reqh->ct_cmd == cpu_to_be16(FC_NS_GPN_FT)
	      /* reqh->ct_mr_size can vary so do not match but read below */
	      && reqh->_ct_resvd2 == 0
	      && reqh->ct_reason == 0
	      && reqh->ct_explan == 0
	      && reqh->ct_vendor == 0
	      && reqn->fn_resvd == 0
	      && reqn->fn_domain_id_scope == 0
	      && reqn->fn_area_id_scope == 0
	      && reqn->fn_fc4_type == FC_TYPE_FCP))
		return len; /* not GPN_FT, do not cap */

	acc = sg_virt(resp_entry);

	resph = (struct fc_ct_hdr *)acc;
	if ((ct_els->status) ||
	    (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
		return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);

	max_entries = (be16_to_cpu(reqh->ct_mr_size) * 4 /
		       sizeof(struct fc_gpn_ft_resp))
		+ 1 /* the CT_IU preamble counts as one pseudo entry */;

	/* walk the accepted entries until the one flagged as last */
	for (x = 1; x < max_entries && !last; x++) {
		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
			acc++;
		else
			acc = sg_virt(++resp_entry);

		last = acc->fp_flags & FC_NS_FID_LAST;
	}
	len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
	return len;
}
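
/**
 * zfcp_dbf_san_res - trace event for a received CT or ELS response
 * @tag: identifier for the event
 * @fsf: request containing the received CT or ELS data
 */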
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
	zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
		     length, fsf->req_id, ct_els->d_id,
		     zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}
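
/**
 * zfcp_dbf_san_in_els - trace event for an incoming ELS
 * @tag: identifier for the event
 * @fsf: request containing the received ELS data
 */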
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct fsf_status_read_buffer *srb =
		(struct fsf_status_read_buffer *) fsf->data;
	u16 length;
	struct scatterlist sg;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)(srb->length -
		       offsetof(struct fsf_status_read_buffer, payload));
	sg_init_one(&sg, srb->payload.data, length);
	zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
		     fsf->req_id, ntoh24(srb->d_id), length);
}
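
/**
 * zfcp_dbf_scsi_common - common trace event helper for SCSI commands
 * @tag: identifier for the event
 * @level: trace level of the event
 * @sdev: SCSI device as context for the event
 * @sc: SCSI command, or NULL
 * @fsf: FSF request carrying the FCP response, or NULL
 */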
void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev,
			  struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
{
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *) sdev->host->hostdata[0];
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
	struct fcp_resp_with_ext *fcp_rsp;
	struct fcp_resp_rsp_info *fcp_rsp_info;
	unsigned long flags;

	spin_lock_irqsave(&dbf->scsi_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_SCSI_CMND;
	if (sc) {
		rec->scsi_result = sc->result;
		rec->scsi_retries = sc->retries;
		rec->scsi_allowed = sc->allowed;
		rec->scsi_id = sc->device->id;
		rec->scsi_lun = (u32)sc->device->lun;
		rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
		rec->host_scribble = (unsigned long)sc->host_scribble;

		memcpy(rec->scsi_opcode, sc->cmnd,
		       min_t(int, sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
	} else {
		rec->scsi_result = ~0;
		rec->scsi_retries = ~0;
		rec->scsi_allowed = ~0;
		rec->scsi_id = sdev->id;
		rec->scsi_lun = (u32)sdev->lun;
		rec->scsi_lun_64_hi = (u32)(sdev->lun >> 32);
		rec->host_scribble = ~0;

		memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
	}

	if (fsf) {
		rec->fsf_req_id = fsf->req_id;
		rec->pl_len = FCP_RESP_WITH_EXT;
		fcp_rsp = &(fsf->qtcb->bottom.io.fcp_rsp.iu);
		/* mandatory parts of the FCP_RSP IU in this SCSI record */
		memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
			fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
			rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
		}
		if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL)
			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
		/* the complete FCP_RSP IU goes into an associated payload
		 * record, but only if there are optional parts
		 */
		if (fcp_rsp->resp.fr_flags != 0)
			zfcp_dbf_pl_write(
				dbf, fcp_rsp,
				/* at least one full payload record, but not
				 * beyond the hardware response field
				 */
				min_t(u16, max_t(u16, rec->pl_len,
						 ZFCP_DBF_PAY_MAX_REC),
				      FSF_FCP_RSP_SIZE),
				"fcp_riu", fsf->req_id);
	}

	debug_event(dbf->scsi, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
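
/**
 * zfcp_dbf_scsi_eh - trace event for a SCSI error-handling callback
 * @tag: identifier for the event
 * @adapter: adapter the error handling ran on
 * @scsi_id: SCSI target id the callback acted on
 * @ret: return value of the error-handling callback
 */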
void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
		      unsigned int scsi_id, int ret)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
	unsigned long flags;
	static int const level = 1;

	if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
		return;

	spin_lock_irqsave(&dbf->scsi_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_SCSI_CMND;
	rec->scsi_result = ret;
	rec->scsi_retries = ~0;
	rec->scsi_allowed = ~0;
	rec->fcp_rsp_info = ~0;
	rec->scsi_id = scsi_id;
	rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
	rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
	rec->host_scribble = ~0;
	memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);

	debug_event(dbf->scsi, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
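
/* register one debug feature area with a hex/ascii view and the configured level */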
static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
	struct debug_info *d;

	d = debug_register(name, size, 1, rec_size);
	if (!d)
		return NULL;

	debug_register_view(d, &debug_hex_ascii_view);
	debug_set_level(d, dbflevel);

	return d;
}
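
/* tear down all debug feature areas of an adapter and free the dbf structure */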
static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
{
	if (!dbf)
		return;

	debug_unregister(dbf->scsi);
	debug_unregister(dbf->san);
	debug_unregister(dbf->hba);
	debug_unregister(dbf->pay);
	debug_unregister(dbf->rec);
	kfree(dbf);
}
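
/**
 * zfcp_dbf_adapter_register - registers the debug feature areas for an adapter
 * @adapter: adapter for which the debug feature areas should be registered
 *
 * Return: 0 on success, -ENOMEM otherwise.
 */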
int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
{
	char name[DEBUG_MAX_NAME_LEN];
	struct zfcp_dbf *dbf;

	dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
	if (!dbf)
		return -ENOMEM;

	spin_lock_init(&dbf->pay_lock);
	spin_lock_init(&dbf->hba_lock);
	spin_lock_init(&dbf->san_lock);
	spin_lock_init(&dbf->scsi_lock);
	spin_lock_init(&dbf->rec_lock);

	/* debug feature area which records recovery activity */
	sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
	dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
	if (!dbf->rec)
		goto err_out;

	/* debug feature area which records HBA (FSF and QDIO) conditions */
	sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
	dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
	if (!dbf->hba)
		goto err_out;

	/* debug feature area which records payload info */
	sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
	dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
	if (!dbf->pay)
		goto err_out;

	/* debug feature area which records SAN command failures and recovery */
	sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
	dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
	if (!dbf->san)
		goto err_out;

	/* debug feature area which records SCSI command failures and recovery */
	sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
	dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
	if (!dbf->scsi)
		goto err_out;

	adapter->dbf = dbf;

	return 0;
err_out:
	zfcp_dbf_unregister(dbf);
	return -ENOMEM;
}
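
/**
 * zfcp_dbf_adapter_unregister - unregisters the debug feature areas of an adapter
 * @adapter: adapter for which the debug feature areas should be unregistered
 */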
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct zfcp_dbf *dbf = adapter->dbf;

	adapter->dbf = NULL;
	zfcp_dbf_unregister(dbf);
}