#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/irq_poll.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"
#include <asm/div64.h>

#define LB_PENDING_CMDS_DEFAULT 4
static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
module_param(lb_pending_cmds, uint, 0444);
MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
	"threshold. Valid Values are 1-128. Default: 4");

#define ABS_DIFF(a, b)   (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
#define MR_LD_STATE_OPTIMAL 3

#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
#define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize)
#define SPAN_INVALID  0xff

static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
	PLD_SPAN_INFO ldSpanInfo);
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
	u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
	struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map);
static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
	u64 strip, struct MR_DRV_RAID_MAP_ALL *map);

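/**
 * mega_mod64 - Perform a 64-bit modulo operation
 * @dividend:	64-bit dividend
 * @divisor:	32-bit divisor
 *
 * Returns the remainder of dividend / divisor.
 */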
u32 mega_mod64(u64 dividend, u32 divisor)
{
	u64 d;
	u32 remainder;

	if (!divisor)
		printk(KERN_ERR "megasas : DIVISOR is zero, in mod fn\n");
	d = dividend;
	remainder = do_div(d, divisor);
	return remainder;
}
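/**
 * mega_div64_32 - Perform a 64-bit division
 * @dividend:	64-bit dividend
 * @divisor:	32-bit divisor
 *
 * Returns the quotient of dividend / divisor.
 */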
static u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
{
	u64 d = dividend;

	if (!divisor)
		printk(KERN_ERR "megasas : DIVISOR is zero in div fn\n");

	do_div(d, divisor);

	return d;
}

struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
	return &map->raidMap.ldSpanMap[ld].ldRaid;
}

static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
						   struct MR_DRV_RAID_MAP_ALL
						   *map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}

static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}

u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
}

u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
}

__le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.devHndlInfo[pd].curDevHdl;
}

static u8 MR_PdInterfaceTypeGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.devHndlInfo[pd].interfaceType;
}

u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}

u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.ldTgtIdToLd[ldTgtId];
}

static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
					  struct MR_DRV_RAID_MAP_ALL *map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}
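/*
 * MR_PopulateDrvRaidMap - Populate the driver RAID map from the firmware
 * RAID map, converting whichever format the controller exposes (dynamic,
 * extended, or legacy) into the common MR_DRV_RAID_MAP_ALL layout.
 *
 * Returns 0 on success, 1 if the firmware map is invalid.
 */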
static int MR_PopulateDrvRaidMap(struct megasas_instance *instance, u64 map_id)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
	struct MR_FW_RAID_MAP *pFwRaidMap = NULL;
	int i, j;
	u16 ld_count;
	struct MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn;
	struct MR_FW_RAID_MAP_EXT *fw_map_ext;
	struct MR_RAID_MAP_DESC_TABLE *desc_table;

	struct MR_DRV_RAID_MAP_ALL *drv_map =
		fusion->ld_drv_map[(map_id & 1)];
	struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
	void *raid_map_data = NULL;

	memset(drv_map, 0, fusion->drv_map_sz);
	memset(pDrvRaidMap->ldTgtIdToLd,
	       0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));

	if (instance->max_raid_mapsize) {
		fw_map_dyn = fusion->ld_map[(map_id & 1)];
		desc_table =
		(struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset));
		if (desc_table != fw_map_dyn->raid_map_desc_table)
			dev_dbg(&instance->pdev->dev, "desc table offsets do not match: computed %p, original %p\n",
				desc_table, fw_map_dyn->raid_map_desc_table);

		ld_count = (u16)le16_to_cpu(fw_map_dyn->ld_count);
		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec =
			fw_map_dyn->fp_pd_io_timeout_sec;
		pDrvRaidMap->totalSize =
			cpu_to_le32(sizeof(struct MR_DRV_RAID_MAP_ALL));

		/* Point to the descriptor payload that follows the table */
		raid_map_data = (void *)fw_map_dyn +
			le32_to_cpu(fw_map_dyn->desc_table_offset) +
			le32_to_cpu(fw_map_dyn->desc_table_size);

		for (i = 0; i < le32_to_cpu(fw_map_dyn->desc_table_num_elements); ++i) {
			switch (le32_to_cpu(desc_table->raid_map_desc_type)) {
			case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
				fw_map_dyn->dev_hndl_info =
				(struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
				memcpy(pDrvRaidMap->devHndlInfo,
				       fw_map_dyn->dev_hndl_info,
				       sizeof(struct MR_DEV_HANDLE_INFO) *
				       le32_to_cpu(desc_table->raid_map_desc_elements));
				break;
			case RAID_MAP_DESC_TYPE_TGTID_INFO:
				fw_map_dyn->ld_tgt_id_to_ld =
					(u16 *)(raid_map_data +
					le32_to_cpu(desc_table->raid_map_desc_offset));
				for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) {
					pDrvRaidMap->ldTgtIdToLd[j] =
						le16_to_cpu(fw_map_dyn->ld_tgt_id_to_ld[j]);
				}
				break;
			case RAID_MAP_DESC_TYPE_ARRAY_INFO:
				fw_map_dyn->ar_map_info =
					(struct MR_ARRAY_INFO *)
					(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
				memcpy(pDrvRaidMap->arMapInfo,
				       fw_map_dyn->ar_map_info,
				       sizeof(struct MR_ARRAY_INFO) *
				       le32_to_cpu(desc_table->raid_map_desc_elements));
				break;
			case RAID_MAP_DESC_TYPE_SPAN_INFO:
				fw_map_dyn->ld_span_map =
					(struct MR_LD_SPAN_MAP *)
					(raid_map_data +
					le32_to_cpu(desc_table->raid_map_desc_offset));
				memcpy(pDrvRaidMap->ldSpanMap,
				       fw_map_dyn->ld_span_map,
				       sizeof(struct MR_LD_SPAN_MAP) *
				       le32_to_cpu(desc_table->raid_map_desc_elements));
				break;
			default:
				dev_dbg(&instance->pdev->dev, "unsupported descriptor type %d\n",
					le32_to_cpu(desc_table->raid_map_desc_type));
			}
			++desc_table;
		}

	} else if (instance->supportmax256vd) {
		fw_map_ext =
			(struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(map_id & 1)];
		ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
		if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
			dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map is not valid\n");
			return 1;
		}

		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec;
		for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++)
			pDrvRaidMap->ldTgtIdToLd[i] =
				(u16)fw_map_ext->ldTgtIdToLd[i];
		memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap,
		       sizeof(struct MR_LD_SPAN_MAP) * ld_count);
		memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
		       sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
		memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
		       sizeof(struct MR_DEV_HANDLE_INFO) *
		       MAX_RAIDMAP_PHYSICAL_DEVICES);

		/* New RAID map will not set totalSize, so keep the expected
		 * value for the legacy check in MR_ValidateMapInfo
		 */
		pDrvRaidMap->totalSize =
			cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
	} else {
		fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
			fusion->ld_map[(map_id & 1)];
		pFwRaidMap = &fw_map_old->raidMap;
		ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
		if (ld_count > MAX_LOGICAL_DRIVES) {
			dev_dbg(&instance->pdev->dev,
				"LD count exposed in RAID map is not valid\n");
			return 1;
		}

		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
		for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
			pDrvRaidMap->ldTgtIdToLd[i] =
				(u8)pFwRaidMap->ldTgtIdToLd[i];
		for (i = 0; i < ld_count; i++)
			pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
		memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
		       sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
		memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
		       sizeof(struct MR_DEV_HANDLE_INFO) *
		       MAX_RAIDMAP_PHYSICAL_DEVICES);
	}

	return 0;
}
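/*
 * MR_ValidateMapInfo - Validate the RAID map data provided by firmware
 *
 * Returns 1 if the map is consistent and usable, 0 otherwise.
 */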
u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
{
	struct fusion_context *fusion;
	struct MR_DRV_RAID_MAP_ALL *drv_map;
	struct MR_DRV_RAID_MAP *pDrvRaidMap;
	struct LD_LOAD_BALANCE_INFO *lbInfo;
	PLD_SPAN_INFO ldSpanInfo;
	struct MR_LD_RAID *raid;
	u16 num_lds, i;
	u16 ld;
	u32 expected_size;

	if (MR_PopulateDrvRaidMap(instance, map_id))
		return 0;

	fusion = instance->ctrl_context;
	drv_map = fusion->ld_drv_map[(map_id & 1)];
	pDrvRaidMap = &drv_map->raidMap;

	lbInfo = fusion->load_balance_info;
	ldSpanInfo = fusion->log_to_span;

	if (instance->max_raid_mapsize)
		expected_size = sizeof(struct MR_DRV_RAID_MAP_ALL);
	else if (instance->supportmax256vd)
		expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
	else
		expected_size =
			(sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
			(sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));

	if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
		dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x does not match expected size 0x%x\n",
			le32_to_cpu(pDrvRaidMap->totalSize),
			(unsigned int)expected_size);
		dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
			(unsigned int)sizeof(struct MR_LD_SPAN_MAP),
			le32_to_cpu(pDrvRaidMap->totalSize));
		return 0;
	}

	if (instance->UnevenSpanSupport)
		mr_update_span_set(drv_map, ldSpanInfo);

	if (lbInfo)
		mr_update_load_balance_params(drv_map, lbInfo);

	num_lds = le16_to_cpu(drv_map->raidMap.ldCount);

	memcpy(instance->ld_ids_prev,
	       instance->ld_ids_from_raidmap,
	       sizeof(instance->ld_ids_from_raidmap));
	memset(instance->ld_ids_from_raidmap, 0xff, MEGASAS_MAX_LD_IDS);

	for (i = 0; (num_lds > 0) && (i < MAX_LOGICAL_DRIVES_EXT); i++) {
		ld = MR_TargetIdToLdGet(i, drv_map);

		/* For non-existing VDs, iterate to the next VD */
		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
			continue;

		raid = MR_LdRaidGet(ld, drv_map);
		le32_to_cpus((u32 *)&raid->capability);
		instance->ld_ids_from_raidmap[i] = i;
		num_lds--;
	}

	return 1;
}

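/*
 * MR_GetSpanBlock - Get the span and, via @span_blk, the absolute block
 * number on the physical disk for a given row (even-span RAID maps).
 *
 * Returns the span number, or SPAN_INVALID on failure.
 */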
static u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
			   struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
	struct MR_QUAD_ELEMENT *quad;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u32 span, j;

	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {

		for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
			quad = &pSpanBlock->block_span_info.quad[j];

			if (le32_to_cpu(quad->diff) == 0)
				return SPAN_INVALID;
			if (le64_to_cpu(quad->logStart) <= row && row <=
				le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
				le32_to_cpu(quad->diff))) == 0) {
				if (span_blk != NULL) {
					u64 blk;
					blk = mega_div64_32((row - le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));

					blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
					*span_blk = blk;
				}
				return span;
			}
		}
	}
	return SPAN_INVALID;
}
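/*
 * mr_spanset_get_span_block - Calculate the span block for a given row
 * using the span set (uneven-span RAID maps).
 *
 * Returns the span number and, via @span_blk, the absolute block number
 * on the physical disk; SPAN_INVALID on failure.
 */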
static u32 mr_spanset_get_span_block(struct megasas_instance *instance,
		u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT *quad;
	u32 span, info;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;

		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info + 1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].
					block_span_info.quad[info];
				if (le32_to_cpu(quad->diff) == 0)
					return SPAN_INVALID;
				if (le64_to_cpu(quad->logStart) <= row &&
					row <= le64_to_cpu(quad->logEnd) &&
					(mega_mod64(row - le64_to_cpu(quad->logStart),
						le32_to_cpu(quad->diff))) == 0) {
					if (span_blk != NULL) {
						u64 blk;
						blk = mega_div64_32
						    ((row - le64_to_cpu(quad->logStart)),
						    le32_to_cpu(quad->diff));
						blk = (blk + le64_to_cpu(quad->offsetInSpan))
							<< raid->stripeShift;
						*span_blk = blk;
					}
					return span;
				}
			}
	}
	return SPAN_INVALID;
}
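/*
 * get_row_from_strip - Calculate the logical row for a given strip using
 * the span set information (uneven-span RAID maps).
 *
 * Returns the row number, or -1LLU on failure.
 */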
static u64 get_row_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32 info, strip_offset, span, span_offset;
	u64 span_set_Strip, span_set_Row, retval;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		span_set_Strip = strip - span_set->data_strip_start;
		strip_offset = mega_mod64(span_set_Strip,
				span_set->span_row_data_width);
		span_set_Row = mega_div64_32(span_set_Strip,
				span_set->span_row_data_width) * span_set->diff;
		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info + 1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset++;
				else
					break;
			}

		retval = (span_set->data_row_start + span_set_Row +
				(span_offset - 1));
		return retval;
	}
	return -1LLU;
}
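/*
 * get_strip_from_row - Calculate the starting strip of a given row using
 * the span set information (uneven-span RAID maps).
 *
 * Returns the strip number, or -1 on failure.
 */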
static u64 get_strip_from_row(struct megasas_instance *instance,
		u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT *quad;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32 span, info;
	u64 strip;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info + 1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.quad[info];
				if (le64_to_cpu(quad->logStart) <= row &&
					row <= le64_to_cpu(quad->logEnd) &&
					mega_mod64((row - le64_to_cpu(quad->logStart)),
					le32_to_cpu(quad->diff)) == 0) {
					strip = mega_div64_32
						(((row - span_set->data_row_start)
							- le64_to_cpu(quad->logStart)),
						le32_to_cpu(quad->diff));
					strip *= span_set->span_row_data_width;
					strip += span_set->data_strip_start;
					strip += span_set->strip_offset[span];
					return strip;
				}
			}
	}
	dev_err(&instance->pdev->dev,
		"get_strip_from_row: returns invalid strip for ld=%x, row=%lx\n",
		ld, (unsigned long)row);
	return -1;
}
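/*
 * get_arm_from_strip - Calculate the physical arm for a given strip using
 * the span set information (uneven-span RAID maps).
 *
 * Returns the arm number, or -1 on failure.
 */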
static u32 get_arm_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32 info, strip_offset, span, span_offset, retval;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		strip_offset = (uint)mega_mod64
				((strip - span_set->data_strip_start),
				span_set->span_row_data_width);

		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info + 1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset =
						span_set->strip_offset[span];
				else
					break;
			}

		retval = (strip_offset - span_offset);
		return retval;
	}

	dev_err(&instance->pdev->dev,
		"get_arm_from_strip: returns invalid arm for ld=%x strip=%lx\n",
		ld, (unsigned long)strip);

	return -1;
}
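/* get_arm - Return the physical arm for the given span and stripe */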
static u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
		  struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);

	u32 arm = 0;

	switch (raid->level) {
	case 0:
	case 5:
	case 6:
		arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
		break;
	case 1:
		/* start with logical arm */
		arm = get_arm_from_strip(instance, ld, stripe, map);
		if (arm != -1U)
			arm *= 2;
		break;
	}

	return arm;
}
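/*
 * mr_spanset_get_phy_params - Calculate the physical arm, span and block
 * for the specified stripe and reference in stripe, using the span set
 * (uneven-span RAID maps).
 *
 * Returns true on success; the device handle and span/arm are returned
 * through io_info and pRAID_Context.
 */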
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
		u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u32 pd, arRef, r1_alt_pd;
	u8 physArm, span;
	u64 row;
	u8 retval = true;
	u64 *pdBlock = &io_info->pdBlock;
	__le16 *pDevHandle = &io_info->devHandle;
	u8 *pPdInterface = &io_info->pd_interface;
	u32 logArm, rowMod, armQ, arm;

	*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);

	/* Get row and span from io_info for uneven-span IO */
	row = io_info->start_row;
	span = io_info->start_span;

	if (raid->level == 6) {
		logArm = get_arm_from_strip(instance, ld, stripRow, map);
		if (logArm == -1U)
			return false;
		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
		arm = armQ + 1 + logArm;
		if (arm >= SPAN_ROW_SIZE(map, ld, span))
			arm -= SPAN_ROW_SIZE(map, ld, span);
		physArm = (u8)arm;
	} else
		/* Calculate the arm */
		physArm = get_arm(instance, ld, span, stripRow, map);
	if (physArm == 0xFF)
		return false;

	arRef = MR_LdSpanArrayGet(ld, span, map);
	pd = MR_ArPdGet(arRef, physArm, map);

	if (pd != MR_PD_INVALID) {
		*pDevHandle = MR_PdDevHandleGet(pd, map);
		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
		/* get second pd also for RAID 1/10 fast path writes */
		if ((instance->adapter_type >= VENTURA_SERIES) &&
		    (raid->level == 1) &&
		    !io_info->isRead) {
			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (r1_alt_pd != MR_PD_INVALID)
				io_info->r1_alt_dev_handle =
					MR_PdDevHandleGet(r1_alt_pd, map);
		}
	} else {
		if ((raid->level >= 5) &&
		    ((instance->adapter_type == THUNDERBOLT_SERIES) ||
		     ((instance->adapter_type == INVADER_SERIES) &&
		      (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* Get alternate pd if it is available */
			physArm = physArm + 1;
			pd = MR_ArPdGet(arRef, physArm, map);
			if (pd != MR_PD_INVALID) {
				*pDevHandle = MR_PdDevHandleGet(pd, map);
				*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
			}
		}
	}

	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	if (instance->adapter_type >= VENTURA_SERIES) {
		((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	} else {
		pRAID_Context->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm = pRAID_Context->span_arm;
	}
	io_info->pd_after_lb = pd;
	return retval;
}
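/*
 * MR_GetPhyParams - Calculate the physical arm, span and block for the
 * specified stripe and reference in stripe (even-span RAID maps).
 *
 * Returns true on success; the device handle and span/arm are returned
 * through io_info and pRAID_Context.
 */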
static u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
		u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u32 pd, arRef, r1_alt_pd;
	u8 physArm, span;
	u64 row;
	u8 retval = true;
	u64 *pdBlock = &io_info->pdBlock;
	__le16 *pDevHandle = &io_info->devHandle;
	u8 *pPdInterface = &io_info->pd_interface;

	*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);

	row = mega_div64_32(stripRow, raid->rowDataSize);

	if (raid->level == 6) {
		/* logical arm within row */
		u32 logArm = mega_mod64(stripRow, raid->rowDataSize);
		u32 rowMod, armQ, arm;

		if (raid->rowSize == 0)
			return false;
		/* get logical row mod */
		rowMod = mega_mod64(row, raid->rowSize);
		armQ = raid->rowSize - 1 - rowMod; /* index of Q drive */
		arm = armQ + 1 + logArm; /* data always logically follows Q */
		if (arm >= raid->rowSize) /* handle wrap condition */
			arm -= raid->rowSize;
		physArm = (u8)arm;
	} else {
		if (raid->modFactor == 0)
			return false;
		physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow,
							 raid->modFactor),
					  map);
	}

	if (raid->spanDepth == 1) {
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else {
		span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
		if (span == SPAN_INVALID)
			return false;
	}

	/* Get the array on which this span is present */
	arRef = MR_LdSpanArrayGet(ld, span, map);
	pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */

	if (pd != MR_PD_INVALID) {
		/* Get dev handle from pd */
		*pDevHandle = MR_PdDevHandleGet(pd, map);
		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
		/* get second pd also for RAID 1/10 fast path writes */
		if ((instance->adapter_type >= VENTURA_SERIES) &&
		    (raid->level == 1) &&
		    !io_info->isRead) {
			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (r1_alt_pd != MR_PD_INVALID)
				io_info->r1_alt_dev_handle =
					MR_PdDevHandleGet(r1_alt_pd, map);
		}
	} else {
		if ((raid->level >= 5) &&
		    ((instance->adapter_type == THUNDERBOLT_SERIES) ||
		     ((instance->adapter_type == INVADER_SERIES) &&
		      (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* Get alternate pd if it is available */
			physArm = physArm + 1;
			pd = MR_ArPdGet(arRef, physArm, map);
			if (pd != MR_PD_INVALID) {
				/* Get dev handle from pd */
				*pDevHandle = MR_PdDevHandleGet(pd, map);
				*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
			}
		}
	}

	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	if (instance->adapter_type >= VENTURA_SERIES) {
		((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	} else {
		pRAID_Context->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm = pRAID_Context->span_arm;
	}
	io_info->pd_after_lb = pd;
	return retval;
}
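/*
 * mr_get_phy_params_r56_rmw - Calculate parameters for a RAID 5/6
 * division-offload write: the logical arm, data arm, row number and
 * parity arm(s) for the given strip, stored in the G35 RAID context.
 */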
static void mr_get_phy_params_r56_rmw(struct megasas_instance *instance,
			    u32 ld, u64 stripNo,
			    struct IO_REQUEST_INFO *io_info,
			    struct RAID_CONTEXT_G35 *pRAID_Context,
			    struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u8 span, dataArms, arms, dataArm, logArm;
	s8 rightmostParityArm, PParityArm;
	u64 rowNum;
	u64 *pdBlock = &io_info->pdBlock;

	dataArms = raid->rowDataSize;
	arms = raid->rowSize;

	rowNum = mega_div64_32(stripNo, dataArms);
	/* parity disk arm, first arm is 0 */
	rightmostParityArm = (arms - 1) - mega_mod64(rowNum, arms);

	/* logical arm within row */
	logArm = mega_mod64(stripNo, dataArms);
	/* physical arm for data */
	dataArm = mega_mod64((rightmostParityArm + 1 + logArm), arms);

	if (raid->spanDepth == 1) {
		span = 0;
	} else {
		span = (u8)MR_GetSpanBlock(ld, rowNum, pdBlock, map);
		if (span == SPAN_INVALID)
			return;
	}

	if (raid->level == 6) {
		/* P parity arm, adjacent to the rightmost (Q) parity arm */
		PParityArm = (arms - 2) - mega_mod64(rowNum, arms);

		if (PParityArm < 0)
			PParityArm += arms;

		/* rightmostParityArm is the Q parity arm here */
		pRAID_Context->flow_specific.r56_arm_map = rightmostParityArm;
		pRAID_Context->flow_specific.r56_arm_map |=
				    (u16)(PParityArm << RAID_CTX_R56_P_ARM_SHIFT);
	} else {
		pRAID_Context->flow_specific.r56_arm_map |=
				    (u16)(rightmostParityArm << RAID_CTX_R56_P_ARM_SHIFT);
	}

	pRAID_Context->reg_lock_row_lba = cpu_to_le64(rowNum);
	pRAID_Context->flow_specific.r56_arm_map |=
				   (u16)(logArm << RAID_CTX_R56_LOG_ARM_SHIFT);
	cpu_to_le16s(&pRAID_Context->flow_specific.r56_arm_map);
	pRAID_Context->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | dataArm;
	pRAID_Context->raid_flags = (MR_RAID_FLAGS_IO_SUB_TYPE_R56_DIV_OFFLOAD <<
				    MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
}
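/*
 * MR_BuildRaidContext - Build the RAID context for an IO
 *
 * This function calculates the start/end row and strip for the IO, sets
 * up the region lock, and fills in the RAID context. Returns false if the
 * context could not be built; otherwise returns the fast-path eligibility
 * result (or true when the calculation is left to firmware).
 */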
u8
MR_BuildRaidContext(struct megasas_instance *instance,
		    struct IO_REQUEST_INFO *io_info,
		    struct RAID_CONTEXT *pRAID_Context,
		    struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN)
{
	struct fusion_context *fusion;
	struct MR_LD_RAID *raid;
	u32 stripSize, stripe_mask;
	u64 endLba, endStrip, endRow, start_row, start_strip;
	u64 regStart;
	u32 regSize;
	u8 num_strips, numRows;
	u16 ref_in_start_stripe, ref_in_end_stripe;
	u64 ldStartBlock;
	u32 numBlocks, ldTgtId;
	u8 isRead;
	u8 retval = 0;
	u8 startlba_span = SPAN_INVALID;
	u64 *pdBlock = &io_info->pdBlock;
	u16 ld;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;
	io_info->IoforUnevenSpan = 0;
	io_info->start_span = SPAN_INVALID;
	fusion = instance->ctrl_context;

	ld = MR_TargetIdToLdGet(ldTgtId, map);
	raid = MR_LdRaidGet(ld, map);
	/* check read ahead capable */
	io_info->ra_capable = raid->capability.ra_capable;

	/*
	 * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
	 * return FALSE
	 */
	if (raid->rowDataSize == 0) {
		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
			return false;
		else if (instance->UnevenSpanSupport) {
			io_info->IoforUnevenSpan = 1;
		} else {
			dev_info(&instance->pdev->dev,
				 "raid->rowDataSize is 0, but SPAN[0] rowDataSize = 0x%0x, and there is _NO_ UnevenSpanSupport\n",
				 MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
			return false;
		}
	}

	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize - 1;

	io_info->data_arms = raid->rowDataSize;

	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (u16)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (u8)(endStrip - start_strip + 1);

	if (io_info->IoforUnevenSpan) {
		start_row = get_row_from_strip(instance, ld, start_strip, map);
		endRow = get_row_from_strip(instance, ld, endStrip, map);
		if (start_row == -1ULL || endRow == -1ULL) {
			dev_info(&instance->pdev->dev,
				 "return from %s %d. Send IO w/o region lock.\n",
				 __func__, __LINE__);
			return false;
		}

		if (raid->spanDepth == 1) {
			startlba_span = 0;
			*pdBlock = start_row << raid->stripeShift;
		} else
			startlba_span = (u8)mr_spanset_get_span_block(instance,
						ld, start_row, pdBlock, map);
		if (startlba_span == SPAN_INVALID) {
			dev_info(&instance->pdev->dev,
				 "return from %s %d for row 0x%llx, start strip 0x%llx, end strip 0x%llx\n",
				 __func__, __LINE__,
				 (unsigned long long)start_row,
				 (unsigned long long)start_strip,
				 (unsigned long long)endStrip);
			return false;
		}
		io_info->start_span = startlba_span;
		io_info->start_row = start_row;
	} else {
		start_row = mega_div64_32(start_strip, raid->rowDataSize);
		endRow = mega_div64_32(endStrip, raid->rowDataSize);
	}
	numRows = (u8)(endRow - start_row + 1);

	/*
	 * Region lock: assume the region starts at the first row and covers
	 * one full stripe; adjust below if the IO is smaller or larger.
	 */
	regStart = start_row << raid->stripeShift;
	regSize = stripSize;

	io_info->do_fp_rlbypass = raid->capability.fpBypassRegionLock;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead)
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
					      ((num_strips == 1) ||
					       raid->capability.
					       fpReadAcrossStripe));
		else
			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
					      ((num_strips == 1) ||
					       raid->capability.
					       fpWriteAcrossStripe));
	} else
		io_info->fpOkForIo = false;

	if (numRows == 1) {
		/* single-strip IOs can always lock only the data needed */
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
		/* multi-strip IOs always need the full stripe locked */
	} else if (io_info->IoforUnevenSpan == 0) {
		/*
		 * For even-span region lock optimization.
		 * If the start strip is the last in the start row
		 */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			/* initialize count to sectors from startRef to end
			 * of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}

		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == endRow * raid->rowDataSize)
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	} else {
		/*
		 * For uneven-span region lock optimization.
		 * If the start strip is the last in the start row
		 */
		if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
				SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
			regStart += ref_in_start_stripe;
			/* initialize count to sectors from
			 * startRef to end of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}

		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == get_strip_from_row(instance, ld, endRow, map))
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	}

	pRAID_Context->timeout_value =
		cpu_to_le16(raid->fpIoTimeoutForLd ?
			    raid->fpIoTimeoutForLd :
			    map->raidMap.fpPdIoTimeoutSec);
	if (instance->adapter_type == INVADER_SERIES)
		pRAID_Context->reg_lock_flags = (isRead) ?
			raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	else if (instance->adapter_type == THUNDERBOLT_SERIES)
		pRAID_Context->reg_lock_flags = (isRead) ?
			REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	pRAID_Context->virtual_disk_tgt_id = raid->targetId;
	pRAID_Context->reg_lock_row_lba = cpu_to_le64(regStart);
	pRAID_Context->reg_lock_length = cpu_to_le32(regSize);
	pRAID_Context->config_seq_num = raid->seqNum;
	/* save pointer to raid->LUN array */
	*raidLUN = raid->LUN;

	/* RAID 5/6 division offload for writes */
	if (fusion->r56_div_offload && (raid->level >= 5) && !isRead) {
		mr_get_phy_params_r56_rmw(instance, ld, start_strip, io_info,
					  (struct RAID_CONTEXT_G35 *)pRAID_Context,
					  map);
		return true;
	}

	/* Get phy params only if FP capable, or else leave it to MR firmware
	 * to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		retval = io_info->IoforUnevenSpan ?
			mr_spanset_get_phy_params(instance, ld,
						  start_strip, ref_in_start_stripe,
						  io_info, pRAID_Context, map) :
			MR_GetPhyParams(instance, ld, start_strip,
					ref_in_start_stripe, io_info,
					pRAID_Context, map);
		/* If IO is on an invalid pd, then FP is not possible */
		if (io_info->devHandle == MR_DEVHANDLE_INVALID)
			io_info->fpOkForIo = false;
		return retval;
	} else if (isRead) {
		uint stripIdx;

		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			retval = io_info->IoforUnevenSpan ?
				mr_spanset_get_phy_params(instance, ld,
							  start_strip + stripIdx,
							  ref_in_start_stripe, io_info,
							  pRAID_Context, map) :
				MR_GetPhyParams(instance, ld,
						start_strip + stripIdx, ref_in_start_stripe,
						io_info, pRAID_Context, map);
			if (!retval)
				return true;
		}
	}
	return true;
}
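/*
 * mr_update_span_set - Prepare the span set info from a valid RAID map
 * and store it in the driver's local ldSpanInfo copy, per logical drive.
 */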
void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
			PLD_SPAN_INFO ldSpanInfo)
{
	u8 span, count;
	u32 element, span_row_width;
	u64 span_row;
	struct MR_LD_RAID *raid;
	LD_SPAN_SET *span_set, *span_set_prev;
	struct MR_QUAD_ELEMENT *quad;
	int ldCount;
	u16 ld;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
			continue;
		raid = MR_LdRaidGet(ld, map);
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			for (span = 0; span < raid->spanDepth; span++) {
				if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
					block_span_info.noElements) <
					element + 1)
					continue;
				span_set = &(ldSpanInfo[ld].span_set[element]);
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.
					quad[element];

				span_set->diff = le32_to_cpu(quad->diff);

				for (count = 0, span_row_width = 0;
					count < raid->spanDepth; count++) {
					if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
						spanBlock[count].
						block_span_info.
						noElements) >= element + 1) {
						span_set->strip_offset[count] =
							span_row_width;
						span_row_width +=
							MR_LdSpanPtrGet
							(ld, count, map)->spanRowDataSize;
					}
				}

				span_set->span_row_data_width = span_row_width;
				span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
					le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
					le32_to_cpu(quad->diff));

				if (element == 0) {
					span_set->log_start_lba = 0;
					span_set->log_end_lba =
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start = 0;
					span_set->span_row_end = span_row - 1;

					span_set->data_strip_start = 0;
					span_set->data_strip_end =
						(span_row * span_row_width) - 1;

					span_set->data_row_start = 0;
					span_set->data_row_end =
						(span_row * le32_to_cpu(quad->diff)) - 1;
				} else {
					span_set_prev = &(ldSpanInfo[ld].
							span_set[element - 1]);
					span_set->log_start_lba =
						span_set_prev->log_end_lba + 1;
					span_set->log_end_lba =
						span_set->log_start_lba +
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start =
						span_set_prev->span_row_end + 1;
					span_set->span_row_end =
						span_set->span_row_start + span_row - 1;

					span_set->data_strip_start =
						span_set_prev->data_strip_end + 1;
					span_set->data_strip_end =
						span_set->data_strip_start +
						(span_row * span_row_width) - 1;

					span_set->data_row_start =
						span_set_prev->data_row_end + 1;
					span_set->data_row_end =
						span_set->data_row_start +
						(span_row * le32_to_cpu(quad->diff)) - 1;
				}
				break;
			}
			if (span == raid->spanDepth)
				break;
		}
	}
}

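/*
 * mr_update_load_balance_params - Mark RAID 1 LDs in optimal state as
 * eligible for read load balancing and validate lb_pending_cmds.
 */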
void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
				   struct LD_LOAD_BALANCE_INFO *lbInfo)
{
	int ldCount;
	u16 ld;
	struct MR_LD_RAID *raid;

	if (lb_pending_cmds > 128 || lb_pending_cmds < 1)
		lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, drv_map);
		if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}

		raid = MR_LdRaidGet(ld, drv_map);
		if ((raid->level != 1) ||
		    (raid->ldState != MR_LD_STATE_OPTIMAL)) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}
		lbInfo[ldCount].loadBalanceFlag = 1;
	}
}

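/*
 * megasas_get_best_arm_pd - Select the best arm (and its pd) for a RAID 1
 * read, balancing on outstanding command count and head position.
 */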
static u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
				  struct LD_LOAD_BALANCE_INFO *lbInfo,
				  struct IO_REQUEST_INFO *io_info,
				  struct MR_DRV_RAID_MAP_ALL *drv_map)
{
	struct MR_LD_RAID *raid;
	u16 pd1_dev_handle;
	u16 pend0, pend1, ld;
	u64 diff0, diff1;
	u8 bestArm, pd0, pd1, span, arm;
	u32 arRef, span_row_size;

	u64 block = io_info->ldStartBlock;
	u32 count = io_info->numBlocks;

	span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
			>> RAID_CTX_SPANARM_SPAN_SHIFT);
	arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);

	ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
	raid = MR_LdRaidGet(ld, drv_map);
	span_row_size = instance->UnevenSpanSupport ?
			SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;

	arRef = MR_LdSpanArrayGet(ld, span, drv_map);
	pd0 = MR_ArPdGet(arRef, arm, drv_map);
	pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
			 (arm + 1 - span_row_size) : arm + 1, drv_map);

	/* Get PD1 Dev Handle */
	pd1_dev_handle = MR_PdDevHandleGet(pd1, drv_map);

	if (pd1_dev_handle == MR_DEVHANDLE_INVALID) {
		bestArm = arm;
	} else {
		/* get the pending cmds for the data and mirror arms */
		pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
		pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);

		/* Determine the disk whose head is nearer to the req. block */
		diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
		diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
		bestArm = (diff0 <= diff1 ? arm : arm ^ 1);

		/* Override the nearer-head choice if that arm is busier than
		 * the other by more than lb_pending_cmds outstanding commands
		 */
		if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
		    (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
			bestArm ^= 1;

		/* Update the span/arm and pd for the chosen arm */
		io_info->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
		io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
	}

	lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
	return io_info->pd_after_lb;
}

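/*
 * get_updated_dev_handle - Return the device handle of the load-balanced
 * arm for a RAID 1 read and account for the new outstanding command.
 */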
__le16 get_updated_dev_handle(struct megasas_instance *instance,
			      struct LD_LOAD_BALANCE_INFO *lbInfo,
			      struct IO_REQUEST_INFO *io_info,
			      struct MR_DRV_RAID_MAP_ALL *drv_map)
{
	u8 arm_pd;
	__le16 devHandle;

	/* get best new arm (PD ID) */
	arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info, drv_map);
	devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
	io_info->pd_interface = MR_PdInterfaceTypeGet(arm_pd, drv_map);
	atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);

	return devHandle;
}