// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Module Name:
 *  dpcsup.c
 *
 * Abstract: All DPC processing routines for the cyclone board occur here.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>

#include "aacraid.h"
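
/**
 *	aac_response_normal	-	Handle command replies
 *	@q: Queue to read from
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue.  We will pull
 *	off all QE there are and wake up all the waiters before exiting.  We
 *	will take a spinlock out on the queue before operating on it.
 */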
unsigned int aac_response_normal(struct aac_queue * q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	struct hw_fib * hwfib;
	struct fib * fib;
	int consumed = 0;
	unsigned long flags, mflags;

	spin_lock_irqsave(q->lock, flags);
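	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs.  We then return
	 *	back to the system.  If no response was requested we just
	 *	deallocate the Fib here and continue.
	 */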
	while(aac_consumer_get(dev, q, &entry))
	{
		int fast;
		u32 index = le32_to_cpu(entry->addr);
		fast = index & 0x01;
		fib = &dev->fibs[index >> 2];
		hwfib = fib->hw_fib_va;

		aac_consumer_free(dev, q, HostNormRespQueue);
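		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue.  The caller has already been notified that
		 *	the fib timed out.
		 */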
		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);

		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			spin_unlock_irqrestore(q->lock, flags);
			aac_fib_complete(fib);
			aac_fib_free(fib);
			spin_lock_irqsave(q->lock, flags);
			continue;
		}
		spin_unlock_irqrestore(q->lock, flags);

		if (fast) {
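			/*
			 *	Doctor the fib: a fast response carries no
			 *	returned data, so fill in an OK status and
			 *	mark the fib as processed by the adapter.
			 */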
			*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
			fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

		if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
		{
			__le32 *pstatus = (__le32 *)hwfib->data;

			if (*pstatus & cpu_to_le32(0xffff0000))
				*pstatus = cpu_to_le32(ST_OK);
		}
		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
		{
			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected)) {
				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
			} else {
				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
			}
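			/*
			 *	NOTE:  we cannot touch the fib after this
			 *	call, because it may have been deallocated.
			 */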
			fib->callback(fib->callback_data, fib);
		} else {
			unsigned long flagv;

			spin_lock_irqsave(&fib->event_lock, flagv);
			if (!fib->done) {
				fib->done = 1;
				complete(&fib->event_wait);
			}
			spin_unlock_irqrestore(&fib->event_lock, flagv);

			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);

			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
			if (fib->done == 2) {
				spin_lock_irqsave(&fib->event_lock, flagv);
				fib->done = 0;
				spin_unlock_irqrestore(&fib->event_lock, flagv);
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}
		}
		consumed++;
		spin_lock_irqsave(q->lock, flags);
	}

	if (consumed > aac_config.peak_fibs)
		aac_config.peak_fibs = consumed;
	if (consumed == 0)
		aac_config.zero_fibs++;

	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}
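
/**
 *	aac_command_normal	-	Handle commands from the adapter
 *	@q: queue to process
 *
 *	This DPC routine will be queued when the adapter interrupts us to
 *	let us know there is a command on our normal priority queue.  We will
 *	pull off all QE there are and wake up all the waiters before exiting.
 *	We will take a spinlock out on the queue before operating on it.
 */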
unsigned int aac_command_normal(struct aac_queue *q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(q->lock, flags);
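	/*
	 *	Keep pulling command QEs off the command queue and waking
	 *	up the waiters until there are no more QEs.  We then return
	 *	back to the system.
	 */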
	while(aac_consumer_get(dev, q, &entry))
	{
		struct fib fibctx;
		struct hw_fib * hw_fib;
		u32 index;
		struct fib *fib = &fibctx;

		index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
		hw_fib = &dev->aif_base_va[index];
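		/*
		 *	Allocate a FIB at all costs.  For non queued stuff
		 *	we can just use the stack so we are happy.  We need
		 *	a fib object in order to manage the linked lists.
		 */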
		if (dev->aif_thread)
			if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
				fib = &fibctx;

		memset(fib, 0, sizeof(struct fib));
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;

		if (dev->aif_thread && fib != &fibctx) {
			list_add_tail(&fib->fiblink, &q->cmdq);
			aac_consumer_free(dev, q, HostNormCmdQueue);
			wake_up_interruptible(&q->cmdready);
		} else {
			aac_consumer_free(dev, q, HostNormCmdQueue);
			spin_unlock_irqrestore(q->lock, flags);
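			/*
			 *	Set the status of this FIB and hand it
			 *	straight back to the adapter.
			 */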
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, sizeof(u32));
			spin_lock_irqsave(q->lock, flags);
		}
	}
	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}
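
/*
 *	aac_aif_callback
 *	@context: the fib that was used to send the AIF request
 *	@fibptr: pointer to the completed fib
 *
 *	Handles an AIF completion: hands the received AIF to
 *	aac_intr_normal() and re-arms the adapter with a fresh
 *	AifRequest fib.
 */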
static void aac_aif_callback(void *context, struct fib * fibptr)
{
	struct fib *fibctx;
	struct aac_dev *dev;
	struct aac_aifcmd *cmd;

	fibctx = (struct fib *)context;
	BUG_ON(fibptr == NULL);
	dev = fibptr->dev;

	if ((fibptr->hw_fib_va->header.XferState &
	    cpu_to_le32(NoMoreAifDataAvailable)) ||
	    dev->sa_firmware) {
		aac_fib_complete(fibptr);
		aac_fib_free(fibptr);
		return;
	}

	aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);

	aac_fib_init(fibctx);
	cmd = (struct aac_aifcmd *) fib_data(fibctx);
	cmd->command = cpu_to_le32(AifReqEvent);

	aac_fib_send(AifRequest,
		     fibctx,
		     sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
		     FsaNormal,
		     0, 1,
		     (fib_callback)aac_aif_callback, fibctx);
}
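
/**
 *	aac_intr_normal	-	Handle host interface interrupts
 *	@dev: Device
 *	@index: completion reference
 *	@isAif: is this an AIF
 *	@isFastResponse: is this a fast response
 *	@aif_fib: pointer to the AIF fib, if any
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue.  We will pull
 *	off all QE there are and wake up all the waiters before exiting.
 */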
unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
			     int isFastResponse, struct hw_fib *aif_fib)
{
	unsigned long mflags;

	dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
	if (isAif == 1) {
		struct hw_fib * hw_fib;
		struct fib * fib;
		struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
		unsigned long flags;
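
		/*
		 *	An AIF is only useful if the AIF thread is running,
		 *	and we need a private fib and hw_fib to carry the
		 *	event to it; bail out if either is unavailable.
		 */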
		if ((!dev->aif_thread)
		    || (!(fib = kzalloc(sizeof(struct fib),GFP_ATOMIC))))
			return 1;
		if (!(hw_fib = kzalloc(sizeof(struct hw_fib),GFP_ATOMIC))) {
			kfree (fib);
			return 1;
		}
		if (dev->sa_firmware) {
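			/* store the event type for the AIF thread */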
			fib->hbacmd_size = index;
		} else if (aif_fib != NULL) {
			memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
		} else {
			memcpy(hw_fib, (struct hw_fib *)
				(((uintptr_t)(dev->regs.sa)) + index),
				sizeof(struct hw_fib));
		}
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;

		spin_lock_irqsave(q->lock, flags);
		list_add_tail(&fib->fiblink, &q->cmdq);
		wake_up_interruptible(&q->cmdready);
		spin_unlock_irqrestore(q->lock, flags);
		return 1;
	} else if (isAif == 2) {
		struct fib *fibctx;
		struct aac_aifcmd *cmd;

		fibctx = aac_fib_alloc(dev);
		if (!fibctx)
			return 1;
		aac_fib_init(fibctx);

		cmd = (struct aac_aifcmd *) fib_data(fibctx);
		cmd->command = cpu_to_le32(AifReqEvent);

		return aac_fib_send(AifRequest,
				    fibctx,
				    sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
				    FsaNormal,
				    0, 1,
				    (fib_callback)aac_aif_callback, fibctx);
	} else {
		struct fib *fib = &dev->fibs[index];
		int start_callback = 0;
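
		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue.  The caller has already been notified that
		 *	the fib timed out.
		 */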
		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);

		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			aac_fib_complete(fib);
			aac_fib_free(fib);
			return 0;
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

		if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
			if (isFastResponse)
				fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;

			if (fib->callback) {
				start_callback = 1;
			} else {
				unsigned long flagv;
				int completed = 0;

				dprintk((KERN_INFO "event_wait up\n"));
				spin_lock_irqsave(&fib->event_lock, flagv);
				if (fib->done == 2) {
					fib->done = 1;
					completed = 1;
				} else {
					fib->done = 1;
					complete(&fib->event_wait);
				}
				spin_unlock_irqrestore(&fib->event_lock, flagv);

				spin_lock_irqsave(&dev->manage_lock, mflags);
				dev->management_fib_count--;
				spin_unlock_irqrestore(&dev->manage_lock,
					mflags);

				FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
				if (completed)
					aac_fib_complete(fib);
			}
		} else {
			struct hw_fib *hwfib = fib->hw_fib_va;

			if (isFastResponse) {
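				/* Doctor the fib: fill in the OK status */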
				*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
				hwfib->header.XferState |=
					cpu_to_le32(AdapterProcessed);
				fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
			}

			if (hwfib->header.Command ==
			    cpu_to_le16(NuFileSystem)) {
				__le32 *pstatus = (__le32 *)hwfib->data;

				if (*pstatus & cpu_to_le32(0xffff0000))
					*pstatus = cpu_to_le32(ST_OK);
			}
			if (hwfib->header.XferState &
			    cpu_to_le32(NoResponseExpected | Async)) {
				if (hwfib->header.XferState & cpu_to_le32(
					NoResponseExpected)) {
					FIB_COUNTER_INCREMENT(
						aac_config.NoResponseRecved);
				} else {
					FIB_COUNTER_INCREMENT(
						aac_config.AsyncRecved);
				}
				start_callback = 1;
			} else {
				unsigned long flagv;
				int completed = 0;

				dprintk((KERN_INFO "event_wait up\n"));
				spin_lock_irqsave(&fib->event_lock, flagv);
				if (fib->done == 2) {
					fib->done = 1;
					completed = 1;
				} else {
					fib->done = 1;
					complete(&fib->event_wait);
				}
				spin_unlock_irqrestore(&fib->event_lock, flagv);

				spin_lock_irqsave(&dev->manage_lock, mflags);
				dev->management_fib_count--;
				spin_unlock_irqrestore(&dev->manage_lock,
					mflags);

				FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
				if (completed)
					aac_fib_complete(fib);
			}
		}

		if (start_callback) {
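			/*
			 *	NOTE:  we cannot touch the fib after this
			 *	call, because it may have been deallocated.
			 */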
			if (likely(fib->callback && fib->callback_data)) {
				fib->callback(fib->callback_data, fib);
			} else {
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}
		}
		return 0;
	}
}