/*
 * spu_switch.c
 *
 * Host-side part of the SPU context switch sequence.
 *
 * A fully preemptive switch of an SPE is very expensive in terms of
 * time and system resources.  This implementation performs a
 * whole-context save/restore: the privileged and problem state areas
 * are saved and restored by the PPE, while the local store and
 * register file are transferred by save/restore code that runs on
 * the SPU itself (see spu_save_dump.h and spu_restore_dump.h).
 */

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>

#include "spufs.h"

#include "spu_save_dump.h"
#include "spu_restore_dump.h"

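/*
 * Busy-wait polling helpers.  The first (disabled) variant spins on
 * the condition unconditionally; the active variant calls cpu_relax()
 * for up to RELAX_SPIN_COUNT iterations, then yield()s for as long as
 * the condition still holds.
 */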
#if 0
#define POLL_WHILE_TRUE(_c) { \
	do { \
	} while (_c); \
}
#else
#define RELAX_SPIN_COUNT 1000
#define POLL_WHILE_TRUE(_c) { \
	do { \
		int _i; \
		for (_i = 0; _i < RELAX_SPIN_COUNT && (_c); _i++) { \
			cpu_relax(); \
		} \
		if (unlikely(_c)) yield(); \
		else break; \
	} while (_c); \
}
#endif

#define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c))

static inline void acquire_spu_lock(struct spu *spu)
{
	/*
	 * Acquire the SPU's lock to ensure exclusive access during
	 * save/restore.  Not implemented yet: exclusive access is
	 * currently guaranteed by the caller.
	 */
}

static inline void release_spu_lock(struct spu *spu)
{
	/*
	 * Release the SPU's lock.  Not implemented yet.
	 */
}

static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 isolate_state;

	/*
	 * If SPU_Status[E,L,IS] is set, the SPU is in isolated
	 * execution or isolated load state and cannot be context
	 * saved at this time.
	 */
	isolate_state = SPU_STATUS_ISOLATED_STATE |
	    SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
	return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
}

static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Save and then disable all class 0, 1 and 2 interrupt masks,
	 * with eieio ordering the mask updates before the lock is
	 * dropped.  The saved masks are written back later by
	 * reenable_interrupts().
	 */
	spin_lock_irq(&spu->register_lock);
	if (csa) {
		csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
		csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
		csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
	}
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	eieio();
	spin_unlock_irq(&spu->register_lock);

	/*
	 * This flag needs to be set before calling synchronize_irq so
	 * that the update will be visible to the relevant handlers
	 * via a simple load.
	 */
	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
	synchronize_irq(spu->irqs[0]);
	synchronize_irq(spu->irqs[1]);
	synchronize_irq(spu->irqs[2]);
}

static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Here one could set a software watchdog timer bounding the
	 * maximum time allowed for a context switch sequence.  Not
	 * implemented: no watchdog is currently needed.
	 */
}

static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Inhibit user-space access to this SPU by unmapping the
	 * virtual pages assigned to its problem-state MMIO area.
	 * Not implemented here; mappings are handled elsewhere in
	 * spufs.
	 */
}

static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Set a software context switch pending flag.  Nothing to do
	 * here: the flag is already set in disable_interrupts().
	 */
}

static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Suspend the MFC DMA queue and save MFC_CNTL, handling each
	 * possible suspend state of the queue.
	 */
	switch (in_be64(&priv2->mfc_control_RW) &
	       MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
	case MFC_CNTL_SUSPEND_IN_PROGRESS:
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		fallthrough;
	case MFC_CNTL_SUSPEND_COMPLETE:
		if (csa)
			csa->priv2.mfc_control_RW =
				in_be64(&priv2->mfc_control_RW) |
				MFC_CNTL_SUSPEND_DMA_QUEUE;
		break;
	case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		if (csa)
			csa->priv2.mfc_control_RW =
				in_be64(&priv2->mfc_control_RW) &
				~MFC_CNTL_SUSPEND_DMA_QUEUE &
				~MFC_CNTL_SUSPEND_MASK;
		break;
	}
}

static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Save the SPU_RunCntl register in the CSA.  This value
	 * contains the authoritative "run" state of the context.
	 */
	csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
}

static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Save the MFC_SR1 (state register 1) value in the CSA.
	 */
	csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
}

static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * If the SPU is already stopped, save SPU_Status as-is.
	 * Otherwise stop it, wait until it halts, and record either
	 * the resulting stop condition or the fact that it was
	 * running.
	 */
	if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
		csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	} else {
		u32 stopped;

		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
		stopped =
		    SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
		if ((in_be32(&prob->spu_status_R) & stopped) == 0)
			csa->prob.spu_status_R = SPU_STATUS_RUNNING;
		else
			csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	}
}

static inline void save_mfc_stopped_status(struct spu_state *csa,
					   struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	const u64 mask = MFC_CNTL_DECREMENTER_RUNNING |
			MFC_CNTL_DMA_QUEUES_EMPTY;

	/*
	 * Re-read the decrementer-running and DMA-queues-empty bits
	 * into the saved MFC_CNTL value.  This must be done after the
	 * queues have been suspended; the value cached earlier by
	 * save_mfc_cntl() may be stale by now.
	 */
	csa->priv2.mfc_control_RW &= ~mask;
	csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask;
}

static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Halt the MFC decrementer by writing MFC_CNTL[Dh], keeping
	 * the DMA queue suspended.
	 */
	out_be64(&priv2->mfc_control_RW,
		 MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK);
	eieio();
}

static inline void save_timebase(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Record the time of suspension; used by setup_decr() on
	 * restore to adjust the decrementer by the elapsed time.
	 */
	csa->suspend_time = get_cycles();
}

static inline void remove_other_spu_access(struct spu_state *csa,
					   struct spu *spu)
{
	/*
	 * Remove other SPUs' access to this SPU, if provided.
	 * Not implemented yet.
	 */
}

static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Write SPU_MSSync and poll SPU_MSSync[P] until the
	 * multisource synchronization completes.
	 */
	out_be64(&prob->spc_mssync_RW, 1UL);
	POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
}

static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Issue a TLBIE to invalidate stale SPU translations,
	 * followed by a memory barrier.
	 */
	spu_tlb_invalidate(spu);
	mb();
}

static inline void handle_pending_interrupts(struct spu_state *csa,
					     struct spu *spu)
{
	/*
	 * Handle any pending interrupts from this SPU here.  This is
	 * OS or hypervisor specific; one option is to re-enable
	 * interrupts briefly so that handlers, seeing the context
	 * switch pending flag, can process them without restarting
	 * the SPU or the MFC command queue.  Not implemented yet.
	 */
}

static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/*
	 * If the MFC DMA queues are not empty, save the 8 PU and
	 * 16 SPU command queue entries.
	 */
	if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
		for (i = 0; i < 8; i++) {
			csa->priv2.puq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data0_RW);
			csa->priv2.puq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data1_RW);
			csa->priv2.puq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data2_RW);
			csa->priv2.puq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			csa->priv2.spuq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
			csa->priv2.spuq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
			csa->priv2.spuq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
			csa->priv2.spuq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
		}
	}
}

static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Save the PPU_QueryMask register in the CSA.
	 */
	csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
}

static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Save the PPU_QueryType register in the CSA.
	 */
	csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}

static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Save the PPU_TagStatus register in the CSA.  There is no
	 * corresponding restore step for this read-only status
	 * register; the status is determined by the outstanding tag
	 * groups themselves.
	 */
	csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R);
}

static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Save the MFC_CSR_TSQ (tag status query) register in the CSA.
	 */
	csa->priv2.spu_tag_status_query_RW =
	    in_be64(&priv2->spu_tag_status_query_RW);
}

static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Save the MFC command buffers (MFC_CSR_CMD1 and
	 * MFC_CSR_CMD2) in the CSA.
	 */
	csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
	csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
}

static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Save the MFC_CSR_ATO (atomic status) register in the CSA.
	 */
	csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
}

static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Save the MFC_TClassID register in the CSA.
	 */
	csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
}

static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Write MFC_TClassID with the value 0x10000000 for the
	 * duration of the switch.
	 */
	spu_mfc_tclass_id_set(spu, 0x10000000);
	eieio();
}

static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Initiate a purge of the MFC command queue while keeping
	 * the queue suspended.
	 */
	out_be64(&priv2->mfc_control_RW,
			MFC_CNTL_PURGE_DMA_REQUEST |
			MFC_CNTL_SUSPEND_MASK);
	eieio();
}

static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Poll MFC_CNTL until the purge completes.
	 */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			 MFC_CNTL_PURGE_DMA_STATUS_MASK) ==
			 MFC_CNTL_PURGE_DMA_COMPLETE);
}

static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Set MFC_SR1 for the duration of the switch: master run
	 * control enabled, address relocation (translation) on, and
	 * hardware (bus) TLBIE on.  All other facilities, including
	 * problem-state access and local storage decode, stay
	 * disabled.
	 */
	spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
			      MFC_STATE1_RELOCATE_MASK |
			      MFC_STATE1_BUS_TLBIE_MASK));
}

static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Save the SPU_NPC (next program counter) in the CSA.
	 */
	csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
}

static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Save the SPU_PrivCntl register in the CSA.
	 */
	csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
}

static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Write a zero (default) value to SPU_PrivCntl.
	 */
	out_be64(&priv2->spu_privcntl_RW, 0UL);
	eieio();
}

static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Save the SPU_LSLR (local store limit) register in the CSA.
	 */
	csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
}

static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Reset SPU_LSLR to its default (no limit) value, the full
	 * local store address mask.
	 */
	out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
	eieio();
}

static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Save the SPU_Cfg register in the CSA.
	 */
	csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
}

static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Save the performance monitor trace state, if enabled.
	 * Not implemented yet.
	 */
}

static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Save the resource allocation group ID and enable registers
	 * in the CSA.
	 */
	csa->priv1.resource_allocation_groupID_RW =
		spu_resource_allocation_groupID_get(spu);
	csa->priv1.resource_allocation_enable_RW =
		spu_resource_allocation_enable_get(spu);
}

static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Save the PPU_MB_Stat (mailbox status) register in the CSA.
	 */
	csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
}

static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Save the PPU_MB (PU mailbox) register in the CSA.
	 */
	csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
}

static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Save the PPUINT_MB (PU interrupt mailbox) register in the
	 * CSA.
	 */
	csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
}

static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/*
	 * Save the data in the listed SPU channels, resetting each
	 * to zero after it is read.
	 */

	/* Save CH 1, without channel count */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);

	/* Save the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
		csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/*
	 * Save the SPU outbound mailbox: the channel 29 count and up
	 * to four data entries, then reset the count.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
	for (i = 0; i < 4; i++) {
		csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
	}
	out_be64(&priv2->spu_chnlcnt_RW, 0UL);
	eieio();
}

static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Save the MFC command channel (channel 21) available count.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
	eieio();
	csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
	eieio();
}

static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
	u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
	u64 idx;
	int i;

	/*
	 * Write the default (power-on) channel counts for channels
	 * 21, 23, 28 and 30.
	 */
	for (i = 0; i < 4; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Restart the MFC command queue by writing MFC_CNTL back to
	 * normal queue operation.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}

static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu,
		unsigned int *code, int code_size)
{
	/*
	 * Invalidate all SLB entries and set up kernel SLB entries
	 * covering the LSCSA and the save/restore code, so that the
	 * MFC and the SPU-side code can address them while the user
	 * context's translations are gone.
	 */
	spu_invalidate_slbs(spu);
	spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size);
}

static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Mark the context switch active.  If a fault is pending,
	 * arrange for the MFC DMA command to be restarted when
	 * MFC_CNTL is written back on restore, then clear the
	 * switch-pending flag so interrupt handlers resume normal
	 * processing.
	 */
	if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags))
		csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
}

static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
	unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;

	/*
	 * Clear any latched interrupt status and enable only the
	 * class 1 (translation fault) interrupts that the save and
	 * restore sequences depend on; classes 0 and 2 stay masked.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, class1_mask);
	spu_int_mask_set(spu, 2, 0ul);
	spin_unlock_irq(&spu->register_lock);
}

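/*
 * Queue a DMA command on the problem-state MFC command interface,
 * splitting requests larger than MFC_MAX_DMA_SIZE and retrying while
 * the command queue reports busy.  Used below to move local store and
 * the save/restore code between the SPU and the CSA.
 */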
static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
			       unsigned int ls_offset, unsigned int size,
			       unsigned int tag, unsigned int rclass,
			       unsigned int cmd)
{
	struct spu_problem __iomem *prob = spu->problem;
	union mfc_tag_size_class_cmd command;
	unsigned int transfer_size;
	volatile unsigned int status = 0x0;

	while (size > 0) {
		transfer_size =
		    (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
		command.u.mfc_size = transfer_size;
		command.u.mfc_tag = tag;
		command.u.mfc_rclassid = rclass;
		command.u.mfc_cmd = cmd;
		do {
			out_be32(&prob->mfc_lsa_W, ls_offset);
			out_be64(&prob->mfc_ea_W, ea);
			out_be64(&prob->mfc_union_W.all64, command.all64);
			status =
			    in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
			if (unlikely(status & 0x2)) {
				cpu_relax();
			}
		} while (status & 0x3);
		size -= transfer_size;
		ea += transfer_size;
		ls_offset += transfer_size;
	}
	return 0;
}

static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_PUT_CMD;

	/*
	 * Issue a DMA command to copy the first 16K bytes of local
	 * store to the CSA; the SPU-side save code handles the
	 * remainder.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Write 0 to SPU_NPC so the SPU starts executing at local
	 * store address 0, the fixed entry point of the save and
	 * restore code, with the NPC interrupt-enable bit cleared.
	 */
	out_be32(&prob->spu_npc_RW, 0);
	eieio();
}

static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/*
	 * Write the upper 32 bits of the LSCSA effective address to
	 * SPU_Sig_Notify_1; the SPU-side code reads it to locate the
	 * CSA.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify1, addr64.ui[0]);
	eieio();
}

static inline void set_signot2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/*
	 * Write the lower 32 bits of the LSCSA effective address to
	 * SPU_Sig_Notify_2.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify2, addr64.ui[1]);
	eieio();
}

static inline void send_save_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_save_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_save_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/*
	 * Issue a DMA command to copy the context save code to local
	 * store, starting at LSA 0.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Set the tag-group query mask for tag group 0, which the
	 * DMA commands issued here use; completion is then detected
	 * by polling PPU_TagStatus.
	 */
	out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
	eieio();
}

static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask = MFC_TAGID_TO_TAGMASK(0);
	unsigned long flags;

	/*
	 * Poll PPU_TagStatus until the tag group 0 DMA completes.
	 * Polling is used here instead of the tag-group completion
	 * interrupt, so afterwards clear any class 0 and class 2
	 * interrupt status the DMA may have latched.
	 */
	POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	local_irq_restore(flags);
}

static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	unsigned long flags;

	/*
	 * Poll SPU_Status until the SPU stops running, then clear
	 * any class 0 and class 2 interrupt status raised while it
	 * ran, for the same reason as above.
	 */
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	local_irq_restore(flags);
}

static inline int check_save_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/*
	 * The SPU-side save code ends with a stop-and-signal
	 * instruction carrying the SPU_SAVE_COMPLETE code; any other
	 * SPU_Status value indicates a failed save.
	 */
	complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
{
	/*
	 * If needed, notify the owner of a terminated SPU
	 * application.  Not implemented yet.
	 */
}

static inline void suspend_mfc_and_halt_decr(struct spu_state *csa,
					     struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Suspend the MFC DMA queue and halt the decrementer in a
	 * single MFC_CNTL write.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
		 MFC_CNTL_DECREMENTER_HALTED);
	eieio();
}

static inline void wait_suspend_mfc_complete(struct spu_state *csa,
					     struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Poll MFC_CNTL until the DMA queue suspend completes.
	 */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
			 MFC_CNTL_SUSPEND_COMPLETE);
}

static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * If the SPU is running, stop it while handling the special
	 * cases: wait out an isolate exit in progress, run the
	 * isolate exit sequence for isolated or isolate-load state,
	 * and stop an SPU that is waiting on a channel.  Returns 1
	 * if the context was running, otherwise 0.
	 */
	if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STATUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_WAITING_FOR_CHANNEL) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		return 1;
	}
	return 0;
}

static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * If the SPU is stopped but still carries isolate exit,
	 * isolate load, or isolated state status, run it briefly
	 * (with master run control set) to clear that state.
	 */
	if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STATUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
	}
}

static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	u64 idx;
	int i;

	/*
	 * Reset the listed SPU channels to zero.
	 */

	/* Reset CH 1 */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	out_be64(&priv2->spu_chnldata_RW, 0UL);

	/* Reset the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
	u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
	u64 idx;
	int i;

	/*
	 * Reset the counts for channels 21, 23, 28, 29 and 30 to
	 * their default (power-on) values.
	 */
	for (i = 0; i < 5; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void setup_spu_status_part1(struct spu_state *csa,
					  struct spu *spu)
{
	u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
	u32 status_I = SPU_STATUS_INVALID_INSTR;
	u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
	u32 status_S = SPU_STATUS_SINGLE_STEP;
	u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
	u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
	u32 status_code;

	/*
	 * Determine how the SPU stopped and encode that condition,
	 * together with the 16-bit stop-and-signal code, into the
	 * LSCSA stopped_status slots.  The SPU-side restore code
	 * uses these to re-create the exact stop condition once the
	 * context has been restored.  Combined conditions are
	 * checked first, so they take precedence over single ones.
	 */
	status_code =
	    (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
	if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {

		/* SPU stopped by stop-and-signal with invalid
		 * instruction.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {

		/* SPU stopped by stop-and-signal with halt.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {

		/* SPU stopped by stop-and-signal in single-step mode.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {

		/* SPU stopped by invalid instruction in single-step
		 * mode.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P) == status_P) {

		/* SPU stopped by stop-and-signal.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_H) == status_H) {

		/* SPU stopped by halt.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;

	} else if ((csa->prob.spu_status_R & status_S) == status_S) {

		/* SPU stopped in single-step mode.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;

	} else if ((csa->prob.spu_status_R & status_I) == status_I) {

		/* SPU stopped by invalid instruction.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;

	}
}

static inline void setup_spu_status_part2(struct spu_state *csa,
					  struct spu *spu)
{
	u32 mask;

	/*
	 * If the saved SPU_Status has neither the running bit nor
	 * any stop condition set, record SPU_STOPPED_STATUS_R in the
	 * LSCSA instead.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
	}
}

static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Restore the resource allocation group ID and enable
	 * registers from the CSA.
	 */
	spu_resource_allocation_groupID_set(spu,
			csa->priv1.resource_allocation_groupID_RW);
	spu_resource_allocation_enable_set(spu,
			csa->priv1.resource_allocation_enable_RW);
}

static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_restore_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_restore_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/*
	 * Issue a DMA command to copy the context restore code to
	 * local store, starting at LSA 0.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void setup_decr(struct spu_state *csa, struct spu *spu)
{
	/*
	 * If the decrementer was running at save time, charge it for
	 * the cycles that elapsed while the context was switched
	 * out, flagging a wrap if it underflowed.  The decr_status
	 * slot tells the SPU-side restore code whether to restart
	 * the decrementer.
	 */
	if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
		cycles_t resume_time = get_cycles();
		cycles_t delta_time = resume_time - csa->suspend_time;

		csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING;
		if (csa->lscsa->decr.slot[0] < delta_time) {
			csa->lscsa->decr_status.slot[0] |=
				 SPU_DECR_STATUS_WRAPPED;
		}

		csa->lscsa->decr.slot[0] -= delta_time;
	} else {
		csa->lscsa->decr_status.slot[0] = 0;
	}
}

static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Copy the saved PU mailbox data into the LSCSA so the
	 * restore code can re-populate the mailbox.
	 */
	csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
}

static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Copy the saved PU interrupt mailbox data into the LSCSA.
	 */
	csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
}

static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/*
	 * The SPU-side restore code ends with a stop-and-signal
	 * instruction carrying the SPU_RESTORE_COMPLETE code; any
	 * other SPU_Status value indicates a failed restore.
	 */
	complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Restore the SPU_PrivCntl register from the CSA.
	 */
	out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
	eieio();
}

static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/*
	 * If the context was stopped with a definite status (stop,
	 * halt, single-step or invalid instruction), restart the SPU
	 * so that the tail of the SPU-side restore code can run to
	 * the matching stop condition.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	if (csa->prob.spu_status_R & mask) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}

static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/*
	 * If the context had no definite stop status, start the SPU,
	 * wait for it to come up, and then stop it again, leaving it
	 * idle as it was when saved.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
				 SPU_STATUS_RUNNING);
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}

static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GET_CMD;

	/*
	 * Issue a DMA command to copy the first 16K bytes of local
	 * store back from the CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Suspend the MFC DMA queue again before the privileged
	 * state is restored.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
	eieio();
}

static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Mask all interrupt classes and clear any latched status.
	 * The saved masks are written back later by
	 * reenable_interrupts().
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	spin_unlock_irq(&spu->register_lock);
}

static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/*
	 * If the saved context's DMA queues were not empty, restore
	 * all 8 PU and 16 SPU MFC command queue entries.
	 */
	if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
		for (i = 0; i < 8; i++) {
			out_be64(&priv2->puq[i].mfc_cq_data0_RW,
				 csa->priv2.puq[i].mfc_cq_data0_RW);
			out_be64(&priv2->puq[i].mfc_cq_data1_RW,
				 csa->priv2.puq[i].mfc_cq_data1_RW);
			out_be64(&priv2->puq[i].mfc_cq_data2_RW,
				 csa->priv2.puq[i].mfc_cq_data2_RW);
			out_be64(&priv2->puq[i].mfc_cq_data3_RW,
				 csa->priv2.puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
				 csa->priv2.spuq[i].mfc_cq_data0_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
				 csa->priv2.spuq[i].mfc_cq_data1_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
				 csa->priv2.spuq[i].mfc_cq_data2_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
				 csa->priv2.spuq[i].mfc_cq_data3_RW);
		}
	}
	eieio();
}

static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Restore the PPU_QueryMask register from the CSA.
	 */
	out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
	eieio();
}

static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Restore the PPU_QueryType register from the CSA.
	 */
	out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
	eieio();
}

static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Restore the MFC_CSR_TSQ register from the CSA.
	 */
	out_be64(&priv2->spu_tag_status_query_RW,
		 csa->priv2.spu_tag_status_query_RW);
	eieio();
}

static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Restore the MFC command buffers (MFC_CSR_CMD1 and
	 * MFC_CSR_CMD2) from the CSA.
	 */
	out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
	out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
	eieio();
}

static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Restore the MFC_CSR_ATO register from the CSA.
	 */
	out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
}

static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Restore the MFC_TClassID register from the CSA.
	 */
	spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
	eieio();
}

static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
{
	u64 ch0_cnt, ch0_data;
	u64 ch1_data;

	/*
	 * Raise the lock-line-reservation-lost event in the saved
	 * channel state: any reservation held at save time is gone
	 * by now.  Set the event bit in the channel 0 data and, if
	 * the event is enabled in the channel 1 mask and was not
	 * already pending, make it visible by setting the channel 0
	 * count to 1.
	 */
	ch0_cnt = csa->spu_chnlcnt_RW[0];
	ch0_data = csa->spu_chnldata_RW[0];
	ch1_data = csa->spu_chnldata_RW[1];
	csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
	if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
	    (ch1_data & MFC_LLR_LOST_EVENT)) {
		csa->spu_chnlcnt_RW[0] = 1;
	}
}

static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
{
	/*
	 * If the decrementer wrapped while the context was switched
	 * out, post the decrementer event (bit 0x20) in the saved
	 * channel 0 data in the same way, bumping the channel 0
	 * count if the event is enabled and not already pending.
	 */
	if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED))
		return;

	if ((csa->spu_chnlcnt_RW[0] == 0) &&
	    (csa->spu_chnldata_RW[1] & 0x20) &&
	    !(csa->spu_chnldata_RW[0] & 0x20))
		csa->spu_chnlcnt_RW[0] = 1;

	csa->spu_chnldata_RW[0] |= 0x20;
}

static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/*
	 * Restore the data and counts for channels 0, 3, 4, 24, 25
	 * and 27 from the CSA.
	 */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
		out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
		eieio();
	}
}

static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[3] = { 9UL, 21UL, 23UL };
	u64 ch_counts[3] = { 1UL, 16UL, 1UL };
	u64 idx;
	int i;

	/*
	 * Restore the counts for channels 9, 21 and 23; channel 21
	 * (MFC command) gets its saved count, the others their
	 * default count of 1.
	 */
	ch_counts[1] = csa->spu_chnlcnt_RW[21];
	for (i = 0; i < 3; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Restore the SPU_LSLR register from the CSA.
	 */
	out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
	eieio();
}

static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Restore the SPU_Cfg register from the CSA.
	 */
	out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
	eieio();
}

static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Restore performance monitor trace state, if saved.
	 * Not implemented yet.
	 */
}

static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Restore the SPU_NPC register from the CSA.
	 */
	out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
	eieio();
}

static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/*
	 * Restore the SPU outbound mailbox: the channel 29 count and
	 * the four saved data entries.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
	for (i = 0; i < 4; i++) {
		out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
	}
	eieio();
}

static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * If the saved mailbox status shows no PU mailbox data was
	 * pending, drain any stale entry by reading the mailbox.
	 */
	if ((csa->prob.mb_stat_R & 0xFF) == 0) {
		in_be32(&prob->pu_mb_R);
		eieio();
	}
}

static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Likewise for the PU interrupt mailbox; also clear the
	 * class 2 mailbox interrupt the read may have latched.
	 */
	if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
		in_be64(&priv2->puint_mb_R);
		eieio();
		spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
		eieio();
	}
}

static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Restore the MFC_SR1 register from the CSA.
	 */
	spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
	eieio();
}

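/*
 * Route this SPU's interrupts to the CPU on which the context last
 * ran.
 */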
static inline void set_int_route(struct spu_state *csa, struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	spu_cpu_affinity_set(spu, ctx->last_ran);
}

static inline void restore_other_spu_access(struct spu_state *csa,
					    struct spu *spu)
{
	/*
	 * Restore other SPUs' access to this SPU, if provided.
	 * Not implemented yet.
	 */
}

static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * If the context was running at save time, set SPU_RunCntl
	 * runnable again.
	 */
	if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
	}
}

static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Restore the MFC_CNTL register from the CSA.
	 */
	out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
	eieio();

	/*
	 * The queue is put back into the same state that was evident
	 * prior to the context switch.  The suspend flag was added
	 * to the saved state in the csa if the operational state was
	 * suspending or suspended; in that case, the code that
	 * suspended the MFC is responsible for continuing it.  Note
	 * that SPE faults do not change the operational state of the
	 * SPU.
	 */
}

static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Re-enable user-space access to this SPU by remapping the
	 * virtual pages assigned to its problem-state MMIO area.
	 * Not implemented here; mappings are handled elsewhere in
	 * spufs.
	 */
}

static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Reset the software context switch active flag.  Nothing to
	 * do here: the pending flag was already cleared in
	 * set_switch_active().
	 */
}

static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Re-enable the interrupt masks saved by disable_interrupts().
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
	spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
	spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
	spin_unlock_irq(&spu->register_lock);
}

static int quiesce_spu(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combined steps of the SPU context save sequence that
	 * quiesce the SPU.  Returns 0 on success, 2 if the SPU is in
	 * isolated state on entry, and 6 if it enters isolated state
	 * while interrupts are being disabled; in either failure
	 * case the caller falls back to harvest().
	 */
	if (check_spu_isolate(prev, spu)) {
		return 2;
	}
	disable_interrupts(prev, spu);
	set_watchdog_timer(prev, spu);
	inhibit_user_access(prev, spu);
	if (check_spu_isolate(prev, spu)) {
		return 6;
	}
	set_switch_pending(prev, spu);
	save_mfc_cntl(prev, spu);
	save_spu_runcntl(prev, spu);
	save_mfc_sr1(prev, spu);
	save_spu_status(prev, spu);
	save_mfc_stopped_status(prev, spu);
	halt_mfc_decr(prev, spu);
	save_timebase(prev, spu);
	remove_other_spu_access(prev, spu);
	do_mfc_mssync(prev, spu);
	issue_mfc_tlbie(prev, spu);
	handle_pending_interrupts(prev, spu);

	return 0;
}

static void save_csa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combined steps of the SPU context save sequence that save
	 * regions of the privileged and problem state areas.
	 */
	save_mfc_queues(prev, spu);
	save_ppu_querymask(prev, spu);
	save_ppu_querytype(prev, spu);
	save_ppu_tagstatus(prev, spu);
	save_mfc_csr_tsq(prev, spu);
	save_mfc_csr_cmd(prev, spu);
	save_mfc_csr_ato(prev, spu);
	save_mfc_tclass_id(prev, spu);
	set_mfc_tclass_id(prev, spu);
	save_mfc_cmd(prev, spu);
	purge_mfc_queue(prev, spu);
	wait_purge_complete(prev, spu);
	setup_mfc_sr1(prev, spu);
	save_spu_npc(prev, spu);
	save_spu_privcntl(prev, spu);
	reset_spu_privcntl(prev, spu);
	save_spu_lslr(prev, spu);
	reset_spu_lslr(prev, spu);
	save_spu_cfg(prev, spu);
	save_pm_trace(prev, spu);
	save_mfc_rag(prev, spu);
	save_ppu_mb_stat(prev, spu);
	save_ppu_mb(prev, spu);
	save_ppuint_mb(prev, spu);
	save_ch_part1(prev, spu);
	save_spu_mb(prev, spu);
	reset_ch(prev, spu);
}

static void save_lscsa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combined steps of the SPU context save sequence that save
	 * regions of the local store and register file, mostly by
	 * running the SPU-side save code.
	 */
	resume_mfc_queue(prev, spu);
	/* Set up SLB entries for the save code and the LSCSA. */
	setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code));
	set_switch_active(prev, spu);
	enable_interrupts(prev, spu);
	save_ls_16kb(prev, spu);
	set_spu_npc(prev, spu);
	set_signot1(prev, spu);
	set_signot2(prev, spu);
	send_save_code(prev, spu);
	set_ppu_querymask(prev, spu);
	wait_tag_complete(prev, spu);
	wait_spu_stopped(prev, spu);
}

static void force_spu_isolate_exit(struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Stop SPE execution and wait for completion. */
	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
	iobarrier_rw();
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	/* Restart SPE master runcntl. */
	spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
	iobarrier_w();

	/* Initiate isolate exit request and wait for completion. */
	out_be64(&priv2->spu_privcntl_RW, 4LL);
	iobarrier_w();
	out_be32(&prob->spu_runcntl_RW, 2);
	iobarrier_rw();
	POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
			  & SPU_STATUS_STOPPED_BY_STOP));

	/* Reset load request to normal. */
	out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
	iobarrier_w();
}

/**
 * stop_spu_isolate
 *	Check SPU run-control state and force isolated
 *	exit function as necessary.
 */
static void stop_spu_isolate(struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
		/* The SPU is in isolated state; the only way
		 * to get it out is to perform an isolated
		 * exit (clean) operation.
		 */
		force_spu_isolate_exit(spu);
	}
}

static void harvest(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Reset an SPU to a known, quiesced state, e.g. after a
	 * failed save or when restoring onto hardware whose previous
	 * state is unknown.
	 */
	disable_interrupts(prev, spu);
	inhibit_user_access(prev, spu);
	terminate_spu_app(prev, spu);
	set_switch_pending(prev, spu);
	stop_spu_isolate(spu);
	remove_other_spu_access(prev, spu);
	suspend_mfc_and_halt_decr(prev, spu);
	wait_suspend_mfc_complete(prev, spu);
	if (!suspend_spe(prev, spu))
		clear_spu_status(prev, spu);
	do_mfc_mssync(prev, spu);
	issue_mfc_tlbie(prev, spu);
	handle_pending_interrupts(prev, spu);
	purge_mfc_queue(prev, spu);
	wait_purge_complete(prev, spu);
	reset_spu_privcntl(prev, spu);
	reset_spu_lslr(prev, spu);
	setup_mfc_sr1(prev, spu);
	spu_invalidate_slbs(spu);
	reset_ch_part1(prev, spu);
	reset_ch_part2(prev, spu);
	enable_interrupts(prev, spu);
	set_switch_active(prev, spu);
	set_mfc_tclass_id(prev, spu);
	resume_mfc_queue(prev, spu);
}

static void restore_lscsa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Combined steps of the SPU context restore sequence that
	 * restore regions of the local store and register file,
	 * mostly by running the SPU-side restore code.
	 */
	set_watchdog_timer(next, spu);
	setup_spu_status_part1(next, spu);
	setup_spu_status_part2(next, spu);
	restore_mfc_rag(next, spu);
	/* Set up SLB entries for the restore code and the LSCSA. */
	setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code));
	set_spu_npc(next, spu);
	set_signot1(next, spu);
	set_signot2(next, spu);
	setup_decr(next, spu);
	setup_ppu_mb(next, spu);
	setup_ppuint_mb(next, spu);
	send_restore_code(next, spu);
	set_ppu_querymask(next, spu);
	wait_tag_complete(next, spu);
	wait_spu_stopped(next, spu);
}

static void restore_csa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Combined steps of the SPU context restore sequence that
	 * restore regions of the privileged and problem state areas.
	 */
	restore_spu_privcntl(next, spu);
	restore_status_part1(next, spu);
	restore_status_part2(next, spu);
	restore_ls_16kb(next, spu);
	wait_tag_complete(next, spu);
	suspend_mfc(next, spu);
	wait_suspend_mfc_complete(next, spu);
	issue_mfc_tlbie(next, spu);
	clear_interrupts(next, spu);
	restore_mfc_queues(next, spu);
	restore_ppu_querymask(next, spu);
	restore_ppu_querytype(next, spu);
	restore_mfc_csr_tsq(next, spu);
	restore_mfc_csr_cmd(next, spu);
	restore_mfc_csr_ato(next, spu);
	restore_mfc_tclass_id(next, spu);
	set_llr_event(next, spu);
	restore_decr_wrapped(next, spu);
	restore_ch_part1(next, spu);
	restore_ch_part2(next, spu);
	restore_spu_lslr(next, spu);
	restore_spu_cfg(next, spu);
	restore_pm_trace(next, spu);
	restore_spu_npc(next, spu);
	restore_spu_mb(next, spu);
	check_ppu_mb_stat(next, spu);
	check_ppuint_mb_stat(next, spu);
	spu_invalidate_slbs(spu);
	restore_mfc_sr1(next, spu);
	set_int_route(next, spu);
	restore_other_spu_access(next, spu);
	restore_spu_runcntl(next, spu);
	restore_mfc_cntl(next, spu);
	enable_user_access(next, spu);
	reset_switch_active(next, spu);
	reenable_interrupts(next, spu);
}

static int __do_spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	/*
	 * SPU context save can be broken into three phases:
	 *
	 *     (a) quiesce the SPU;
	 *     (b) save the CSA (privileged and problem state areas),
	 *         performed by the PPE;
	 *     (c) save the LSCSA (local store and register file),
	 *         mostly performed by the SPU executing the save
	 *         code.
	 *
	 * Returns 0 on success, 2 or 6 if the SPU could not be
	 * quiesced (in which case it is harvested), or non-zero if
	 * the SPU-side save failed.
	 */
	rc = quiesce_spu(prev, spu);
	switch (rc) {
	default:
	case 2:
	case 6:
		harvest(prev, spu);
		return rc;
	case 0:
		break;
	}
	save_csa(prev, spu);
	save_lscsa(prev, spu);
	return check_save_status(prev, spu);
}

static int __do_spu_restore(struct spu_state *next, struct spu *spu)
{
	int rc;

	/*
	 * SPU context restore can be broken into three phases:
	 *
	 *     (a) harvest (reset) the SPU, already done by the
	 *         caller;
	 *     (b) restore the LSCSA, mostly performed by the SPU
	 *         executing the restore code;
	 *     (c) restore the CSA, performed by the PPE.
	 *
	 * Returns 0 on success, or the non-zero restore status if
	 * the SPU-side restore failed.
	 */
	restore_lscsa(next, spu);
	rc = check_restore_status(next, spu);
	switch (rc) {
	default:
		/* Failed. Return now. */
		return rc;
	case 0:
		/* Fall through to next step. */
		break;
	}
	restore_csa(next, spu);

	return 0;
}

/**
 * spu_save - SPU context save, with locking.
 * @prev:	pointer to SPU context save area, to be saved.
 * @spu:	pointer to SPU iomem structure.
 *
 * Acquire locks, perform the save operation then return.
 */
int spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);
	rc = __do_spu_save(prev, spu);
	release_spu_lock(spu);
	if (rc != 0 && rc != 2 && rc != 6) {
		panic("%s failed on SPU[%d], rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(spu_save);

/**
 * spu_restore - SPU context restore, with harvest and locking.
 * @new:	pointer to SPU context save area, to be restored.
 * @spu:	pointer to SPU iomem structure.
 *
 * Perform harvest + restore, as we may not be coming
 * from a previous successful save operation, and the
 * hardware state is unknown.
 */
int spu_restore(struct spu_state *new, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);
	harvest(NULL, spu);
	spu->slb_replace = 0;
	rc = __do_spu_restore(new, spu);
	release_spu_lock(spu);
	if (rc) {
		panic("%s failed on SPU[%d] rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(spu_restore);

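/*
 * Default problem-state values for a newly created context: power-on
 * channel counts for channels 9, 21, 23, 28 and 30, run control set
 * to stopped, and mailbox status 0x000400 (mailboxes empty, with
 * space available in the SPU inbound mailbox).
 */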
static void init_prob(struct spu_state *csa)
{
	csa->spu_chnlcnt_RW[9] = 1;
	csa->spu_chnlcnt_RW[21] = 16;
	csa->spu_chnlcnt_RW[23] = 1;
	csa->spu_chnlcnt_RW[28] = 1;
	csa->spu_chnlcnt_RW[30] = 1;
	csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
	csa->prob.mb_stat_R = 0x000400;
}

static void init_priv1(struct spu_state *csa)
{
	/* Enable decode, relocate, tlbie response, master run control. */
	csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
	    MFC_STATE1_MASTER_RUN_CONTROL_MASK |
	    MFC_STATE1_PROBLEM_STATE_MASK |
	    MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;

	/* Enable OS-specific set of interrupts. */
	csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
	    CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
	    CLASS0_ENABLE_SPU_ERROR_INTR;
	csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;
	csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
	    CLASS2_ENABLE_SPU_HALT_INTR |
	    CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;
}

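/*
 * Default privileged-2 state: no local-store limit (full address
 * mask), and MFC control describing a resumed, normally operating
 * DMA queue with all queues empty.
 */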
static void init_priv2(struct spu_state *csa)
{
	csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
	csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
	    MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
	    MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
}

/**
 * spu_init_csa - allocate and initialize an SPU context save area.
 *
 * Allocate and initialize the contents of an SPU context save area.
 * This includes enabling address translation, interrupt masks, etc.,
 * as specified by the given csa.  To be used for instantiating new
 * contexts.
 *
 * Returns 0 on success, or a negative errno on failure.
 */
int spu_init_csa(struct spu_state *csa)
{
	int rc;

	if (!csa)
		return -EINVAL;
	memset(csa, 0, sizeof(struct spu_state));

	rc = spu_alloc_lscsa(csa);
	if (rc)
		return rc;

	spin_lock_init(&csa->register_lock);

	init_prob(csa);
	init_priv1(csa);
	init_priv2(csa);

	return 0;
}

void spu_fini_csa(struct spu_state *csa)
{
	spu_free_lscsa(csa);
}