#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/smp.h>

#if defined(CONFIG_PPC_SPLPAR)
#include <asm/hvcall.h>
#include <asm/smp.h>
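
/*
 * See if the lock is held by a virtual CPU that the hypervisor has
 * preempted (indicated by an odd yield count).  If so, give our time
 * slice to the holder so it can run and release the lock sooner.
 */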
void splpar_spin_yield(arch_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;

	lock_value = lock->slock;
	if (lock_value == 0)
		return;
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);

	yield_count = yield_count_of(holder_cpu);
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	if (lock->slock != lock_value)
		return;		/* something has changed */
	yield_to_preempted(holder_cpu, yield_count);
}
EXPORT_SYMBOL_GPL(splpar_spin_yield);
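
/*
 * Waiting for a read lock or a write lock on a rwlock.
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 */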
void splpar_rw_yield(arch_rwlock_t *rw)
{
	int lock_value;
	unsigned int holder_cpu, yield_count;

	lock_value = rw->lock;
	if (lock_value >= 0)
		return;		/* no write lock at present */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);

	yield_count = yield_count_of(holder_cpu);
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	if (rw->lock != lock_value)
		return;		/* something has changed */
	yield_to_preempted(holder_cpu, yield_count);
}
#endif