Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
0004  */
0005 
0006 #ifndef __ASM_ARC_SMP_H
0007 #define __ASM_ARC_SMP_H
0008 
0009 #ifdef CONFIG_SMP
0010 
0011 #include <linux/types.h>
0012 #include <linux/init.h>
0013 #include <linux/threads.h>
0014 
/* Cheap cpu-id lookup: the cpu number is cached in thread_info */
#define raw_smp_processor_id() (current_thread_info()->cpu)

/* including cpumask.h leads to cyclic deps hence this Forward declaration */
struct cpumask;

/*
 * APIs provided by arch SMP code to generic code
 */
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

/*
 * APIs provided by arch SMP code to rest of arch code
 */
extern void __init smp_init_cpus(void);
extern void first_lines_of_secondary(void);
extern const char *arc_platform_smp_cpuinfo(void);

/*
 * API expected BY platform smp code (FROM arch smp code)
 *
 * smp_ipi_irq_setup:
 *  Takes @cpu and @hwirq to which the arch-common ISR is hooked up
 */
extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq);
0040 
/*
 * struct plat_smp_ops  - SMP callbacks provided by platform to ARC SMP
 *
 * @info:       SoC SMP specific info for /proc/cpuinfo etc
 * @init_early_smp: A SMP specific h/w block can init itself
 *          Could be common across platforms so not covered by
 *          mach_desc->init_early()
 * @init_per_cpu:   Called for each core so SMP h/w block driver can do
 *          any needed setup per cpu (e.g. IPI request)
 * @cpu_kick:       For Master to kickstart a cpu (optionally at a PC)
 * @ipi_send:       To send IPI to a @cpu
 * @ipi_clear:      To clear IPI received at @irq
 */
struct plat_smp_ops {
    const char  *info;
    void        (*init_early_smp)(void);
    void        (*init_per_cpu)(int cpu);
    void        (*cpu_kick)(int cpu, unsigned long pc);
    void        (*ipi_send)(int cpu);
    void        (*ipi_clear)(int irq);
};

/* TBD: stop exporting it for direct population by platform */
extern struct plat_smp_ops  plat_smp_ops;
0065 
0066 #else /* CONFIG_SMP */
0067 
/* UP stub: no secondary CPUs to enumerate */
static inline void smp_init_cpus(void) {}

/* UP stub: nothing SMP-specific to report for /proc/cpuinfo */
static inline const char *arc_platform_smp_cpuinfo(void)
{
    return "";
}
0073 
0074 #endif  /* !CONFIG_SMP */
0075 
0076 /*
0077  * ARC700 doesn't support atomic Read-Modify-Write ops.
0078  * Originally interrupts had to be disabled around code to guarantee atomicity.
0079  * The LLOCK/SCOND insns allow writing interrupt-hassle-free atomic ops
0080  * based on retry-if-irq-in-atomic (with hardware assist).
0081  * However despite these, we provide the IRQ disabling variant
0082  *
0083  * (1) These insns were introduced only in the 4.10 release, so support
0084  *  is needed for older releases.
0085  *
0086  * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
0087  *  guaranteed by the platform (not something which core handles).
0088  *  Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
0089  *  disabling for atomicity.
0090  *
0091  *  However exported spinlock API is not usable due to cyclic hdr deps
0092  *  (even after system.h disintegration upstream)
0093  *  asm/bitops.h -> linux/spinlock.h -> linux/preempt.h
0094  *      -> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h
0095  *
0096  *  So the workaround is to use the lowest level arch spinlock API.
0097  *  The exported spinlock API is smart enough to be NOP for !CONFIG_SMP,
0098  *  but same is not true for ARCH backend, hence the need for 2 variants
0099  */
0100 #ifndef CONFIG_ARC_HAS_LLSC
0101 
0102 #include <linux/irqflags.h>
0103 #ifdef CONFIG_SMP
0104 
0105 #include <asm/spinlock.h>
0106 
/* Single global lock serializing all RMW sequences across CPUs */
extern arch_spinlock_t smp_atomic_ops_lock;

/*
 * IRQs are disabled first, then the arch spinlock taken, so the RMW
 * sequence is protected both from other CPUs and from local interrupts.
 */
#define atomic_ops_lock(flags)  do {        \
    local_irq_save(flags);          \
    arch_spin_lock(&smp_atomic_ops_lock);   \
} while (0)

/* Release in reverse order: drop the spinlock, then restore IRQ state */
#define atomic_ops_unlock(flags) do {       \
    arch_spin_unlock(&smp_atomic_ops_lock); \
    local_irq_restore(flags);       \
} while (0)
0118 
0119 #else /* !CONFIG_SMP */
0120 
/* UP: no cross-CPU contention, so disabling local IRQs alone suffices */
#define atomic_ops_lock(flags)      local_irq_save(flags)
#define atomic_ops_unlock(flags)    local_irq_restore(flags)
0123 
0124 #endif /* !CONFIG_SMP */
0125 
0126 #endif  /* !CONFIG_ARC_HAS_LLSC */
0127 
0128 #endif