
0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * This file contains functions which manage clock event devices.
0004  *
0005  * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
0006  * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
0007  * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
0008  */
0009 
0010 #include <linux/clockchips.h>
0011 #include <linux/hrtimer.h>
0012 #include <linux/init.h>
0013 #include <linux/module.h>
0014 #include <linux/smp.h>
0015 #include <linux/device.h>
0016 
0017 #include "tick-internal.h"
0018 
0019 /* The registered clock event devices */
0020 static LIST_HEAD(clockevent_devices);
0021 static LIST_HEAD(clockevents_released);
0022 /* Protection for the above */
0023 static DEFINE_RAW_SPINLOCK(clockevents_lock);
0024 /* Protection for unbind operations */
0025 static DEFINE_MUTEX(clockevents_mutex);
0026 
0027 struct ce_unbind {
0028     struct clock_event_device *ce;
0029     int res;
0030 };
0031 
0032 static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
0033             bool ismax)
0034 {
0035     u64 clc = (u64) latch << evt->shift;
0036     u64 rnd;
0037 
0038     if (WARN_ON(!evt->mult))
0039         evt->mult = 1;
0040     rnd = (u64) evt->mult - 1;
0041 
0042     /*
0043      * Upper bound sanity check. If the backwards conversion does
0044      * not equal latch, we know that the above shift overflowed.
0045      */
0046     if ((clc >> evt->shift) != (u64)latch)
0047         clc = ~0ULL;
0048 
0049     /*
0050      * Scaled math oddities:
0051      *
0052      * For mult <= (1 << shift) we can safely add mult - 1 to
0053      * prevent integer rounding loss. So the backwards conversion
0054      * from nsec to device ticks will be correct.
0055      *
0056      * For mult > (1 << shift), i.e. device frequency is > 1GHz we
0057      * need to be careful. Adding mult - 1 will result in a value
0058      * which when converted back to device ticks can be larger
0059      * than latch by up to (mult - 1) >> shift. For the min_delta
0060      * calculation we still want to apply this in order to stay
0061      * above the minimum device ticks limit. For the upper limit
0062      * we would end up with a latch value larger than the upper
0063      * limit of the device, so we omit the add to stay below the
0064      * device upper boundary.
0065      *
0066      * Also omit the add if it would overflow the u64 boundary.
0067      */
0068     if ((~0ULL - clc > rnd) &&
0069         (!ismax || evt->mult <= (1ULL << evt->shift)))
0070         clc += rnd;
0071 
0072     do_div(clc, evt->mult);
0073 
0074     /* Deltas less than 1usec are pointless noise */
0075     return clc > 1000 ? clc : 1000;
0076 }
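/*
 * Worked example (illustrative figures, not from any particular driver):
 * assume a 10 MHz event device for which clockevents_calc_mult_shift()
 * produced shift = 32 and mult ~= 42949673 (roughly freq * 2^shift /
 * NSEC_PER_SEC, the nsec -> device-ticks factor). Converting
 * latch = 1000 ticks back to nanoseconds:
 *
 *   clc  = 1000 << 32                ( = 4294967296000 )
 *   clc += mult - 1                  (rounding guard, !ismax case)
 *   clc /= 42949673                  ( ~= 100000 ns )
 *
 * i.e. 100 usec, as expected for 1000 ticks at 10 MHz. Any result below
 * 1000 ns is clamped to 1000 ns by the return above.
 */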
0077 
0078 /**
0079  * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
0080  * @latch:  value to convert
0081  * @evt:    pointer to clock event device descriptor
0082  *
0083  * Math helper, returns latch value converted to nanoseconds (bound checked)
0084  */
0085 u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
0086 {
0087     return cev_delta2ns(latch, evt, false);
0088 }
0089 EXPORT_SYMBOL_GPL(clockevent_delta2ns);
0090 
0091 static int __clockevents_switch_state(struct clock_event_device *dev,
0092                       enum clock_event_state state)
0093 {
0094     if (dev->features & CLOCK_EVT_FEAT_DUMMY)
0095         return 0;
0096 
0097     /* Transition with new state-specific callbacks */
0098     switch (state) {
0099     case CLOCK_EVT_STATE_DETACHED:
0100         /* The clockevent device is getting replaced. Shut it down (fall through). */
0101 
0102     case CLOCK_EVT_STATE_SHUTDOWN:
0103         if (dev->set_state_shutdown)
0104             return dev->set_state_shutdown(dev);
0105         return 0;
0106 
0107     case CLOCK_EVT_STATE_PERIODIC:
0108         /* Core internal bug */
0109         if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
0110             return -ENOSYS;
0111         if (dev->set_state_periodic)
0112             return dev->set_state_periodic(dev);
0113         return 0;
0114 
0115     case CLOCK_EVT_STATE_ONESHOT:
0116         /* Core internal bug */
0117         if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
0118             return -ENOSYS;
0119         if (dev->set_state_oneshot)
0120             return dev->set_state_oneshot(dev);
0121         return 0;
0122 
0123     case CLOCK_EVT_STATE_ONESHOT_STOPPED:
0124         /* Core internal bug */
0125         if (WARN_ONCE(!clockevent_state_oneshot(dev),
0126                   "Current state: %d\n",
0127                   clockevent_get_state(dev)))
0128             return -EINVAL;
0129 
0130         if (dev->set_state_oneshot_stopped)
0131             return dev->set_state_oneshot_stopped(dev);
0132         else
0133             return -ENOSYS;
0134 
0135     default:
0136         return -ENOSYS;
0137     }
0138 }
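/*
 * Illustrative sketch (hypothetical driver, not part of this file): the
 * callbacks dispatched above are supplied by the clock event driver, e.g.:
 *
 *   static int foo_timer_shutdown(struct clock_event_device *ce)
 *   {
 *           // mask the interrupt / stop the counter in hardware
 *           return 0;
 *   }
 *
 *   static int foo_timer_set_oneshot(struct clock_event_device *ce)
 *   {
 *           // put the hardware into one-shot (single interrupt) mode
 *           return 0;
 *   }
 *
 * A driver only fills in the callbacks for the states its hardware
 * supports; unsupported states are rejected here via the
 * CLOCK_EVT_FEAT_* checks or the -ENOSYS fallbacks.
 */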
0139 
0140 /**
0141  * clockevents_switch_state - set the operating state of a clock event device
0142  * @dev:    device to modify
0143  * @state:  new state
0144  *
0145  * Must be called with interrupts disabled !
0146  */
0147 void clockevents_switch_state(struct clock_event_device *dev,
0148                   enum clock_event_state state)
0149 {
0150     if (clockevent_get_state(dev) != state) {
0151         if (__clockevents_switch_state(dev, state))
0152             return;
0153 
0154         clockevent_set_state(dev, state);
0155 
0156         /*
0157          * A nsec2cyc multiplicator of 0 is invalid and we'd crash
0158          * on it, so fix it up and emit a warning:
0159          */
0160         if (clockevent_state_oneshot(dev)) {
0161             if (WARN_ON(!dev->mult))
0162                 dev->mult = 1;
0163         }
0164     }
0165 }
0166 
0167 /**
0168  * clockevents_shutdown - shutdown the device and clear next_event
0169  * @dev:    device to shutdown
0170  */
0171 void clockevents_shutdown(struct clock_event_device *dev)
0172 {
0173     clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
0174     dev->next_event = KTIME_MAX;
0175 }
0176 
0177 /**
0178  * clockevents_tick_resume -    Resume the tick device before using it again
0179  * @dev:            device to resume
0180  */
0181 int clockevents_tick_resume(struct clock_event_device *dev)
0182 {
0183     int ret = 0;
0184 
0185     if (dev->tick_resume)
0186         ret = dev->tick_resume(dev);
0187 
0188     return ret;
0189 }
0190 
0191 #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
0192 
0193 /* Limit min_delta to a jiffie */
0194 #define MIN_DELTA_LIMIT     (NSEC_PER_SEC / HZ)
0195 
0196 /**
0197  * clockevents_increase_min_delta - raise minimum delta of a clock event device
0198  * @dev:       device to increase the minimum delta
0199  *
0200  * Returns 0 on success, -ETIME when the minimum delta reached the limit.
0201  */
0202 static int clockevents_increase_min_delta(struct clock_event_device *dev)
0203 {
0204     /* Nothing to do if we already reached the limit */
0205     if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
0206         printk_deferred(KERN_WARNING
0207                 "CE: Reprogramming failure. Giving up\n");
0208         dev->next_event = KTIME_MAX;
0209         return -ETIME;
0210     }
0211 
0212     if (dev->min_delta_ns < 5000)
0213         dev->min_delta_ns = 5000;
0214     else
0215         dev->min_delta_ns += dev->min_delta_ns >> 1;
0216 
0217     if (dev->min_delta_ns > MIN_DELTA_LIMIT)
0218         dev->min_delta_ns = MIN_DELTA_LIMIT;
0219 
0220     printk_deferred(KERN_WARNING
0221             "CE: %s increased min_delta_ns to %llu nsec\n",
0222             dev->name ? dev->name : "?",
0223             (unsigned long long) dev->min_delta_ns);
0224     return 0;
0225 }
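/*
 * Example of the resulting backoff (illustrative, with HZ = 250 so
 * MIN_DELTA_LIMIT = 4000000 ns): starting from a too-small min_delta_ns
 * the sequence is 5000, 7500, 11250, ... growing by 50% per call until it
 * is capped at 4000000 ns; a further failure after that gives up with
 * -ETIME.
 */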
0226 
0227 /**
0228  * clockevents_program_min_delta - Set clock event device to the minimum delay.
0229  * @dev:    device to program
0230  *
0231  * Returns 0 on success, -ETIME when the retry loop failed.
0232  */
0233 static int clockevents_program_min_delta(struct clock_event_device *dev)
0234 {
0235     unsigned long long clc;
0236     int64_t delta;
0237     int i;
0238 
0239     for (i = 0;;) {
0240         delta = dev->min_delta_ns;
0241         dev->next_event = ktime_add_ns(ktime_get(), delta);
0242 
0243         if (clockevent_state_shutdown(dev))
0244             return 0;
0245 
0246         dev->retries++;
0247         clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
0248         if (dev->set_next_event((unsigned long) clc, dev) == 0)
0249             return 0;
0250 
0251         if (++i > 2) {
0252             /*
0253              * We tried 3 times to program the device with the
0254              * given min_delta_ns. Try to increase the minimum
0255              * delta, if that fails as well get out of here.
0256              */
0257             if (clockevents_increase_min_delta(dev))
0258                 return -ETIME;
0259             i = 0;
0260         }
0261     }
0262 }
0263 
0264 #else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
0265 
0266 /**
0267  * clockevents_program_min_delta - Set clock event device to the minimum delay.
0268  * @dev:    device to program
0269  *
0270  * Returns 0 on success, -ETIME when the retry loop failed.
0271  */
0272 static int clockevents_program_min_delta(struct clock_event_device *dev)
0273 {
0274     unsigned long long clc;
0275     int64_t delta = 0;
0276     int i;
0277 
0278     for (i = 0; i < 10; i++) {
0279         delta += dev->min_delta_ns;
0280         dev->next_event = ktime_add_ns(ktime_get(), delta);
0281 
0282         if (clockevent_state_shutdown(dev))
0283             return 0;
0284 
0285         dev->retries++;
0286         clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
0287         if (dev->set_next_event((unsigned long) clc, dev) == 0)
0288             return 0;
0289     }
0290     return -ETIME;
0291 }
0292 
0293 #endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
0294 
0295 /**
0296  * clockevents_program_event - Reprogram the clock event device.
0297  * @dev:    device to program
0298  * @expires:    absolute expiry time (monotonic clock)
0299  * @force:  program minimum delay if expires can not be set
0300  *
0301  * Returns 0 on success, -ETIME when the event is in the past.
0302  */
0303 int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
0304                   bool force)
0305 {
0306     unsigned long long clc;
0307     int64_t delta;
0308     int rc;
0309 
0310     if (WARN_ON_ONCE(expires < 0))
0311         return -ETIME;
0312 
0313     dev->next_event = expires;
0314 
0315     if (clockevent_state_shutdown(dev))
0316         return 0;
0317 
0318     /* We must be in ONESHOT state here */
0319     WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n",
0320           clockevent_get_state(dev));
0321 
0322     /* Shortcut for clockevent devices that can deal with ktime. */
0323     if (dev->features & CLOCK_EVT_FEAT_KTIME)
0324         return dev->set_next_ktime(expires, dev);
0325 
0326     delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
0327     if (delta <= 0)
0328         return force ? clockevents_program_min_delta(dev) : -ETIME;
0329 
0330     delta = min(delta, (int64_t) dev->max_delta_ns);
0331     delta = max(delta, (int64_t) dev->min_delta_ns);
0332 
0333     clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
0334     rc = dev->set_next_event((unsigned long) clc, dev);
0335 
0336     return (rc && force) ? clockevents_program_min_delta(dev) : rc;
0337 }
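/*
 * Continuing the illustrative 10 MHz / shift = 32 / mult ~= 42949673
 * example from above: programming an event 2 ms in the future gives
 * delta = 2000000 ns and
 *
 *   clc = 2000000 * 42949673 >> 32   ~=  20000 device ticks
 *
 * which is handed to dev->set_next_event(). If the hardware rejects it
 * and @force is set, clockevents_program_min_delta() retries with
 * min_delta_ns instead.
 */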
0338 
0339 /*
0340  * Called after a notify add to make devices available which were
0341  * released from the notifier call.
0342  */
0343 static void clockevents_notify_released(void)
0344 {
0345     struct clock_event_device *dev;
0346 
0347     while (!list_empty(&clockevents_released)) {
0348         dev = list_entry(clockevents_released.next,
0349                  struct clock_event_device, list);
0350         list_move(&dev->list, &clockevent_devices);
0351         tick_check_new_device(dev);
0352     }
0353 }
0354 
0355 /*
0356  * Try to install a replacement clock event device
0357  */
0358 static int clockevents_replace(struct clock_event_device *ced)
0359 {
0360     struct clock_event_device *dev, *newdev = NULL;
0361 
0362     list_for_each_entry(dev, &clockevent_devices, list) {
0363         if (dev == ced || !clockevent_state_detached(dev))
0364             continue;
0365 
0366         if (!tick_check_replacement(newdev, dev))
0367             continue;
0368 
0369         if (!try_module_get(dev->owner))
0370             continue;
0371 
0372         if (newdev)
0373             module_put(newdev->owner);
0374         newdev = dev;
0375     }
0376     if (newdev) {
0377         tick_install_replacement(newdev);
0378         list_del_init(&ced->list);
0379     }
0380     return newdev ? 0 : -EBUSY;
0381 }
0382 
0383 /*
0384  * Called with clockevents_mutex and clockevents_lock held
0385  */
0386 static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
0387 {
0388     /* Fast track. Device is unused */
0389     if (clockevent_state_detached(ced)) {
0390         list_del_init(&ced->list);
0391         return 0;
0392     }
0393 
0394     return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
0395 }
0396 
0397 /*
0398  * SMP function call to unbind a device
0399  */
0400 static void __clockevents_unbind(void *arg)
0401 {
0402     struct ce_unbind *cu = arg;
0403     int res;
0404 
0405     raw_spin_lock(&clockevents_lock);
0406     res = __clockevents_try_unbind(cu->ce, smp_processor_id());
0407     if (res == -EAGAIN)
0408         res = clockevents_replace(cu->ce);
0409     cu->res = res;
0410     raw_spin_unlock(&clockevents_lock);
0411 }
0412 
0413 /*
0414  * Issues smp function call to unbind a per cpu device. Called with
0415  * clockevents_mutex held.
0416  */
0417 static int clockevents_unbind(struct clock_event_device *ced, int cpu)
0418 {
0419     struct ce_unbind cu = { .ce = ced, .res = -ENODEV };
0420 
0421     smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
0422     return cu.res;
0423 }
0424 
0425 /*
0426  * Unbind a clockevents device.
0427  */
0428 int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
0429 {
0430     int ret;
0431 
0432     mutex_lock(&clockevents_mutex);
0433     ret = clockevents_unbind(ced, cpu);
0434     mutex_unlock(&clockevents_mutex);
0435     return ret;
0436 }
0437 EXPORT_SYMBOL_GPL(clockevents_unbind_device);
0438 
0439 /**
0440  * clockevents_register_device - register a clock event device
0441  * @dev:    device to register
0442  */
0443 void clockevents_register_device(struct clock_event_device *dev)
0444 {
0445     unsigned long flags;
0446 
0447     /* Initialize state to DETACHED */
0448     clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
0449 
0450     if (!dev->cpumask) {
0451         WARN_ON(num_possible_cpus() > 1);
0452         dev->cpumask = cpumask_of(smp_processor_id());
0453     }
0454 
0455     if (dev->cpumask == cpu_all_mask) {
0456         WARN(1, "%s cpumask == cpu_all_mask, using cpu_possible_mask instead\n",
0457              dev->name);
0458         dev->cpumask = cpu_possible_mask;
0459     }
0460 
0461     raw_spin_lock_irqsave(&clockevents_lock, flags);
0462 
0463     list_add(&dev->list, &clockevent_devices);
0464     tick_check_new_device(dev);
0465     clockevents_notify_released();
0466 
0467     raw_spin_unlock_irqrestore(&clockevents_lock, flags);
0468 }
0469 EXPORT_SYMBOL_GPL(clockevents_register_device);
0470 
0471 static void clockevents_config(struct clock_event_device *dev, u32 freq)
0472 {
0473     u64 sec;
0474 
0475     if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
0476         return;
0477 
0478     /*
0479      * Calculate the maximum number of seconds we can sleep. Limit
0480      * to 10 minutes for hardware which can program more than
0481      * 32bit ticks so we still get reasonable conversion values.
0482      */
0483     sec = dev->max_delta_ticks;
0484     do_div(sec, freq);
0485     if (!sec)
0486         sec = 1;
0487     else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
0488         sec = 600;
0489 
0490     clockevents_calc_mult_shift(dev, freq, sec);
0491     dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
0492     dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
0493 }
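/*
 * Illustrative numbers: a 32-bit timer running at 10 MHz has
 * max_delta_ticks = 0xffffffff, so sec = 0xffffffff / 10000000 ~= 429 and
 * is used as-is. A hypothetical 64-bit timer at 1 GHz with
 * max_delta_ticks = 2^48 would yield sec ~= 281475, which is clamped to
 * 600 so that clockevents_calc_mult_shift() still produces a mult/shift
 * pair with reasonable precision; min/max_delta_ns are then derived via
 * cev_delta2ns().
 */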
0494 
0495 /**
0496  * clockevents_config_and_register - Configure and register a clock event device
0497  * @dev:    device to register
0498  * @freq:   The clock frequency
0499  * @min_delta:  The minimum clock ticks to program in oneshot mode
0500  * @max_delta:  The maximum clock ticks to program in oneshot mode
0501  *
0502  * min/max_delta can be 0 for devices which do not support oneshot mode.
0503  */
0504 void clockevents_config_and_register(struct clock_event_device *dev,
0505                      u32 freq, unsigned long min_delta,
0506                      unsigned long max_delta)
0507 {
0508     dev->min_delta_ticks = min_delta;
0509     dev->max_delta_ticks = max_delta;
0510     clockevents_config(dev, freq);
0511     clockevents_register_device(dev);
0512 }
0513 EXPORT_SYMBOL_GPL(clockevents_config_and_register);
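/*
 * Sketch of how a (hypothetical) timer driver would typically use this
 * interface; all names other than the core API are made up for
 * illustration:
 *
 *   static struct clock_event_device foo_clockevent = {
 *           .name               = "foo-timer",
 *           .features           = CLOCK_EVT_FEAT_ONESHOT,
 *           .rating             = 300,
 *           .set_next_event     = foo_timer_set_next_event,
 *           .set_state_shutdown = foo_timer_shutdown,
 *           .set_state_oneshot  = foo_timer_set_oneshot,
 *   };
 *
 *   // probe path: 10 MHz clock, can program 2..2^32-1 ticks
 *   foo_clockevent.cpumask = cpumask_of(smp_processor_id());
 *   clockevents_config_and_register(&foo_clockevent, 10000000,
 *                                   2, 0xffffffff);
 *
 * This fills in mult/shift and min/max_delta_ns via clockevents_config()
 * and then registers the device.
 */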
0514 
0515 int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
0516 {
0517     clockevents_config(dev, freq);
0518 
0519     if (clockevent_state_oneshot(dev))
0520         return clockevents_program_event(dev, dev->next_event, false);
0521 
0522     if (clockevent_state_periodic(dev))
0523         return __clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
0524 
0525     return 0;
0526 }
0527 
0528 /**
0529  * clockevents_update_freq - Update frequency and reprogram a clock event device.
0530  * @dev:    device to modify
0531  * @freq:   new device frequency
0532  *
0533  * Reconfigure and reprogram a clock event device in oneshot
0534  * mode. Must be called on the cpu for which the device delivers per
0535  * cpu timer events. If called for the broadcast device the core takes
0536  * care of serialization.
0537  *
0538  * Returns 0 on success, -ETIME when the event is in the past.
0539  */
0540 int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
0541 {
0542     unsigned long flags;
0543     int ret;
0544 
0545     local_irq_save(flags);
0546     ret = tick_broadcast_update_freq(dev, freq);
0547     if (ret == -ENODEV)
0548         ret = __clockevents_update_freq(dev, freq);
0549     local_irq_restore(flags);
0550     return ret;
0551 }
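/*
 * A (hypothetical) driver whose input clock can change rate, e.g. via the
 * common clock framework, would call this from its rate-change notifier:
 *
 *   clockevents_update_freq(&foo_clockevent, new_rate_hz);
 *
 * running on the CPU that the device serves, as required above.
 */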
0552 
0553 /*
0554  * Noop handler when we shut down an event device
0555  */
0556 void clockevents_handle_noop(struct clock_event_device *dev)
0557 {
0558 }
0559 
0560 /**
0561  * clockevents_exchange_device - release and request clock devices
0562  * @old:    device to release (can be NULL)
0563  * @new:    device to request (can be NULL)
0564  *
0565  * Called from various tick functions with clockevents_lock held and
0566  * interrupts disabled.
0567  */
0568 void clockevents_exchange_device(struct clock_event_device *old,
0569                  struct clock_event_device *new)
0570 {
0571     /*
0572      * Caller releases a clock event device. We queue it into the
0573      * released list and do a notify add later.
0574      */
0575     if (old) {
0576         module_put(old->owner);
0577         clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED);
0578         list_move(&old->list, &clockevents_released);
0579     }
0580 
0581     if (new) {
0582         BUG_ON(!clockevent_state_detached(new));
0583         clockevents_shutdown(new);
0584     }
0585 }
0586 
0587 /**
0588  * clockevents_suspend - suspend clock devices
0589  */
0590 void clockevents_suspend(void)
0591 {
0592     struct clock_event_device *dev;
0593 
0594     list_for_each_entry_reverse(dev, &clockevent_devices, list)
0595         if (dev->suspend && !clockevent_state_detached(dev))
0596             dev->suspend(dev);
0597 }
0598 
0599 /**
0600  * clockevents_resume - resume clock devices
0601  */
0602 void clockevents_resume(void)
0603 {
0604     struct clock_event_device *dev;
0605 
0606     list_for_each_entry(dev, &clockevent_devices, list)
0607         if (dev->resume && !clockevent_state_detached(dev))
0608             dev->resume(dev);
0609 }
0610 
0611 #ifdef CONFIG_HOTPLUG_CPU
0612 
0613 # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
0614 /**
0615  * tick_offline_cpu - Take CPU out of the broadcast mechanism
0616  * @cpu:    The outgoing CPU
0617  *
0618  * Called on the outgoing CPU after it took itself offline.
0619  */
0620 void tick_offline_cpu(unsigned int cpu)
0621 {
0622     raw_spin_lock(&clockevents_lock);
0623     tick_broadcast_offline(cpu);
0624     raw_spin_unlock(&clockevents_lock);
0625 }
0626 # endif
0627 
0628 /**
0629  * tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu
0630  * @cpu:    The dead CPU
0631  */
0632 void tick_cleanup_dead_cpu(int cpu)
0633 {
0634     struct clock_event_device *dev, *tmp;
0635     unsigned long flags;
0636 
0637     raw_spin_lock_irqsave(&clockevents_lock, flags);
0638 
0639     tick_shutdown(cpu);
0640     /*
0641      * Unregister the clock event devices which were
0642      * released from the users in the notify chain.
0643      */
0644     list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
0645         list_del(&dev->list);
0646     /*
0647      * Now check whether the CPU has left unused per cpu devices
0648      */
0649     list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
0650         if (cpumask_test_cpu(cpu, dev->cpumask) &&
0651             cpumask_weight(dev->cpumask) == 1 &&
0652             !tick_is_broadcast_device(dev)) {
0653             BUG_ON(!clockevent_state_detached(dev));
0654             list_del(&dev->list);
0655         }
0656     }
0657     raw_spin_unlock_irqrestore(&clockevents_lock, flags);
0658 }
0659 #endif
0660 
0661 #ifdef CONFIG_SYSFS
0662 static struct bus_type clockevents_subsys = {
0663     .name       = "clockevents",
0664     .dev_name       = "clockevent",
0665 };
0666 
0667 static DEFINE_PER_CPU(struct device, tick_percpu_dev);
0668 static struct tick_device *tick_get_tick_dev(struct device *dev);
0669 
0670 static ssize_t current_device_show(struct device *dev,
0671                    struct device_attribute *attr,
0672                    char *buf)
0673 {
0674     struct tick_device *td;
0675     ssize_t count = 0;
0676 
0677     raw_spin_lock_irq(&clockevents_lock);
0678     td = tick_get_tick_dev(dev);
0679     if (td && td->evtdev)
0680         count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
0681     raw_spin_unlock_irq(&clockevents_lock);
0682     return count;
0683 }
0684 static DEVICE_ATTR_RO(current_device);
0685 
0686 /* We don't support the abomination of removable broadcast devices */
0687 static ssize_t unbind_device_store(struct device *dev,
0688                    struct device_attribute *attr,
0689                    const char *buf, size_t count)
0690 {
0691     char name[CS_NAME_LEN];
0692     ssize_t ret = sysfs_get_uname(buf, name, count);
0693     struct clock_event_device *ce = NULL, *iter;
0694 
0695     if (ret < 0)
0696         return ret;
0697 
0698     ret = -ENODEV;
0699     mutex_lock(&clockevents_mutex);
0700     raw_spin_lock_irq(&clockevents_lock);
0701     list_for_each_entry(iter, &clockevent_devices, list) {
0702         if (!strcmp(iter->name, name)) {
0703             ret = __clockevents_try_unbind(iter, dev->id);
0704             ce = iter;
0705             break;
0706         }
0707     }
0708     raw_spin_unlock_irq(&clockevents_lock);
0709     /*
0710      * We hold clockevents_mutex, so ce can't go away
0711      */
0712     if (ret == -EAGAIN)
0713         ret = clockevents_unbind(ce, dev->id);
0714     mutex_unlock(&clockevents_mutex);
0715     return ret ? ret : count;
0716 }
0717 static DEVICE_ATTR_WO(unbind_device);
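/*
 * The attributes above end up under the clockevents subsystem in sysfs;
 * on a typical system the per-cpu interface looks roughly like this
 * (paths and device name shown for illustration):
 *
 *   $ cat /sys/devices/system/clockevents/clockevent0/current_device
 *   lapic-deadline
 *   $ echo lapic-deadline > \
 *         /sys/devices/system/clockevents/clockevent0/unbind_device
 *
 * The write path calls __clockevents_try_unbind() and, if the device is
 * currently in use, clockevents_unbind() to install a replacement first.
 */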
0718 
0719 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
0720 static struct device tick_bc_dev = {
0721     .init_name  = "broadcast",
0722     .id     = 0,
0723     .bus        = &clockevents_subsys,
0724 };
0725 
0726 static struct tick_device *tick_get_tick_dev(struct device *dev)
0727 {
0728     return dev == &tick_bc_dev ? tick_get_broadcast_device() :
0729         &per_cpu(tick_cpu_device, dev->id);
0730 }
0731 
0732 static __init int tick_broadcast_init_sysfs(void)
0733 {
0734     int err = device_register(&tick_bc_dev);
0735 
0736     if (!err)
0737         err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
0738     return err;
0739 }
0740 #else
0741 static struct tick_device *tick_get_tick_dev(struct device *dev)
0742 {
0743     return &per_cpu(tick_cpu_device, dev->id);
0744 }
0745 static inline int tick_broadcast_init_sysfs(void) { return 0; }
0746 #endif
0747 
0748 static int __init tick_init_sysfs(void)
0749 {
0750     int cpu;
0751 
0752     for_each_possible_cpu(cpu) {
0753         struct device *dev = &per_cpu(tick_percpu_dev, cpu);
0754         int err;
0755 
0756         dev->id = cpu;
0757         dev->bus = &clockevents_subsys;
0758         err = device_register(dev);
0759         if (!err)
0760             err = device_create_file(dev, &dev_attr_current_device);
0761         if (!err)
0762             err = device_create_file(dev, &dev_attr_unbind_device);
0763         if (err)
0764             return err;
0765     }
0766     return tick_broadcast_init_sysfs();
0767 }
0768 
0769 static int __init clockevents_init_sysfs(void)
0770 {
0771     int err = subsys_system_register(&clockevents_subsys, NULL);
0772 
0773     if (!err)
0774         err = tick_init_sysfs();
0775     return err;
0776 }
0777 device_initcall(clockevents_init_sysfs);
0778 #endif /* SYSFS */