#include <linux/delay.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
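
/*
 * Overview:
 * EEH error states may be detected within exception handlers, but
 * recovery must run asynchronously in normal kernel context rather
 * than in interrupt context. __eeh_send_failure_event() creates an
 * event and queues it; the "eehd" kernel thread then pulls events
 * off the queue and drives recovery via eeh_handle_normal_event()
 * or eeh_handle_special_event().
 */
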
static DEFINE_SPINLOCK(eeh_eventlist_lock);
static DECLARE_COMPLETION(eeh_eventlist_event);
static LIST_HEAD(eeh_eventlist);
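
/**
 * eeh_event_handler - Dispatch EEH events
 * @dummy: unused
 *
 * A frozen slot can be detected inside an interrupt handler, where
 * little can be done about it directly. This handler pulls detection
 * events off the queue and re-dispatches them for processing in
 * normal kernel context.
 */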
static int eeh_event_handler(void *dummy)
{
	unsigned long flags;
	struct eeh_event *event;

	while (!kthread_should_stop()) {
		if (wait_for_completion_interruptible(&eeh_eventlist_event))
			break;

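		/* Fetch the next EEH event off the queue */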
		spin_lock_irqsave(&eeh_eventlist_lock, flags);
		event = NULL;
		if (!list_empty(&eeh_eventlist)) {
			event = list_entry(eeh_eventlist.next,
					   struct eeh_event, list);
			list_del(&event->list);
		}
		spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
		if (!event)
			continue;
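
		/* An event with no PE bound to it is a special event */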
		if (event->pe)
			eeh_handle_normal_event(event->pe);
		else
			eeh_handle_special_event();

		kfree(event);
	}

	return 0;
}
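
/**
 * eeh_event_init - Start the kernel thread that processes EEH events
 *
 * Spawns the "eehd" kthread, which consumes queued events and drives
 * the recovery of the affected PEs.
 */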
int eeh_event_init(void)
{
	struct task_struct *t;
	int ret = 0;

	t = kthread_run(eeh_event_handler, NULL, "eehd");
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		pr_err("%s: Failed to start EEH daemon (%d)\n",
		       __func__, ret);
		return ret;
	}

	return 0;
}
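
/**
 * __eeh_send_failure_event - Generate a PCI error event
 * @pe: EEH PE, or NULL for a special event
 *
 * May be called from interrupt context; the event itself is handled
 * later, in normal kernel context, by the event handler thread.
 */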
int __eeh_send_failure_event(struct eeh_pe *pe)
{
	unsigned long flags;
	struct eeh_event *event;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event) {
		pr_err("EEH: out of memory, event not handled\n");
		return -ENOMEM;
	}
	event->pe = pe;
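
	/*
	 * Mark the PE as recovering before inserting it into the event
	 * queue. This prevents the PE from being freed by a hotplug
	 * driver while it sits in the queue.
	 */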
	if (pe) {
#ifdef CONFIG_STACKTRACE
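		/*
		 * Save the current stack trace so it can be dumped from
		 * the event handler thread later on.
		 */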
		pe->trace_entries = stack_trace_save(pe->stack_trace,
						     ARRAY_SIZE(pe->stack_trace), 0);
#endif /* CONFIG_STACKTRACE */

		eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
	}
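
	/* We may or may not be called in an interrupt context */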
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_add(&event->list, &eeh_eventlist);
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
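
	/* Wake up the event handler thread to process the event */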
	complete(&eeh_eventlist_event);

	return 0;
}

int eeh_send_failure_event(struct eeh_pe *pe)
{
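	/*
	 * If recovery has been manually suppressed via the debugfs
	 * no_recover knob, drop the event on the floor.
	 */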
	if (eeh_debugfs_no_recover) {
		pr_err("EEH: Event dropped due to no_recover setting\n");
		return 0;
	}

	return __eeh_send_failure_event(pe);
}
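
/**
 * eeh_remove_event - Remove EEH events from the queue
 * @pe: PE the events are bound to, or NULL to match all events
 * @force: remove events even when their PE is already isolated
 *
 * A later event may duplicate one that is already being recovered;
 * such duplicates are unnecessary and can be removed from the queue.
 */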
void eeh_remove_event(struct eeh_pe *pe, bool force)
{
	unsigned long flags;
	struct eeh_event *event, *tmp;
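
	/*
	 * Passing a NULL PE removes every queued event; this is used
	 * when the IOC is dead or the caller can report all existing
	 * errors itself. Unless "force" is set, events whose PE is
	 * already isolated are kept on the queue so they are not lost.
	 */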
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) {
		if (!force && event->pe &&
		    (event->pe->state & EEH_PE_ISOLATED))
			continue;

		if (!pe) {
			list_del(&event->list);
			kfree(event);
		} else if (pe->type & EEH_PE_PHB) {
			if (event->pe && event->pe->phb == pe->phb) {
				list_del(&event->list);
				kfree(event);
			}
		} else if (event->pe == pe) {
			list_del(&event->list);
			kfree(event);
		}
	}
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
}