path: root/arch/powerpc/platforms/pseries/eeh_event.c
/*
 * eeh_event.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>

/** Overview:
 *  EEH error states may be detected within exception handlers;
 *  however, the recovery processing needs to occur asynchronously
 *  in a normal kernel context and not an interrupt context.
 *  This pair of routines creates an event and queues it onto a
 *  work-queue, where a worker thread can drive recovery.
 */
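
/*
 * For reference, a rough sketch of the event structure this file relies
 * on.  The authoritative definition lives in <asm/eeh_event.h>; the
 * members below are only those inferred from their use in this file,
 * and their order or any additional fields may differ:
 *
 *	struct eeh_event {
 *		struct list_head	list;		- entry on eeh_eventlist
 *		struct device_node	*dn;		- device node of failed device
 *		struct pci_dev		*dev;		- pci device, may be NULL
 *		enum pci_channel_state	state;		- channel state at failure
 *		int			time_unavail;	- recorded unavailability time
 *	};
 */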

/* EEH event workqueue setup. */
static DEFINE_SPINLOCK(eeh_eventlist_lock);
LIST_HEAD(eeh_eventlist);
static void eeh_thread_launcher(void *);
DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);

/* Serialize reset sequences for a given pci device */
DEFINE_MUTEX(eeh_event_mutex);

/**
 * eeh_event_handler - dispatch EEH events.
 * @dummy: unused
 *
 * The detection of a frozen slot can occur inside an interrupt,
 * where it can be hard to do anything about it.  The goal of this
 * routine is to pull these detection events out of the context
 * of the interrupt handler, and re-dispatch them for processing
 * at a later time in a normal context.
 */
static int eeh_event_handler(void *dummy)
{
	unsigned long flags;
	struct eeh_event	*event;
	struct pci_dn *pdn;

	daemonize("eehd");
	set_current_state(TASK_INTERRUPTIBLE);

	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	event = NULL;

	/* Unqueue the event, get ready to process. */
	if (!list_empty(&eeh_eventlist)) {
		event = list_entry(eeh_eventlist.next, struct eeh_event, list);
		list_del(&event->list);
	}
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);

	if (event == NULL)
		return 0;

	/* Serialize processing of EEH events */
	mutex_lock(&eeh_event_mutex);
	eeh_mark_slot(event->dn, EEH_MODE_RECOVERING);

	printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
	       event->dev ? pci_name(event->dev) : "Unknown");

	pdn = handle_eeh_events(event);

	eeh_clear_slot(event->dn, EEH_MODE_RECOVERING);
	pci_dev_put(event->dev);
	kfree(event);
	mutex_unlock(&eeh_event_mutex);

	/* If there are no new errors for the next hour, decrement the freeze count. */
	if (pdn && pdn->eeh_freeze_count > 0) {
		msleep_interruptible(3600 * 1000);
		if (pdn->eeh_freeze_count > 0)
			pdn->eeh_freeze_count--;
	}

	return 0;
}

/**
 * eeh_thread_launcher - spawn the EEH event handler thread.
 * @dummy: unused
 */
static void eeh_thread_launcher(void *dummy)
{
	if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
		printk(KERN_ERR "Failed to start EEH daemon\n");
}
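
/*
 * For reference, the overall flow implemented by this file (one
 * short-lived "eehd" kernel thread is spawned per work-queue run):
 *
 *	eeh_send_failure_event()              - interrupt or normal context
 *	    schedule_work(&eeh_event_wq)
 *	        eeh_thread_launcher()         - workqueue (process) context
 *	            kernel_thread(eeh_event_handler)
 *	                handle_eeh_events()   - actual recovery, serialized
 *	                                        by eeh_event_mutex
 */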

/**
 * eeh_send_failure_event - generate a PCI error event
 * @dn: device node for the failed device
 * @dev: pci device for the failed device (may be NULL)
 * @state: PCI channel state at the time of failure
 * @time_unavail: expected device unavailability, recorded in the event
 *
 * This routine can be called within an interrupt context;
 * the actual event will be delivered in a normal context
 * (from a workqueue).
 */
int eeh_send_failure_event(struct device_node *dn,
			   struct pci_dev *dev,
			   enum pci_channel_state state,
			   int time_unavail)
{
	unsigned long flags;
	struct eeh_event *event;

	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (event == NULL) {
		printk(KERN_ERR "EEH: out of memory, event not handled\n");
		return 1;
	}

	if (dev)
		pci_dev_get(dev);

	event->dn = dn;
	event->dev = dev;
	event->state = state;
	event->time_unavail = time_unavail;

	/* We may or may not be called in an interrupt context */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	/* Queue at the tail so events are processed in arrival order */
	list_add_tail(&event->list, &eeh_eventlist);
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);

	schedule_work(&eeh_event_wq);

	return 0;
}
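
/*
 * A minimal, hypothetical call-site sketch.  In this era the in-tree
 * caller lives in the EEH detection path in eeh.c; the check below is
 * illustrative only, not a real helper:
 *
 *	if (slot_is_frozen(dn))		- hypothetical detection check
 *		eeh_send_failure_event(dn, dev,
 *				       pci_channel_io_frozen, 0);
 */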

/********************** END OF FILE ******************************/