drivers/scsi/libsas/sas_event.c — Serial Attached SCSI (SAS) event processing
[drm/drm-misc.git] / drivers / scsi / libsas / sas_event.c
blob f3a17191a4fee11796f5eae1b42f19b87218245f
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Serial Attached SCSI (SAS) Event processing
5 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
6 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
7 */
9 #include <linux/export.h>
10 #include <scsi/scsi_host.h>
11 #include "sas_internal.h"
13 bool sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
15 if (!test_bit(SAS_HA_REGISTERED, &ha->state))
16 return false;
18 if (test_bit(SAS_HA_DRAINING, &ha->state)) {
19 /* add it to the defer list, if not already pending */
20 if (list_empty(&sw->drain_node))
21 list_add_tail(&sw->drain_node, &ha->defer_q);
22 return true;
25 return queue_work(ha->event_q, &sw->work);
28 static bool sas_queue_event(int event, struct sas_work *work,
29 struct sas_ha_struct *ha)
31 unsigned long flags;
32 bool rc;
34 spin_lock_irqsave(&ha->lock, flags);
35 rc = sas_queue_work(ha, work);
36 spin_unlock_irqrestore(&ha->lock, flags);
38 return rc;
41 void sas_queue_deferred_work(struct sas_ha_struct *ha)
43 struct sas_work *sw, *_sw;
45 spin_lock_irq(&ha->lock);
46 list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
47 list_del_init(&sw->drain_node);
49 if (!sas_queue_work(ha, sw)) {
50 pm_runtime_put(ha->dev);
51 sas_free_event(to_asd_sas_event(&sw->work));
54 spin_unlock_irq(&ha->lock);
57 void __sas_drain_work(struct sas_ha_struct *ha)
59 set_bit(SAS_HA_DRAINING, &ha->state);
60 /* flush submitters */
61 spin_lock_irq(&ha->lock);
62 spin_unlock_irq(&ha->lock);
64 drain_workqueue(ha->event_q);
65 drain_workqueue(ha->disco_q);
67 clear_bit(SAS_HA_DRAINING, &ha->state);
68 sas_queue_deferred_work(ha);
71 int sas_drain_work(struct sas_ha_struct *ha)
73 int err;
75 err = mutex_lock_interruptible(&ha->drain_mutex);
76 if (err)
77 return err;
78 if (test_bit(SAS_HA_REGISTERED, &ha->state))
79 __sas_drain_work(ha);
80 mutex_unlock(&ha->drain_mutex);
82 return 0;
84 EXPORT_SYMBOL_GPL(sas_drain_work);
86 void sas_disable_revalidation(struct sas_ha_struct *ha)
88 mutex_lock(&ha->disco_mutex);
89 set_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
90 mutex_unlock(&ha->disco_mutex);
93 void sas_enable_revalidation(struct sas_ha_struct *ha)
95 int i;
97 mutex_lock(&ha->disco_mutex);
98 clear_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
99 for (i = 0; i < ha->num_phys; i++) {
100 struct asd_sas_port *port = ha->sas_port[i];
101 const int ev = DISCE_REVALIDATE_DOMAIN;
102 struct sas_discovery *d = &port->disc;
103 struct asd_sas_phy *sas_phy;
105 if (!test_and_clear_bit(ev, &d->pending))
106 continue;
108 spin_lock(&port->phy_list_lock);
109 if (list_empty(&port->phy_list)) {
110 spin_unlock(&port->phy_list_lock);
111 continue;
114 sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
115 port_phy_el);
116 spin_unlock(&port->phy_list_lock);
117 sas_notify_port_event(sas_phy,
118 PORTE_BROADCAST_RCVD, GFP_KERNEL);
120 mutex_unlock(&ha->disco_mutex);
124 static void sas_port_event_worker(struct work_struct *work)
126 struct asd_sas_event *ev = to_asd_sas_event(work);
127 struct asd_sas_phy *phy = ev->phy;
128 struct sas_ha_struct *ha = phy->ha;
130 sas_port_event_fns[ev->event](work);
131 pm_runtime_put(ha->dev);
132 sas_free_event(ev);
135 static void sas_phy_event_worker(struct work_struct *work)
137 struct asd_sas_event *ev = to_asd_sas_event(work);
138 struct asd_sas_phy *phy = ev->phy;
139 struct sas_ha_struct *ha = phy->ha;
141 sas_phy_event_fns[ev->event](work);
142 pm_runtime_put(ha->dev);
143 sas_free_event(ev);
146 /* defer works of new phys during suspend */
147 static bool sas_defer_event(struct asd_sas_phy *phy, struct asd_sas_event *ev)
149 struct sas_ha_struct *ha = phy->ha;
150 unsigned long flags;
151 bool deferred = false;
153 spin_lock_irqsave(&ha->lock, flags);
154 if (test_bit(SAS_HA_RESUMING, &ha->state) && !phy->suspended) {
155 struct sas_work *sw = &ev->work;
157 list_add_tail(&sw->drain_node, &ha->defer_q);
158 deferred = true;
160 spin_unlock_irqrestore(&ha->lock, flags);
161 return deferred;
164 void sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
165 gfp_t gfp_flags)
167 struct sas_ha_struct *ha = phy->ha;
168 struct asd_sas_event *ev;
170 BUG_ON(event >= PORT_NUM_EVENTS);
172 ev = sas_alloc_event(phy, gfp_flags);
173 if (!ev)
174 return;
176 /* Call pm_runtime_put() with pairs in sas_port_event_worker() */
177 pm_runtime_get_noresume(ha->dev);
179 INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
181 if (sas_defer_event(phy, ev))
182 return;
184 if (!sas_queue_event(event, &ev->work, ha)) {
185 pm_runtime_put(ha->dev);
186 sas_free_event(ev);
189 EXPORT_SYMBOL_GPL(sas_notify_port_event);
191 void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
192 gfp_t gfp_flags)
194 struct sas_ha_struct *ha = phy->ha;
195 struct asd_sas_event *ev;
197 BUG_ON(event >= PHY_NUM_EVENTS);
199 ev = sas_alloc_event(phy, gfp_flags);
200 if (!ev)
201 return;
203 /* Call pm_runtime_put() with pairs in sas_phy_event_worker() */
204 pm_runtime_get_noresume(ha->dev);
206 INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
208 if (sas_defer_event(phy, ev))
209 return;
211 if (!sas_queue_event(event, &ev->work, ha)) {
212 pm_runtime_put(ha->dev);
213 sas_free_event(ev);
216 EXPORT_SYMBOL_GPL(sas_notify_phy_event);