Lines Matching refs:wmi (cross-reference hits for the symbol "wmi" in the ath9k HTC driver's WMI layer, wmi.c; the number on each hit is its line in that source file)

91 struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
93 struct wmi *wmi;
95 wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL);
96 if (!wmi)
99 wmi->drv_priv = priv;
100 wmi->stopped = false;
101 skb_queue_head_init(&wmi->wmi_event_queue);
102 spin_lock_init(&wmi->wmi_lock);
103 spin_lock_init(&wmi->event_lock);
104 mutex_init(&wmi->op_mutex);
105 mutex_init(&wmi->multi_write_mutex);
106 mutex_init(&wmi->multi_rmw_mutex);
107 init_completion(&wmi->cmd_wait);
108 INIT_LIST_HEAD(&wmi->pending_tx_events);
109 tasklet_setup(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet);
111 return wmi;
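
The routine above allocates the WMI context with kzalloc() and initializes every lock, queue and completion before any HTC traffic can arrive. For orientation, a heavily abridged sketch of struct wmi from wmi.h follows; field order is simplified and several members (multi-write/RMW buffers and counters) are omitted, so treat the header as authoritative.

/* Abridged sketch of struct wmi (see wmi.h for the full definition);
 * only the fields most relevant to the fragments in this listing are shown. */
struct wmi {
        struct ath9k_htc_priv   *drv_priv;        /* back-pointer to the HTC driver    */
        struct htc_target       *htc;             /* set later by ath9k_wmi_connect()  */
        enum htc_endpoint_id    ctrl_epid;        /* WMI control endpoint              */

        struct mutex            op_mutex;         /* one command in flight at a time   */
        struct completion       cmd_wait;         /* signalled when a response arrives */
        u16                     last_seq_id;      /* seq no the RX path must match     */
        u16                     tx_seq_id;        /* last seq no handed to the target  */
        u8                      *cmd_rsp_buf;     /* caller's response buffer          */
        u32                     cmd_rsp_len;      /* bytes to copy into it             */
        bool                    stopped;

        struct sk_buff_head     wmi_event_queue;  /* RX events awaiting the tasklet    */
        struct tasklet_struct   wmi_event_tasklet;
        struct list_head        pending_tx_events;
        spinlock_t              event_lock;
        spinlock_t              wmi_lock;

        struct mutex            multi_write_mutex; /* batched register writes          */
        struct mutex            multi_rmw_mutex;   /* batched read-modify-writes       */
};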
116 struct wmi *wmi = priv->wmi;
118 mutex_lock(&wmi->op_mutex);
119 wmi->stopped = true;
120 mutex_unlock(&wmi->op_mutex);
125 kfree(priv->wmi);
132 tasklet_kill(&priv->wmi->wmi_event_tasklet);
133 spin_lock_irqsave(&priv->wmi->wmi_lock, flags);
134 __skb_queue_purge(&priv->wmi->wmi_event_queue);
135 spin_unlock_irqrestore(&priv->wmi->wmi_lock, flags);
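
The fragments above come from three separate teardown helpers: one flips the stopped flag under op_mutex, one frees the context, and one kills the event tasklet and purges any queued event skbs. In current mainline these are ath9k_stop_wmi(), ath9k_destroy_wmi() and ath9k_wmi_event_drain(); the names do not show up as hits because they are distinct symbols, so treat them as an assumption for other trees. A minimal sketch of the ordering the driver relies on, with the wrapper function itself hypothetical:

/* Illustrative teardown ordering; example_shutdown() is hypothetical,
 * the helpers it calls are named per current mainline. */
static void example_shutdown(struct ath9k_htc_priv *priv)
{
        ath9k_stop_wmi(priv);           /* set wmi->stopped under op_mutex so    */
                                        /* ath9k_wmi_cmd() rejects new commands  */
        ath9k_wmi_event_drain(priv);    /* tasklet_kill(), then purge leftover   */
                                        /* skbs still in wmi_event_queue         */
        /* ... stop HTC/USB traffic and unregister the hw here ... */
        ath9k_destroy_wmi(priv);        /* finally kfree(priv->wmi)              */
}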
140 struct wmi *wmi = from_tasklet(wmi, t, wmi_event_tasklet);
141 struct ath9k_htc_priv *priv = wmi->drv_priv;
150 spin_lock_irqsave(&wmi->wmi_lock, flags);
151 skb = __skb_dequeue(&wmi->wmi_event_queue);
153 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
156 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
168 ieee80211_queue_work(wmi->drv_priv->hw,
169 &wmi->drv_priv->fatal_work);
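
The event tasklet drains wmi_event_queue one skb at a time, holding wmi_lock only around the dequeue and dropping it before the event is handled; a fatal event is bounced to process context through the driver's fatal_work, as the last two hits show. A condensed sketch of that consume loop (header parsing and event dispatch elided):

        /* Condensed consume loop from the tasklet; dispatch omitted. */
        do {
                spin_lock_irqsave(&wmi->wmi_lock, flags);
                skb = __skb_dequeue(&wmi->wmi_event_queue);
                if (!skb) {
                        spin_unlock_irqrestore(&wmi->wmi_lock, flags);
                        return;         /* queue empty, tasklet done */
                }
                spin_unlock_irqrestore(&wmi->wmi_lock, flags);

                /* ... read the wmi_cmd_hdr, dispatch on the event id,
                 *     kfree_skb(skb) when finished ... */
        } while (1);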
203 static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
207 if (wmi->cmd_rsp_buf != NULL && wmi->cmd_rsp_len != 0)
208 memcpy(wmi->cmd_rsp_buf, skb->data, wmi->cmd_rsp_len);
210 complete(&wmi->cmd_wait);
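
This callback is the responder half of a completion pair: it copies at most cmd_rsp_len bytes into the buffer the command issuer registered and then wakes the issuer, which is blocked in wait_for_completion_timeout() inside ath9k_wmi_cmd() (shown further down). Roughly:

        /* Responder side: runs from the HTC RX path, under wmi_lock
         * (see ath9k_wmi_ctrl_rx below). */
        if (wmi->cmd_rsp_buf != NULL && wmi->cmd_rsp_len != 0)
                memcpy(wmi->cmd_rsp_buf, skb->data, wmi->cmd_rsp_len);
        complete(&wmi->cmd_wait);

        /* Waiter side: ath9k_wmi_cmd(), further down in this listing. */
        time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
        if (!time_left) {
                /* no response: clear last_seq_id and fail with a timeout */
        }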
216 struct wmi *wmi = priv;
221 if (unlikely(wmi->stopped))
232 spin_lock_irqsave(&wmi->wmi_lock, flags);
233 __skb_queue_tail(&wmi->wmi_event_queue, skb);
234 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
235 tasklet_schedule(&wmi->wmi_event_tasklet);
240 spin_lock_irqsave(&wmi->wmi_lock, flags);
241 if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
242 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
247 ath9k_wmi_rsp_callback(wmi, skb);
248 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
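
Both branches of the RX handler key off the small WMI header at the front of every control-endpoint skb: in mainline, command_id values with the 0x1000 bit set are firmware events and get queued for the tasklet, while anything else is treated as a command response and accepted only if its seq_no matches last_seq_id. The header is, roughly (layout per wmi.h):

/* WMI control header prepended to every command, response and event;
 * both fields are big-endian on the wire (sketch, see wmi.h). */
struct wmi_cmd_hdr {
        __be16 command_id;      /* WMI command or event id                   */
        __be16 seq_no;          /* echoed back by the target in the response */
};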
260 int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
266 wmi->htc = htc;
270 connect.ep_callbacks.priv = wmi;
275 ret = htc_connect_service(htc, &connect, &wmi->ctrl_epid);
279 *wmi_ctrl_epid = wmi->ctrl_epid;
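
The lines the search skipped fill in the rest of the HTC service connect request: the endpoint callbacks and the WMI control service id. A sketch of how that request is typically populated, with callback and constant names taken from current mainline and therefore to be treated as assumptions for other trees:

        struct htc_service_connreq connect;

        memset(&connect, 0, sizeof(connect));

        connect.ep_callbacks.priv = wmi;                /* handed back to the callbacks */
        connect.ep_callbacks.rx   = ath9k_wmi_ctrl_rx;  /* response/event receive path  */
        connect.ep_callbacks.tx   = ath9k_wmi_ctrl_tx;  /* frees the command skb        */
        connect.service_id        = WMI_CONTROL_SVC;    /* WMI control service          */

        ret = htc_connect_service(htc, &connect, &wmi->ctrl_epid);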
284 static int ath9k_wmi_cmd_issue(struct wmi *wmi,
294 hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);
296 spin_lock_irqsave(&wmi->wmi_lock, flags);
299 wmi->cmd_rsp_buf = rsp_buf;
300 wmi->cmd_rsp_len = rsp_len;
302 wmi->last_seq_id = wmi->tx_seq_id;
303 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
305 return htc_send_epid(wmi->htc, skb, wmi->ctrl_epid);
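
These hits are the core of the issue step: the sequence id and the caller's response buffer are recorded under wmi_lock so the RX path can recognize and fill the matching response, and only then is the skb handed to HTC. Because there is a single cmd_rsp_buf/last_seq_id pair, op_mutex in ath9k_wmi_cmd() keeps at most one command in flight. A condensed view including the header push the search skipped (parameter names follow mainline and may differ):

        /* Condensed view of the issue step; error handling omitted. */
        hdr = skb_push(skb, sizeof(struct wmi_cmd_hdr));
        hdr->command_id = cpu_to_be16(cmd);
        hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);

        spin_lock_irqsave(&wmi->wmi_lock, flags);
        wmi->cmd_rsp_buf = rsp_buf;             /* where the response is copied    */
        wmi->cmd_rsp_len = rsp_len;             /* and how many bytes of it        */
        wmi->last_seq_id = wmi->tx_seq_id;      /* what the RX path will match on  */
        spin_unlock_irqrestore(&wmi->wmi_lock, flags);

        return htc_send_epid(wmi->htc, skb, wmi->ctrl_epid);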
308 int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
313 struct ath_hw *ah = wmi->drv_priv->ah;
334 mutex_lock(&wmi->op_mutex);
336 /* check if wmi stopped flag is set */
337 if (unlikely(wmi->stopped)) {
342 ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len, rsp_buf, rsp_len);
346 time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
350 spin_lock_irqsave(&wmi->wmi_lock, flags);
351 wmi->last_seq_id = 0;
352 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
353 mutex_unlock(&wmi->op_mutex);
357 mutex_unlock(&wmi->op_mutex);
363 mutex_unlock(&wmi->op_mutex);
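
Callers enter through ath9k_wmi_cmd(), in the driver usually via the WMI_CMD()/WMI_CMD_BUF() convenience macros from htc.h. A minimal, hypothetical example of the calling convention, assuming the WMI_GET_FW_VERSION command id and struct wmi_fw_version response defined in wmi.h and a surrounding priv context:

        /* Hypothetical caller: query the firmware version with a 2 s timeout. */
        struct wmi_fw_version cmd_rsp;
        int ret;

        ret = ath9k_wmi_cmd(priv->wmi, WMI_GET_FW_VERSION,
                            NULL, 0,                            /* no command payload   */
                            (u8 *)&cmd_rsp, sizeof(cmd_rsp),    /* response copied here */
                            HZ * 2);                            /* completion timeout   */
        if (ret) {
                /* the command could not be sent, or the target never replied */
        }

On success the response fields are big-endian (__be16) and need be16_to_cpu() before use.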