// SPDX-License-Identifier: GPL-2.0+
/*
 * Surface Book (gen. 2 and later) detachment system (DTX) driver.
 *
 * Provides a user-space interface to properly handle clipboard/tablet
 * (containing screen and processor) detachment from the base of the device
 * (containing the keyboard and optionally a discrete GPU). Allows user-space
 * to acknowledge (to speed things up), abort (e.g. in case the dGPU is still
 * in use), or request detachment.
 *
 * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
 */
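/*
 * Illustrative user-space usage (a minimal sketch, not part of this driver):
 * the snippet below assumes the UAPI definitions from
 * <linux/surface_aggregator/dtx.h> and omits all error handling. The device
 * node is /dev/surface/dtx, see the miscdevice setup in sdtx_device_init().
 *
 *	int fd = open("/dev/surface/dtx", O_RDONLY);
 *	struct sdtx_event hdr;
 *	__u8 payload[32];
 *
 *	ioctl(fd, SDTX_IOCTL_EVENTS_ENABLE);
 *
 *	for (;;) {
 *		read(fd, &hdr, sizeof(hdr));		// Fixed-size event header.
 *		if (hdr.length)
 *			read(fd, payload, hdr.length);	// Variable-size payload.
 *
 *		if (hdr.code == SDTX_EVENT_REQUEST)
 *			ioctl(fd, SDTX_IOCTL_LATCH_CONFIRM);	// Or _CANCEL to abort.
 *	}
 */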

#include <linux/fs.h>
#include <linux/input.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include <linux/surface_aggregator/controller.h>
#include <linux/surface_aggregator/device.h>
#include <linux/surface_aggregator/dtx.h>


/* -- SSAM interface. ------------------------------------------------------- */

enum sam_event_cid_bas {
	SAM_EVENT_CID_DTX_CONNECTION			= 0x0c,
	SAM_EVENT_CID_DTX_REQUEST			= 0x0e,
	SAM_EVENT_CID_DTX_CANCEL			= 0x0f,
	SAM_EVENT_CID_DTX_LATCH_STATUS			= 0x11,
};

enum ssam_bas_base_state {
	SSAM_BAS_BASE_STATE_DETACH_SUCCESS		= 0x00,
	SSAM_BAS_BASE_STATE_ATTACHED			= 0x01,
	SSAM_BAS_BASE_STATE_NOT_FEASIBLE		= 0x02,
};

enum ssam_bas_latch_status {
	SSAM_BAS_LATCH_STATUS_CLOSED			= 0x00,
	SSAM_BAS_LATCH_STATUS_OPENED			= 0x01,
	SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN		= 0x02,
	SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN	= 0x03,
	SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE		= 0x04,
};

enum ssam_bas_cancel_reason {
	SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE		= 0x00,  /* Low battery. */
	SSAM_BAS_CANCEL_REASON_TIMEOUT			= 0x02,
	SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN		= 0x03,
	SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN	= 0x04,
	SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE		= 0x05,
};

struct ssam_bas_base_info {
	u8 state;
	u8 base_id;
} __packed;

static_assert(sizeof(struct ssam_bas_base_info) == 2);

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = SSAM_SSH_TID_SAM,
	.command_id      = 0x06,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = SSAM_SSH_TID_SAM,
	.command_id      = 0x07,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = SSAM_SSH_TID_SAM,
	.command_id      = 0x08,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_confirm, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = SSAM_SSH_TID_SAM,
	.command_id      = 0x09,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_heartbeat, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = SSAM_SSH_TID_SAM,
	.command_id      = 0x0a,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_cancel, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = SSAM_SSH_TID_SAM,
	.command_id      = 0x0b,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_base, struct ssam_bas_base_info, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = SSAM_SSH_TID_SAM,
	.command_id      = 0x0c,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_device_mode, u8, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = SSAM_SSH_TID_SAM,
	.command_id      = 0x0d,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_latch_status, u8, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = SSAM_SSH_TID_SAM,
	.command_id      = 0x11,
	.instance_id     = 0x00,
});
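
/*
 * Note: The SSAM_DEFINE_SYNC_REQUEST_N() macros above expand to static
 * request functions taking only a controller reference, roughly of the form
 *
 *	static int ssam_bas_latch_lock(struct ssam_controller *ctrl);
 *
 * while SSAM_DEFINE_SYNC_REQUEST_R() additionally takes a pointer for the
 * returned value, roughly of the form
 *
 *	static int ssam_bas_get_base(struct ssam_controller *ctrl,
 *				     struct ssam_bas_base_info *ret);
 *
 * These are the signatures assumed by the ssam_retry() calls below.
 */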


/* -- Main structures. ------------------------------------------------------ */

enum sdtx_device_state {
	SDTX_DEVICE_SHUTDOWN_BIT    = BIT(0),
	SDTX_DEVICE_DIRTY_BASE_BIT  = BIT(1),
	SDTX_DEVICE_DIRTY_MODE_BIT  = BIT(2),
	SDTX_DEVICE_DIRTY_LATCH_BIT = BIT(3),
};

struct sdtx_device {
	struct kref kref;
	struct rw_semaphore lock;         /* Guards device and controller reference. */

	struct device *dev;
	struct ssam_controller *ctrl;
	unsigned long flags;

	struct miscdevice mdev;
	wait_queue_head_t waitq;
	struct mutex write_lock;          /* Guards order of events/notifications. */
	struct rw_semaphore client_lock;  /* Guards client list.                   */
	struct list_head client_list;

	struct delayed_work state_work;
	struct {
		struct ssam_bas_base_info base;
		u8 device_mode;
		u8 latch_status;
	} state;

	struct delayed_work mode_work;
	struct input_dev *mode_switch;

	struct ssam_event_notifier notif;
};

enum sdtx_client_state {
	SDTX_CLIENT_EVENTS_ENABLED_BIT = BIT(0),
};

struct sdtx_client {
	struct sdtx_device *ddev;
	struct list_head node;
	unsigned long flags;

	struct fasync_struct *fasync;

	struct mutex read_lock;           /* Guards FIFO buffer read access. */
	DECLARE_KFIFO(buffer, u8, 512);
};

static void __sdtx_device_release(struct kref *kref)
{
	struct sdtx_device *ddev = container_of(kref, struct sdtx_device, kref);

	mutex_destroy(&ddev->write_lock);
	kfree(ddev);
}

static struct sdtx_device *sdtx_device_get(struct sdtx_device *ddev)
{
	if (ddev)
		kref_get(&ddev->kref);

	return ddev;
}

static void sdtx_device_put(struct sdtx_device *ddev)
{
	if (ddev)
		kref_put(&ddev->kref, __sdtx_device_release);
}


/* -- Firmware value translations. ------------------------------------------ */

static u16 sdtx_translate_base_state(struct sdtx_device *ddev, u8 state)
{
	switch (state) {
	case SSAM_BAS_BASE_STATE_ATTACHED:
		return SDTX_BASE_ATTACHED;

	case SSAM_BAS_BASE_STATE_DETACH_SUCCESS:
		return SDTX_BASE_DETACHED;

	case SSAM_BAS_BASE_STATE_NOT_FEASIBLE:
		return SDTX_DETACH_NOT_FEASIBLE;

	default:
		dev_err(ddev->dev, "unknown base state: %#04x\n", state);
		return SDTX_UNKNOWN(state);
	}
}

static u16 sdtx_translate_latch_status(struct sdtx_device *ddev, u8 status)
{
	switch (status) {
	case SSAM_BAS_LATCH_STATUS_CLOSED:
		return SDTX_LATCH_CLOSED;

	case SSAM_BAS_LATCH_STATUS_OPENED:
		return SDTX_LATCH_OPENED;

	case SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN:
		return SDTX_ERR_FAILED_TO_OPEN;

	case SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN:
		return SDTX_ERR_FAILED_TO_REMAIN_OPEN;

	case SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE:
		return SDTX_ERR_FAILED_TO_CLOSE;

	default:
		dev_err(ddev->dev, "unknown latch status: %#04x\n", status);
		return SDTX_UNKNOWN(status);
	}
}

static u16 sdtx_translate_cancel_reason(struct sdtx_device *ddev, u8 reason)
{
	switch (reason) {
	case SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE:
		return SDTX_DETACH_NOT_FEASIBLE;

	case SSAM_BAS_CANCEL_REASON_TIMEOUT:
		return SDTX_DETACH_TIMEDOUT;

	case SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN:
		return SDTX_ERR_FAILED_TO_OPEN;

	case SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN:
		return SDTX_ERR_FAILED_TO_REMAIN_OPEN;

	case SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE:
		return SDTX_ERR_FAILED_TO_CLOSE;

	default:
		dev_err(ddev->dev, "unknown cancel reason: %#04x\n", reason);
		return SDTX_UNKNOWN(reason);
	}
}


/* -- IOCTLs. --------------------------------------------------------------- */

static int sdtx_ioctl_get_base_info(struct sdtx_device *ddev,
				    struct sdtx_base_info __user *buf)
{
	struct ssam_bas_base_info raw;
	struct sdtx_base_info info;
	int status;

	lockdep_assert_held_read(&ddev->lock);

	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &raw);
	if (status < 0)
		return status;

	info.state = sdtx_translate_base_state(ddev, raw.state);
	info.base_id = SDTX_BASE_TYPE_SSH(raw.base_id);

	if (copy_to_user(buf, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
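
/*
 * Illustrative user-space counterpart (a sketch under the same assumptions
 * as the example at the top of this file): querying the current base state
 * via the ioctl handled above.
 *
 *	struct sdtx_base_info info;
 *
 *	ioctl(fd, SDTX_IOCTL_GET_BASE_INFO, &info);
 *
 *	if (info.state == SDTX_BASE_ATTACHED)
 *		...	base is currently attached, info.base_id identifies it
 */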

static int sdtx_ioctl_get_device_mode(struct sdtx_device *ddev, u16 __user *buf)
{
	u8 mode;
	int status;

	lockdep_assert_held_read(&ddev->lock);

	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
	if (status < 0)
		return status;

	return put_user(mode, buf);
}

static int sdtx_ioctl_get_latch_status(struct sdtx_device *ddev, u16 __user *buf)
{
	u8 latch;
	int status;

	lockdep_assert_held_read(&ddev->lock);

	status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
	if (status < 0)
		return status;

	return put_user(sdtx_translate_latch_status(ddev, latch), buf);
}

static long __surface_dtx_ioctl(struct sdtx_client *client, unsigned int cmd, unsigned long arg)
{
	struct sdtx_device *ddev = client->ddev;

	lockdep_assert_held_read(&ddev->lock);

	switch (cmd) {
	case SDTX_IOCTL_EVENTS_ENABLE:
		set_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
		return 0;

	case SDTX_IOCTL_EVENTS_DISABLE:
		clear_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
		return 0;

	case SDTX_IOCTL_LATCH_LOCK:
		return ssam_retry(ssam_bas_latch_lock, ddev->ctrl);

	case SDTX_IOCTL_LATCH_UNLOCK:
		return ssam_retry(ssam_bas_latch_unlock, ddev->ctrl);

	case SDTX_IOCTL_LATCH_REQUEST:
		return ssam_retry(ssam_bas_latch_request, ddev->ctrl);

	case SDTX_IOCTL_LATCH_CONFIRM:
		return ssam_retry(ssam_bas_latch_confirm, ddev->ctrl);

	case SDTX_IOCTL_LATCH_HEARTBEAT:
		return ssam_retry(ssam_bas_latch_heartbeat, ddev->ctrl);

	case SDTX_IOCTL_LATCH_CANCEL:
		return ssam_retry(ssam_bas_latch_cancel, ddev->ctrl);

	case SDTX_IOCTL_GET_BASE_INFO:
		return sdtx_ioctl_get_base_info(ddev, (struct sdtx_base_info __user *)arg);

	case SDTX_IOCTL_GET_DEVICE_MODE:
		return sdtx_ioctl_get_device_mode(ddev, (u16 __user *)arg);

	case SDTX_IOCTL_GET_LATCH_STATUS:
		return sdtx_ioctl_get_latch_status(ddev, (u16 __user *)arg);

	default:
		return -EINVAL;
	}
}

static long surface_dtx_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct sdtx_client *client = file->private_data;
	long status;

	if (down_read_killable(&client->ddev->lock))
		return -ERESTARTSYS;

	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
		up_read(&client->ddev->lock);
		return -ENODEV;
	}

	status = __surface_dtx_ioctl(client, cmd, arg);

	up_read(&client->ddev->lock);
	return status;
}


/* -- File operations. ------------------------------------------------------ */

static int surface_dtx_open(struct inode *inode, struct file *file)
{
	struct sdtx_device *ddev = container_of(file->private_data, struct sdtx_device, mdev);
	struct sdtx_client *client;

	/* Initialize client. */
	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	client->ddev = sdtx_device_get(ddev);

	INIT_LIST_HEAD(&client->node);

	mutex_init(&client->read_lock);
	INIT_KFIFO(client->buffer);

	file->private_data = client;

	/* Attach client. */
	down_write(&ddev->client_lock);

	/*
	 * Do not add a new client if the device has been shut down. Note that
	 * it's enough to hold the client_lock here as, during shutdown, we
	 * only acquire that lock and remove clients after marking the device
	 * as shut down.
	 */
	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
		up_write(&ddev->client_lock);
		mutex_destroy(&client->read_lock);
		sdtx_device_put(client->ddev);
		kfree(client);
		return -ENODEV;
	}

	list_add_tail(&client->node, &ddev->client_list);
	up_write(&ddev->client_lock);

	stream_open(inode, file);
	return 0;
}

static int surface_dtx_release(struct inode *inode, struct file *file)
{
	struct sdtx_client *client = file->private_data;

	/* Detach client. */
	down_write(&client->ddev->client_lock);
	list_del(&client->node);
	up_write(&client->ddev->client_lock);

	/* Free client. */
	sdtx_device_put(client->ddev);
	mutex_destroy(&client->read_lock);
	kfree(client);

	return 0;
}

static ssize_t surface_dtx_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
{
	struct sdtx_client *client = file->private_data;
	struct sdtx_device *ddev = client->ddev;
	unsigned int copied;
	int status = 0;

	if (down_read_killable(&ddev->lock))
		return -ERESTARTSYS;

	/* Make sure we're not shut down. */
	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
		up_read(&ddev->lock);
		return -ENODEV;
	}

	do {
		/* Check availability, wait if necessary. */
		if (kfifo_is_empty(&client->buffer)) {
			up_read(&ddev->lock);

			if (file->f_flags & O_NONBLOCK)
				return -EAGAIN;

			status = wait_event_interruptible(ddev->waitq,
							  !kfifo_is_empty(&client->buffer) ||
							  test_bit(SDTX_DEVICE_SHUTDOWN_BIT,
								   &ddev->flags));
			if (status < 0)
				return status;

			if (down_read_killable(&ddev->lock))
				return -ERESTARTSYS;

			/* Need to check that we're not shut down again. */
			if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
				up_read(&ddev->lock);
				return -ENODEV;
			}
		}

		/* Try to read from FIFO. */
		if (mutex_lock_interruptible(&client->read_lock)) {
			up_read(&ddev->lock);
			return -ERESTARTSYS;
		}

		status = kfifo_to_user(&client->buffer, buf, count, &copied);
		mutex_unlock(&client->read_lock);

		if (status < 0) {
			up_read(&ddev->lock);
			return status;
		}

		/* We might not have gotten anything, check this here. */
		if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
			up_read(&ddev->lock);
			return -EAGAIN;
		}
	} while (copied == 0);

	up_read(&ddev->lock);
	return copied;
}

static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt)
{
	struct sdtx_client *client = file->private_data;
	__poll_t events = 0;

	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags))
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &client->ddev->waitq, pt);

	if (!kfifo_is_empty(&client->buffer))
		events |= EPOLLIN | EPOLLRDNORM;

	return events;
}

static int surface_dtx_fasync(int fd, struct file *file, int on)
{
	struct sdtx_client *client = file->private_data;

	return fasync_helper(fd, file, on, &client->fasync);
}

static const struct file_operations surface_dtx_fops = {
	.owner          = THIS_MODULE,
	.open           = surface_dtx_open,
	.release        = surface_dtx_release,
	.read           = surface_dtx_read,
	.poll           = surface_dtx_poll,
	.fasync         = surface_dtx_fasync,
	.unlocked_ioctl = surface_dtx_ioctl,
	.compat_ioctl   = surface_dtx_ioctl,
	.llseek         = no_llseek,
};
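
/*
 * Illustrative user-space event notification (a minimal sketch under the same
 * assumptions as the example at the top of this file): poll() can be used to
 * wait for buffered events without blocking in read().
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *
 *	if (pfd.revents & (POLLHUP | POLLERR))
 *		...	device has been shut down or removed
 *	else if (pfd.revents & POLLIN)
 *		...	at least one event is buffered, read() will not block
 *
 * Alternatively, SIGIO delivery can be requested via fcntl() with F_SETOWN
 * and O_ASYNC, backed by the fasync handler above.
 */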


/* -- Event handling/forwarding. -------------------------------------------- */

/*
 * The device operation mode is not immediately updated on the EC when the
 * base has been connected, i.e. querying the device mode inside the
 * connection event callback yields an outdated value. Thus, we can only
 * determine the new tablet-mode switch and device mode values after some
 * time.
 *
 * These delays have been chosen experimentally: we first delay on connect
 * events, then check and validate the device mode against the base state,
 * and, if invalid, delay again by the "recheck" delay.
 */
#define SDTX_DEVICE_MODE_DELAY_CONNECT	msecs_to_jiffies(100)
#define SDTX_DEVICE_MODE_DELAY_RECHECK	msecs_to_jiffies(100)

struct sdtx_status_event {
	struct sdtx_event e;
	__u16 v;
} __packed;

struct sdtx_base_info_event {
	struct sdtx_event e;
	struct sdtx_base_info v;
} __packed;

union sdtx_generic_event {
	struct sdtx_event common;
	struct sdtx_status_event status;
	struct sdtx_base_info_event base;
};

static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay);

/* Must be executed with ddev->write_lock held. */
static void sdtx_push_event(struct sdtx_device *ddev, struct sdtx_event *evt)
{
	const size_t len = sizeof(struct sdtx_event) + evt->length;
	struct sdtx_client *client;

	lockdep_assert_held(&ddev->write_lock);

	down_read(&ddev->client_lock);
	list_for_each_entry(client, &ddev->client_list, node) {
		if (!test_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags))
			continue;

		if (likely(kfifo_avail(&client->buffer) >= len))
			kfifo_in(&client->buffer, (const u8 *)evt, len);
		else
			dev_warn(ddev->dev, "event buffer overrun\n");

		kill_fasync(&client->fasync, SIGIO, POLL_IN);
	}
	up_read(&ddev->client_lock);

	wake_up_interruptible(&ddev->waitq);
}

static u32 sdtx_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in)
{
	struct sdtx_device *ddev = container_of(nf, struct sdtx_device, notif);
	union sdtx_generic_event event;
	size_t len;

	/* Validate event payload length. */
	switch (in->command_id) {
	case SAM_EVENT_CID_DTX_CONNECTION:
		len = 2 * sizeof(u8);
		break;

	case SAM_EVENT_CID_DTX_REQUEST:
		len = 0;
		break;

	case SAM_EVENT_CID_DTX_CANCEL:
		len = sizeof(u8);
		break;

	case SAM_EVENT_CID_DTX_LATCH_STATUS:
		len = sizeof(u8);
		break;

	default:
		return 0;
	}

	if (in->length != len) {
		dev_err(ddev->dev,
			"unexpected payload size for event %#04x: got %u, expected %zu\n",
			in->command_id, in->length, len);
		return 0;
	}

	mutex_lock(&ddev->write_lock);

	/* Translate event. */
	switch (in->command_id) {
	case SAM_EVENT_CID_DTX_CONNECTION:
		clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);

		/* If state has not changed: do not send new event. */
		if (ddev->state.base.state == in->data[0] &&
		    ddev->state.base.base_id == in->data[1])
			goto out;

		ddev->state.base.state = in->data[0];
		ddev->state.base.base_id = in->data[1];

		event.base.e.length = sizeof(struct sdtx_base_info);
		event.base.e.code = SDTX_EVENT_BASE_CONNECTION;
		event.base.v.state = sdtx_translate_base_state(ddev, in->data[0]);
		event.base.v.base_id = SDTX_BASE_TYPE_SSH(in->data[1]);
		break;

	case SAM_EVENT_CID_DTX_REQUEST:
		event.common.code = SDTX_EVENT_REQUEST;
		event.common.length = 0;
		break;

	case SAM_EVENT_CID_DTX_CANCEL:
		event.status.e.length = sizeof(u16);
		event.status.e.code = SDTX_EVENT_CANCEL;
		event.status.v = sdtx_translate_cancel_reason(ddev, in->data[0]);
		break;

	case SAM_EVENT_CID_DTX_LATCH_STATUS:
		clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);

		/* If state has not changed: do not send new event. */
		if (ddev->state.latch_status == in->data[0])
			goto out;

		ddev->state.latch_status = in->data[0];

		event.status.e.length = sizeof(u16);
		event.status.e.code = SDTX_EVENT_LATCH_STATUS;
		event.status.v = sdtx_translate_latch_status(ddev, in->data[0]);
		break;
	}

	sdtx_push_event(ddev, &event.common);

	/* Update device mode on base connection change. */
	if (in->command_id == SAM_EVENT_CID_DTX_CONNECTION) {
		unsigned long delay;

		delay = in->data[0] ? SDTX_DEVICE_MODE_DELAY_CONNECT : 0;
		sdtx_update_device_mode(ddev, delay);
	}

out:
	mutex_unlock(&ddev->write_lock);
	return SSAM_NOTIF_HANDLED;
}


/* -- State update functions. ----------------------------------------------- */

static bool sdtx_device_mode_invalid(u8 mode, u8 base_state)
{
	return ((base_state == SSAM_BAS_BASE_STATE_ATTACHED) &&
		(mode == SDTX_DEVICE_MODE_TABLET)) ||
	       ((base_state == SSAM_BAS_BASE_STATE_DETACH_SUCCESS) &&
		(mode != SDTX_DEVICE_MODE_TABLET));
}

static void sdtx_device_mode_workfn(struct work_struct *work)
{
	struct sdtx_device *ddev = container_of(work, struct sdtx_device, mode_work.work);
	struct sdtx_status_event event;
	struct ssam_bas_base_info base;
	int status, tablet;
	u8 mode;

	/* Get operation mode. */
	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
	if (status) {
		dev_err(ddev->dev, "failed to get device mode: %d\n", status);
		return;
	}

	/* Get base info. */
	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
	if (status) {
		dev_err(ddev->dev, "failed to get base info: %d\n", status);
		return;
	}

	/*
	 * In some cases (specifically when attaching the base), the device
	 * mode isn't updated right away. Thus we check if the device mode
	 * makes sense for the given base state and try again later if it
	 * doesn't.
	 */
	if (sdtx_device_mode_invalid(mode, base.state)) {
		dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
		sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
		return;
	}

	mutex_lock(&ddev->write_lock);
	clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);

	/* Avoid sending duplicate device-mode events. */
	if (ddev->state.device_mode == mode) {
		mutex_unlock(&ddev->write_lock);
		return;
	}

	ddev->state.device_mode = mode;

	event.e.length = sizeof(u16);
	event.e.code = SDTX_EVENT_DEVICE_MODE;
	event.v = mode;

	sdtx_push_event(ddev, &event.e);

	/* Send SW_TABLET_MODE event. */
	tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
	input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
	input_sync(ddev->mode_switch);

	mutex_unlock(&ddev->write_lock);
}

static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay)
{
	schedule_delayed_work(&ddev->mode_work, delay);
}

/* Must be executed with ddev->write_lock held. */
static void __sdtx_device_state_update_base(struct sdtx_device *ddev,
					    struct ssam_bas_base_info info)
{
	struct sdtx_base_info_event event;

	lockdep_assert_held(&ddev->write_lock);

	/* Prevent duplicate events. */
	if (ddev->state.base.state == info.state &&
	    ddev->state.base.base_id == info.base_id)
		return;

	ddev->state.base = info;

	event.e.length = sizeof(struct sdtx_base_info);
	event.e.code = SDTX_EVENT_BASE_CONNECTION;
	event.v.state = sdtx_translate_base_state(ddev, info.state);
	event.v.base_id = SDTX_BASE_TYPE_SSH(info.base_id);

	sdtx_push_event(ddev, &event.e);
}

/* Must be executed with ddev->write_lock held. */
static void __sdtx_device_state_update_mode(struct sdtx_device *ddev, u8 mode)
{
	struct sdtx_status_event event;
	int tablet;

	/*
	 * Note: This function must be called after updating the base state
	 * via __sdtx_device_state_update_base(), as we rely on the updated
	 * base state value in the validity check below.
	 */

	lockdep_assert_held(&ddev->write_lock);

	if (sdtx_device_mode_invalid(mode, ddev->state.base.state)) {
		dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
		sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
		return;
	}

	/* Prevent duplicate events. */
	if (ddev->state.device_mode == mode)
		return;

	ddev->state.device_mode = mode;

	/* Send event. */
	event.e.length = sizeof(u16);
	event.e.code = SDTX_EVENT_DEVICE_MODE;
	event.v = mode;

	sdtx_push_event(ddev, &event.e);

	/* Send SW_TABLET_MODE event. */
	tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
	input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
	input_sync(ddev->mode_switch);
}

/* Must be executed with ddev->write_lock held. */
static void __sdtx_device_state_update_latch(struct sdtx_device *ddev, u8 status)
{
	struct sdtx_status_event event;

	lockdep_assert_held(&ddev->write_lock);

	/* Prevent duplicate events. */
	if (ddev->state.latch_status == status)
		return;

	ddev->state.latch_status = status;

	event.e.length = sizeof(u16);
	event.e.code = SDTX_EVENT_LATCH_STATUS;
	event.v = sdtx_translate_latch_status(ddev, status);

	sdtx_push_event(ddev, &event.e);
}

static void sdtx_device_state_workfn(struct work_struct *work)
{
	struct sdtx_device *ddev = container_of(work, struct sdtx_device, state_work.work);
	struct ssam_bas_base_info base;
	u8 mode, latch;
	int status;

	/* Mark everything as dirty. */
	set_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
	set_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
	set_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);

	/*
	 * Ensure that the state gets marked as dirty before continuing to
	 * query it. Necessary to ensure that clear_bit() calls in
	 * sdtx_notifier() and sdtx_device_mode_workfn() actually clear these
	 * bits if an event is received while updating the state here.
	 */
	smp_mb__after_atomic();

	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
	if (status) {
		dev_err(ddev->dev, "failed to get base state: %d\n", status);
		return;
	}

	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
	if (status) {
		dev_err(ddev->dev, "failed to get device mode: %d\n", status);
		return;
	}

	status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
	if (status) {
		dev_err(ddev->dev, "failed to get latch status: %d\n", status);
		return;
	}

	mutex_lock(&ddev->write_lock);

	/*
	 * If the respective dirty-bit has been cleared, an event has been
	 * received, updating this state. The queried state may thus be out of
	 * date. At this point, we can safely assume that the state provided
	 * by the event is either up to date, or we're about to receive
	 * another event updating it.
	 */

	if (test_and_clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags))
		__sdtx_device_state_update_base(ddev, base);

	if (test_and_clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags))
		__sdtx_device_state_update_mode(ddev, mode);

	if (test_and_clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags))
		__sdtx_device_state_update_latch(ddev, latch);

	mutex_unlock(&ddev->write_lock);
}

static void sdtx_update_device_state(struct sdtx_device *ddev, unsigned long delay)
{
	schedule_delayed_work(&ddev->state_work, delay);
}


/* -- Common device initialization. ----------------------------------------- */

static int sdtx_device_init(struct sdtx_device *ddev, struct device *dev,
			    struct ssam_controller *ctrl)
{
	int status, tablet_mode;

	/* Basic initialization. */
	kref_init(&ddev->kref);
	init_rwsem(&ddev->lock);
	ddev->dev = dev;
	ddev->ctrl = ctrl;

	ddev->mdev.minor = MISC_DYNAMIC_MINOR;
	ddev->mdev.name = "surface_dtx";
	ddev->mdev.nodename = "surface/dtx";
	ddev->mdev.fops = &surface_dtx_fops;

	ddev->notif.base.priority = 1;
	ddev->notif.base.fn = sdtx_notifier;
	ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
	ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS;
	ddev->notif.event.id.instance = 0;
	ddev->notif.event.mask = SSAM_EVENT_MASK_NONE;
	ddev->notif.event.flags = SSAM_EVENT_SEQUENCED;

	init_waitqueue_head(&ddev->waitq);
	mutex_init(&ddev->write_lock);
	init_rwsem(&ddev->client_lock);
	INIT_LIST_HEAD(&ddev->client_list);

	INIT_DELAYED_WORK(&ddev->mode_work, sdtx_device_mode_workfn);
	INIT_DELAYED_WORK(&ddev->state_work, sdtx_device_state_workfn);

	/*
	 * Get current device state. We want to guarantee that events are only
	 * sent when state actually changes. Thus we cannot use special
	 * "uninitialized" values, as that would cause problems when manually
	 * querying the state in surface_dtx_pm_complete(). I.e. we would not
	 * be able to detect state changes there if no change event has been
	 * received between driver initialization and first device suspension.
	 *
	 * Note that we also need to do this before registering the event
	 * notifier, as that may access the state values.
	 */
	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &ddev->state.base);
	if (status)
		return status;

	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &ddev->state.device_mode);
	if (status)
		return status;

	status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &ddev->state.latch_status);
	if (status)
		return status;

	/* Set up tablet mode switch. */
	ddev->mode_switch = input_allocate_device();
	if (!ddev->mode_switch)
		return -ENOMEM;

	ddev->mode_switch->name = "Microsoft Surface DTX Device Mode Switch";
	ddev->mode_switch->phys = "ssam/01:11:01:00:00/input0";
	ddev->mode_switch->id.bustype = BUS_HOST;
	ddev->mode_switch->dev.parent = ddev->dev;

	tablet_mode = (ddev->state.device_mode != SDTX_DEVICE_MODE_LAPTOP);
	input_set_capability(ddev->mode_switch, EV_SW, SW_TABLET_MODE);
	input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet_mode);

	status = input_register_device(ddev->mode_switch);
	if (status) {
		input_free_device(ddev->mode_switch);
		return status;
	}

	/* Set up event notifier. */
	status = ssam_notifier_register(ddev->ctrl, &ddev->notif);
	if (status)
		goto err_notif;

	/* Register miscdevice. */
	status = misc_register(&ddev->mdev);
	if (status)
		goto err_mdev;

	/*
	 * Update device state in case it has changed between getting the
	 * initial mode and registering the event notifier.
	 */
	sdtx_update_device_state(ddev, 0);
	return 0;

err_mdev:
	ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
	cancel_delayed_work_sync(&ddev->mode_work);
err_notif:
	input_unregister_device(ddev->mode_switch);
	return status;
}

static struct sdtx_device *sdtx_device_create(struct device *dev, struct ssam_controller *ctrl)
{
	struct sdtx_device *ddev;
	int status;

	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return ERR_PTR(-ENOMEM);

	status = sdtx_device_init(ddev, dev, ctrl);
	if (status) {
		sdtx_device_put(ddev);
		return ERR_PTR(status);
	}

	return ddev;
}

static void sdtx_device_destroy(struct sdtx_device *ddev)
{
	struct sdtx_client *client;

	/*
	 * Mark device as shut-down. Prevent new clients from being added and
	 * new operations from being executed.
	 */
	set_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags);

	/* Disable notifiers, prevent new events from arriving. */
	ssam_notifier_unregister(ddev->ctrl, &ddev->notif);

	/* Stop mode_work, prevent access to mode_switch. */
	cancel_delayed_work_sync(&ddev->mode_work);

	/* Stop state_work. */
	cancel_delayed_work_sync(&ddev->state_work);

	/* With mode_work canceled, we can unregister the mode_switch. */
	input_unregister_device(ddev->mode_switch);

	/* Wake up async clients. */
	down_write(&ddev->client_lock);
	list_for_each_entry(client, &ddev->client_list, node) {
		kill_fasync(&client->fasync, SIGIO, POLL_HUP);
	}
	up_write(&ddev->client_lock);

	/* Wake up blocking clients. */
	wake_up_interruptible(&ddev->waitq);

	/*
	 * Wait for clients to finish their current operation. After this, the
	 * controller and device references are guaranteed to be no longer in
	 * use.
	 */
	down_write(&ddev->lock);
	ddev->dev = NULL;
	ddev->ctrl = NULL;
	up_write(&ddev->lock);

	/* Finally remove the misc-device. */
	misc_deregister(&ddev->mdev);

	/*
	 * We're now guaranteed that surface_dtx_open() won't be called any
	 * more, so we can now drop our reference.
	 */
	sdtx_device_put(ddev);
}


/* -- PM ops. --------------------------------------------------------------- */

#ifdef CONFIG_PM_SLEEP

static void surface_dtx_pm_complete(struct device *dev)
{
	struct sdtx_device *ddev = dev_get_drvdata(dev);

	/*
	 * Normally, the EC will store events while suspended (i.e. in
	 * display-off state) and release them when resumed (i.e. transitioned
	 * to display-on state). During hibernation, however, the EC will be
	 * shut down and does not store events. Furthermore, events might be
	 * dropped during prolonged suspension (it is currently unknown how
	 * big this event buffer is and how it behaves on overruns).
	 *
	 * To prevent any problems, we update the device state here. We do
	 * this delayed to ensure that any events sent by the EC directly
	 * after resuming will be handled first. The delay below has been
	 * chosen (experimentally), so that there should be ample time for
	 * these events to be handled, before we check and, if necessary,
	 * update the state.
	 */
	sdtx_update_device_state(ddev, msecs_to_jiffies(1000));
}

static const struct dev_pm_ops surface_dtx_pm_ops = {
	.complete = surface_dtx_pm_complete,
};

#else /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops surface_dtx_pm_ops = {};

#endif /* CONFIG_PM_SLEEP */


/* -- Platform driver. ------------------------------------------------------ */

static int surface_dtx_platform_probe(struct platform_device *pdev)
{
	struct ssam_controller *ctrl;
	struct sdtx_device *ddev;

	/* Link to EC. */
	ctrl = ssam_client_bind(&pdev->dev);
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);

	ddev = sdtx_device_create(&pdev->dev, ctrl);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	platform_set_drvdata(pdev, ddev);
	return 0;
}

static int surface_dtx_platform_remove(struct platform_device *pdev)
{
	sdtx_device_destroy(platform_get_drvdata(pdev));
	return 0;
}

static const struct acpi_device_id surface_dtx_acpi_match[] = {
	{ "MSHW0133", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, surface_dtx_acpi_match);

static struct platform_driver surface_dtx_platform_driver = {
	.probe = surface_dtx_platform_probe,
	.remove = surface_dtx_platform_remove,
	.driver = {
		.name = "surface_dtx_pltf",
		.acpi_match_table = surface_dtx_acpi_match,
		.pm = &surface_dtx_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};


/* -- SSAM device driver. --------------------------------------------------- */

#ifdef CONFIG_SURFACE_AGGREGATOR_BUS

static int surface_dtx_ssam_probe(struct ssam_device *sdev)
{
	struct sdtx_device *ddev;

	ddev = sdtx_device_create(&sdev->dev, sdev->ctrl);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	ssam_device_set_drvdata(sdev, ddev);
	return 0;
}

static void surface_dtx_ssam_remove(struct ssam_device *sdev)
{
	sdtx_device_destroy(ssam_device_get_drvdata(sdev));
}

static const struct ssam_device_id surface_dtx_ssam_match[] = {
	{ SSAM_SDEV(BAS, SAM, 0x00, 0x00) },
	{ },
};
MODULE_DEVICE_TABLE(ssam, surface_dtx_ssam_match);

static struct ssam_device_driver surface_dtx_ssam_driver = {
	.probe = surface_dtx_ssam_probe,
	.remove = surface_dtx_ssam_remove,
	.match_table = surface_dtx_ssam_match,
	.driver = {
		.name = "surface_dtx",
		.pm = &surface_dtx_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

static int ssam_dtx_driver_register(void)
{
	return ssam_device_driver_register(&surface_dtx_ssam_driver);
}

static void ssam_dtx_driver_unregister(void)
{
	ssam_device_driver_unregister(&surface_dtx_ssam_driver);
}

#else /* CONFIG_SURFACE_AGGREGATOR_BUS */

static int ssam_dtx_driver_register(void)
{
	return 0;
}

static void ssam_dtx_driver_unregister(void)
{
}

#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */


/* -- Module setup. --------------------------------------------------------- */

static int __init surface_dtx_init(void)
{
	int status;

	status = ssam_dtx_driver_register();
	if (status)
		return status;

	status = platform_driver_register(&surface_dtx_platform_driver);
	if (status)
		ssam_dtx_driver_unregister();

	return status;
}
module_init(surface_dtx_init);

static void __exit surface_dtx_exit(void)
{
	platform_driver_unregister(&surface_dtx_platform_driver);
	ssam_dtx_driver_unregister();
}
module_exit(surface_dtx_exit);

MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
MODULE_DESCRIPTION("Detachment-system driver for Surface System Aggregator Module");
MODULE_LICENSE("GPL");