1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
4 * All rights reserved.
5 */
6
7#include <linux/acpi.h>
8#include <linux/crash_dump.h>
9#include <linux/visorbus.h>
10
11#include "visorbus_private.h"
12
13/* {72120008-4AAB-11DC-8530-444553544200} */
14#define VISOR_SIOVM_GUID GUID_INIT(0x72120008, 0x4AAB, 0x11DC, 0x85, 0x30, \
15				   0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
16
17static const guid_t visor_vhba_channel_guid = VISOR_VHBA_CHANNEL_GUID;
18static const guid_t visor_siovm_guid = VISOR_SIOVM_GUID;
19static const guid_t visor_controlvm_channel_guid = VISOR_CONTROLVM_CHANNEL_GUID;
20
21#define POLLJIFFIES_CONTROLVM_FAST 1
22#define POLLJIFFIES_CONTROLVM_SLOW 100
23
24#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
25
26#define UNISYS_VISOR_LEAF_ID 0x40000000
27
28/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
29#define UNISYS_VISOR_ID_EBX 0x73696e55
30#define UNISYS_VISOR_ID_ECX 0x70537379
31#define UNISYS_VISOR_ID_EDX 0x34367261
32
33/*
34 * When the controlvm channel is idle for at least MIN_IDLE_SECONDS, we switch
35 * to slow polling mode. As soon as we get a controlvm message, we switch back
36 * to fast polling mode.
37 */
38#define MIN_IDLE_SECONDS 10
39
/* Tracks progress while parsing a controlvm message's parameter payload. */
struct parser_context {
	/* total bytes allocated for this context — set by creator; TODO confirm */
	unsigned long allocbytes;
	/* size in bytes of the parameter area (see parser_name_get()) */
	unsigned long param_bytes;
	/* current read position within the parameter area */
	u8 *curr;
	/* bytes left to consume at @curr */
	unsigned long bytes_remaining;
	/* presumably true when payload is a raw byte stream; verify at callers */
	bool byte_stream;
	/* parameter header copied from the controlvm payload */
	struct visor_controlvm_parameters_header data;
};
48
49/* VMCALL_CONTROLVM_ADDR: Used by all guests, not just IO. */
50#define VMCALL_CONTROLVM_ADDR 0x0501
51
/* Result codes returned by s-Par VMCALL interfaces. */
enum vmcall_result {
	VMCALL_RESULT_SUCCESS = 0,
	VMCALL_RESULT_INVALID_PARAM = 1,
	VMCALL_RESULT_DATA_UNAVAILABLE = 2,
	VMCALL_RESULT_FAILURE_UNAVAILABLE = 3,
	VMCALL_RESULT_DEVICE_ERROR = 4,
	VMCALL_RESULT_DEVICE_NOT_READY = 5
};
60
/*
 * struct vmcall_io_controlvm_addr_params - Structure for IO VMCALLS. Has
 *					    parameters to VMCALL_CONTROLVM_ADDR
 *					    interface.
 * @address:	   The Guest-relative physical address of the ControlVm channel.
 *		   This VMCall fills this in with the appropriate address.
 *		   Contents provided by this VMCALL (OUT).
 * @channel_bytes: The size of the ControlVm channel in bytes. This VMCall
 *		   fills this in with the appropriate size. Contents provided
 *		   by this VMCALL (OUT).
 * @unused:	   Unused Bytes in the 64-Bit Aligned Struct.
 */
struct vmcall_io_controlvm_addr_params {
	u64 address;
	u32 channel_bytes;
	u8 unused[4];
} __packed;
78
/* Global driver state for the visorchipset device (singleton: chipset_dev). */
struct visorchipset_device {
	struct acpi_device *acpi_device;
	/* current controlvm polling interval, in jiffies */
	unsigned long poll_jiffies;
	/* when we got our last controlvm message */
	unsigned long most_recent_message_jiffies;
	struct delayed_work periodic_controlvm_work;
	struct visorchannel *controlvm_channel;
	/* payload bytes currently buffered from a controlvm message */
	unsigned long controlvm_payload_bytes_buffered;
	/*
	 * The following variables are used to handle the scenario where we are
	 * unable to offload the payload from a controlvm message due to memory
	 * requirements. In this scenario, we simply stash the controlvm
	 * message, then attempt to process it again the next time
	 * controlvm_periodic_work() runs.
	 */
	struct controlvm_message controlvm_pending_msg;
	bool controlvm_pending_msg_valid;
	/* parameters returned by the VMCALL_CONTROLVM_ADDR vmcall */
	struct vmcall_io_controlvm_addr_params controlvm_params;
};
98
99static struct visorchipset_device *chipset_dev;
100
/*
 * A queued DEVICE_CHANGESTATE message awaiting completion by the user-level
 * parahotplug udev script; matched back to the script by @id.
 */
struct parahotplug_request {
	struct list_head list;		/* entry in parahotplug_request_list */
	int id;				/* unique match token for the script */
	unsigned long expiration;	/* jiffies value when request expires */
	struct controlvm_message msg;	/* the original CONTROLVM message */
};
107
/* sysfs attribute accessors for the controlvm channel installation fields */
109static ssize_t toolaction_show(struct device *dev,
110			       struct device_attribute *attr,
111			       char *buf)
112{
113	u8 tool_action = 0;
114	int err;
115
116	err = visorchannel_read(chipset_dev->controlvm_channel,
117				offsetof(struct visor_controlvm_channel,
118					 tool_action),
119				&tool_action, sizeof(u8));
120	if (err)
121		return err;
122	return sprintf(buf, "%u\n", tool_action);
123}
124
125static ssize_t toolaction_store(struct device *dev,
126				struct device_attribute *attr,
127				const char *buf, size_t count)
128{
129	u8 tool_action;
130	int err;
131
132	if (kstrtou8(buf, 10, &tool_action))
133		return -EINVAL;
134	err = visorchannel_write(chipset_dev->controlvm_channel,
135				 offsetof(struct visor_controlvm_channel,
136					  tool_action),
137				 &tool_action, sizeof(u8));
138	if (err)
139		return err;
140	return count;
141}
142static DEVICE_ATTR_RW(toolaction);
143
144static ssize_t boottotool_show(struct device *dev,
145			       struct device_attribute *attr,
146			       char *buf)
147{
148	struct efi_visor_indication efi_visor_indication;
149	int err;
150
151	err = visorchannel_read(chipset_dev->controlvm_channel,
152				offsetof(struct visor_controlvm_channel,
153					 efi_visor_ind),
154				&efi_visor_indication,
155				sizeof(struct efi_visor_indication));
156	if (err)
157		return err;
158	return sprintf(buf, "%u\n", efi_visor_indication.boot_to_tool);
159}
160
161static ssize_t boottotool_store(struct device *dev,
162				struct device_attribute *attr,
163				const char *buf, size_t count)
164{
165	int val, err;
166	struct efi_visor_indication efi_visor_indication;
167
168	if (kstrtoint(buf, 10, &val))
169		return -EINVAL;
170	efi_visor_indication.boot_to_tool = val;
171	err = visorchannel_write(chipset_dev->controlvm_channel,
172				 offsetof(struct visor_controlvm_channel,
173					  efi_visor_ind),
174				 &(efi_visor_indication),
175				 sizeof(struct efi_visor_indication));
176	if (err)
177		return err;
178	return count;
179}
180static DEVICE_ATTR_RW(boottotool);
181
182static ssize_t error_show(struct device *dev, struct device_attribute *attr,
183			  char *buf)
184{
185	u32 error = 0;
186	int err;
187
188	err = visorchannel_read(chipset_dev->controlvm_channel,
189				offsetof(struct visor_controlvm_channel,
190					 installation_error),
191				&error, sizeof(u32));
192	if (err)
193		return err;
194	return sprintf(buf, "%u\n", error);
195}
196
197static ssize_t error_store(struct device *dev, struct device_attribute *attr,
198			   const char *buf, size_t count)
199{
200	u32 error;
201	int err;
202
203	if (kstrtou32(buf, 10, &error))
204		return -EINVAL;
205	err = visorchannel_write(chipset_dev->controlvm_channel,
206				 offsetof(struct visor_controlvm_channel,
207					  installation_error),
208				 &error, sizeof(u32));
209	if (err)
210		return err;
211	return count;
212}
213static DEVICE_ATTR_RW(error);
214
215static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
216			   char *buf)
217{
218	u32 text_id = 0;
219	int err;
220
221	err = visorchannel_read(chipset_dev->controlvm_channel,
222				offsetof(struct visor_controlvm_channel,
223					 installation_text_id),
224				&text_id, sizeof(u32));
225	if (err)
226		return err;
227	return sprintf(buf, "%u\n", text_id);
228}
229
230static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
231			    const char *buf, size_t count)
232{
233	u32 text_id;
234	int err;
235
236	if (kstrtou32(buf, 10, &text_id))
237		return -EINVAL;
238	err = visorchannel_write(chipset_dev->controlvm_channel,
239				 offsetof(struct visor_controlvm_channel,
240					  installation_text_id),
241				 &text_id, sizeof(u32));
242	if (err)
243		return err;
244	return count;
245}
246static DEVICE_ATTR_RW(textid);
247
248static ssize_t remaining_steps_show(struct device *dev,
249				    struct device_attribute *attr, char *buf)
250{
251	u16 remaining_steps = 0;
252	int err;
253
254	err = visorchannel_read(chipset_dev->controlvm_channel,
255				offsetof(struct visor_controlvm_channel,
256					 installation_remaining_steps),
257				&remaining_steps, sizeof(u16));
258	if (err)
259		return err;
260	return sprintf(buf, "%hu\n", remaining_steps);
261}
262
263static ssize_t remaining_steps_store(struct device *dev,
264				     struct device_attribute *attr,
265				     const char *buf, size_t count)
266{
267	u16 remaining_steps;
268	int err;
269
270	if (kstrtou16(buf, 10, &remaining_steps))
271		return -EINVAL;
272	err = visorchannel_write(chipset_dev->controlvm_channel,
273				 offsetof(struct visor_controlvm_channel,
274					  installation_remaining_steps),
275				 &remaining_steps, sizeof(u16));
276	if (err)
277		return err;
278	return count;
279}
280static DEVICE_ATTR_RW(remaining_steps);
281
282static void controlvm_init_response(struct controlvm_message *msg,
283				    struct controlvm_message_header *msg_hdr,
284				    int response)
285{
286	memset(msg, 0, sizeof(struct controlvm_message));
287	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
288	msg->hdr.payload_bytes = 0;
289	msg->hdr.payload_vm_offset = 0;
290	msg->hdr.payload_max_bytes = 0;
291	if (response < 0) {
292		msg->hdr.flags.failed = 1;
293		msg->hdr.completion_status = (u32)(-response);
294	}
295}
296
297static int controlvm_respond_chipset_init(
298				struct controlvm_message_header *msg_hdr,
299				int response,
300				enum visor_chipset_feature features)
301{
302	struct controlvm_message outmsg;
303
304	controlvm_init_response(&outmsg, msg_hdr, response);
305	outmsg.cmd.init_chipset.features = features;
306	return visorchannel_signalinsert(chipset_dev->controlvm_channel,
307					 CONTROLVM_QUEUE_REQUEST, &outmsg);
308}
309
310static int chipset_init(struct controlvm_message *inmsg)
311{
312	static int chipset_inited;
313	enum visor_chipset_feature features = 0;
314	int rc = CONTROLVM_RESP_SUCCESS;
315	int res = 0;
316
317	if (chipset_inited) {
318		rc = -CONTROLVM_RESP_ALREADY_DONE;
319		res = -EIO;
320		goto out_respond;
321	}
322	chipset_inited = 1;
323	/*
324	 * Set features to indicate we support parahotplug (if Command also
325	 * supports it). Set the "reply" bit so Command knows this is a
326	 * features-aware driver.
327	 */
328	features = inmsg->cmd.init_chipset.features &
329		   VISOR_CHIPSET_FEATURE_PARA_HOTPLUG;
330	features |= VISOR_CHIPSET_FEATURE_REPLY;
331
332out_respond:
333	if (inmsg->hdr.flags.response_expected)
334		res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
335
336	return res;
337}
338
339static int controlvm_respond(struct controlvm_message_header *msg_hdr,
340			     int response, struct visor_segment_state *state)
341{
342	struct controlvm_message outmsg;
343
344	controlvm_init_response(&outmsg, msg_hdr, response);
345	if (outmsg.hdr.flags.test_message == 1)
346		return -EINVAL;
347	if (state) {
348		outmsg.cmd.device_change_state.state = *state;
349		outmsg.cmd.device_change_state.flags.phys_device = 1;
350	}
351	return visorchannel_signalinsert(chipset_dev->controlvm_channel,
352					 CONTROLVM_QUEUE_REQUEST, &outmsg);
353}
354
/* Which saved-crash-message slot save_crash_message() writes: device or bus. */
enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};
359
360static int save_crash_message(struct controlvm_message *msg,
361			      enum crash_obj_type cr_type)
362{
363	u32 local_crash_msg_offset;
364	u16 local_crash_msg_count;
365	int err;
366
367	err = visorchannel_read(chipset_dev->controlvm_channel,
368				offsetof(struct visor_controlvm_channel,
369					 saved_crash_message_count),
370				&local_crash_msg_count, sizeof(u16));
371	if (err) {
372		dev_err(&chipset_dev->acpi_device->dev,
373			"failed to read message count\n");
374		return err;
375	}
376	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
377		dev_err(&chipset_dev->acpi_device->dev,
378			"invalid number of messages\n");
379		return -EIO;
380	}
381	err = visorchannel_read(chipset_dev->controlvm_channel,
382				offsetof(struct visor_controlvm_channel,
383					 saved_crash_message_offset),
384				&local_crash_msg_offset, sizeof(u32));
385	if (err) {
386		dev_err(&chipset_dev->acpi_device->dev,
387			"failed to read offset\n");
388		return err;
389	}
390	switch (cr_type) {
391	case CRASH_DEV:
392		local_crash_msg_offset += sizeof(struct controlvm_message);
393		err = visorchannel_write(chipset_dev->controlvm_channel,
394					 local_crash_msg_offset, msg,
395					 sizeof(struct controlvm_message));
396		if (err) {
397			dev_err(&chipset_dev->acpi_device->dev,
398				"failed to write dev msg\n");
399			return err;
400		}
401		break;
402	case CRASH_BUS:
403		err = visorchannel_write(chipset_dev->controlvm_channel,
404					 local_crash_msg_offset, msg,
405					 sizeof(struct controlvm_message));
406		if (err) {
407			dev_err(&chipset_dev->acpi_device->dev,
408				"failed to write bus msg\n");
409			return err;
410		}
411		break;
412	default:
413		dev_err(&chipset_dev->acpi_device->dev,
414			"Invalid crash_obj_type\n");
415		break;
416	}
417	return 0;
418}
419
420static int controlvm_responder(enum controlvm_id cmd_id,
421			       struct controlvm_message_header *pending_msg_hdr,
422			       int response)
423{
424	if (pending_msg_hdr->id != (u32)cmd_id)
425		return -EINVAL;
426
427	return controlvm_respond(pending_msg_hdr, response, NULL);
428}
429
430static int device_changestate_responder(enum controlvm_id cmd_id,
431					struct visor_device *p, int response,
432					struct visor_segment_state state)
433{
434	struct controlvm_message outmsg;
435
436	if (p->pending_msg_hdr->id != cmd_id)
437		return -EINVAL;
438
439	controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
440	outmsg.cmd.device_change_state.bus_no = p->chipset_bus_no;
441	outmsg.cmd.device_change_state.dev_no = p->chipset_dev_no;
442	outmsg.cmd.device_change_state.state = state;
443	return visorchannel_signalinsert(chipset_dev->controlvm_channel,
444					 CONTROLVM_QUEUE_REQUEST, &outmsg);
445}
446
/*
 * visorbus_create() - handle a CONTROLVM bus-create message
 * @inmsg: the message describing the bus to create
 *
 * Allocates a visor_device for the bus and its visorchannel. If a response
 * is expected, a copy of the header is stashed in bus_info->pending_msg_hdr
 * and the response is sent later by visorbus_create_instance(); on the error
 * paths the response is sent here instead.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int visorbus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = cmd->create_bus.bus_no;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int err;

	/* Reject a second create for an already-created bus. */
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (bus_info && bus_info->state.created == 1) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed %s: already exists\n", __func__);
		err = -EEXIST;
		goto err_respond;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		err = -ENOMEM;
		goto err_respond;
	}
	INIT_LIST_HEAD(&bus_info->list_all);
	bus_info->chipset_bus_no = bus_no;
	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
	/* SIOVM buses get their create message saved for crash handling. */
	if (guid_equal(&cmd->create_bus.bus_inst_guid, &visor_siovm_guid)) {
		err = save_crash_message(inmsg, CRASH_BUS);
		if (err)
			goto err_free_bus_info;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_free_bus_info;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}
	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
					   GFP_KERNEL,
					   &cmd->create_bus.bus_data_type_guid,
					   false);
	if (!visorchannel) {
		err = -ENOMEM;
		goto err_free_pending_msg;
	}
	bus_info->visorchannel = visorchannel;
	/* Response will be handled by visorbus_create_instance on success */
	err = visorbus_create_instance(bus_info);
	if (err)
		goto err_destroy_channel;
	return 0;

err_destroy_channel:
	visorchannel_destroy(visorchannel);

err_free_pending_msg:
	/* NULL when no response was expected; kfree(NULL) is a no-op. */
	kfree(bus_info->pending_msg_hdr);

err_free_bus_info:
	kfree(bus_info);

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
515
/*
 * visorbus_destroy() - handle a CONTROLVM bus-destroy message
 * @inmsg: the message naming the bus to remove
 *
 * On success the response (if requested) is sent later by
 * visorbus_remove_instance(), which owns the stashed pending_msg_hdr.
 *
 * Return: 0 on success, negative errno on failure (responding here).
 */
static int visorbus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = inmsg->cmd.destroy_bus.bus_no;
	struct visor_device *bus_info;
	int err;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		err = -ENODEV;
		goto err_respond;
	}
	if (bus_info->state.created == 0) {
		err = -ENOENT;
		goto err_respond;
	}
	if (bus_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		err = -EEXIST;
		goto err_respond;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_respond;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}
	/* Response will be handled by visorbus_remove_instance */
	visorbus_remove_instance(bus_info);
	return 0;

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
556
557static const guid_t *parser_id_get(struct parser_context *ctx)
558{
559	return &ctx->data.id;
560}
561
562static void *parser_string_get(u8 *pscan, int nscan)
563{
564	int value_length;
565	void *value;
566
567	if (nscan == 0)
568		return NULL;
569
570	value_length = strnlen(pscan, nscan);
571	value = kzalloc(value_length + 1, GFP_KERNEL);
572	if (!value)
573		return NULL;
574	if (value_length > 0)
575		memcpy(value, pscan, value_length);
576	return value;
577}
578
579static void *parser_name_get(struct parser_context *ctx)
580{
581	struct visor_controlvm_parameters_header *phdr;
582
583	phdr = &ctx->data;
584	if ((unsigned long)phdr->name_offset +
585	    (unsigned long)phdr->name_length > ctx->param_bytes)
586		return NULL;
587	ctx->curr = (char *)&phdr + phdr->name_offset;
588	ctx->bytes_remaining = phdr->name_length;
589	return parser_string_get(ctx->curr, phdr->name_length);
590}
591
/*
 * visorbus_configure() - handle a CONTROLVM bus-configure message
 * @inmsg:      the configure message (carries the guest handle)
 * @parser_ctx: optional parameter payload; when present, supplies the
 *              partition GUID and bus name
 *
 * Return: 0 on success, negative errno on failure. A response is sent in
 * both cases when one was requested.
 */
static int visorbus_configure(struct controlvm_message *inmsg,
			      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visor_device *bus_info;
	int err = 0;

	bus_no = cmd->configure_bus.bus_no;
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		err = -EINVAL;
		goto err_respond;
	}
	if (bus_info->state.created == 0) {
		err = -EINVAL;
		goto err_respond;
	}
	/* A bus still awaiting a response cannot be reconfigured. */
	if (bus_info->pending_msg_hdr) {
		err = -EIO;
		goto err_respond;
	}
	err = visorchannel_set_clientpartition(bus_info->visorchannel,
					       cmd->configure_bus.guest_handle);
	if (err)
		goto err_respond;
	if (parser_ctx) {
		const guid_t *partition_guid = parser_id_get(parser_ctx);

		guid_copy(&bus_info->partition_guid, partition_guid);
		/* may be NULL if the name cannot be extracted/allocated */
		bus_info->name = parser_name_get(parser_ctx);
	}
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return 0;

err_respond:
	dev_err(&chipset_dev->acpi_device->dev,
		"%s exited with err: %d\n", __func__, err);
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
635
/*
 * visorbus_device_create() - handle a CONTROLVM device-create message
 * @inmsg: the message describing the device to create
 *
 * Allocates a visor_device and its channel on an existing bus. vHBA devices
 * additionally have the create message saved for crash handling. If a
 * response is expected, the header is stashed in dev_info->pending_msg_hdr
 * and create_visor_device() sends the response; error paths respond here.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int visorbus_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visor_device *dev_info;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int err;

	/* The parent bus must exist and have completed creation. */
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to get bus by id: %d\n", bus_no);
		err = -ENODEV;
		goto err_respond;
	}
	if (bus_info->state.created == 0) {
		dev_err(&chipset_dev->acpi_device->dev,
			"bus not created, id: %d\n", bus_no);
		err = -EINVAL;
		goto err_respond;
	}
	/* Reject a duplicate create for an already-created device. */
	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (dev_info && dev_info->state.created == 1) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to get bus by id: %d/%d\n", bus_no, dev_no);
		err = -EEXIST;
		goto err_respond;
	}

	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		err = -ENOMEM;
		goto err_respond;
	}
	dev_info->chipset_bus_no = bus_no;
	dev_info->chipset_dev_no = dev_no;
	guid_copy(&dev_info->inst, &cmd->create_device.dev_inst_guid);
	dev_info->device.parent = &bus_info->device;
	visorchannel = visorchannel_create(cmd->create_device.channel_addr,
					   GFP_KERNEL,
					   &cmd->create_device.data_type_guid,
					   true);
	if (!visorchannel) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to create visorchannel: %d/%d\n",
			bus_no, dev_no);
		err = -ENOMEM;
		goto err_free_dev_info;
	}
	dev_info->visorchannel = visorchannel;
	guid_copy(&dev_info->channel_type_guid,
		  &cmd->create_device.data_type_guid);
	/* vHBA create messages are saved for use after a guest crash. */
	if (guid_equal(&cmd->create_device.data_type_guid,
		       &visor_vhba_channel_guid)) {
		err = save_crash_message(inmsg, CRASH_DEV);
		if (err)
			goto err_destroy_visorchannel;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_destroy_visorchannel;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}
	/* create_visor_device will send response */
	err = create_visor_device(dev_info);
	if (err)
		goto err_destroy_visorchannel;

	return 0;

err_destroy_visorchannel:
	visorchannel_destroy(visorchannel);

err_free_dev_info:
	kfree(dev_info);

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
725
/*
 * visorbus_device_changestate() - handle a CONTROLVM device-change-state
 *                                 message by pausing or resuming the device
 * @inmsg: the message carrying the target bus/dev and requested state
 *
 * "Running" maps to visorchipset_device_resume(), "standby" to
 * visorchipset_device_pause(); those paths send the response using the
 * stashed pending_msg_hdr. Other states are silently ignored (returns 0).
 *
 * Return: 0 on success, negative errno on failure (responding here).
 */
static int visorbus_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct visor_segment_state state = cmd->device_change_state.state;
	struct visor_device *dev_info;
	int err = 0;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info) {
		err = -ENODEV;
		goto err_respond;
	}
	if (dev_info->state.created == 0) {
		err = -EINVAL;
		goto err_respond;
	}
	if (dev_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		err = -EIO;
		goto err_respond;
	}

	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_respond;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}
	if (state.alive == segment_state_running.alive &&
	    state.operating == segment_state_running.operating)
		/* Response will be sent from visorchipset_device_resume */
		err = visorchipset_device_resume(dev_info);
	/* ServerNotReady / ServerLost / SegmentStateStandby */
	else if (state.alive == segment_state_standby.alive &&
		 state.operating == segment_state_standby.operating)
		/*
		 * technically this is standby case where server is lost.
		 * Response will be sent from visorchipset_device_pause.
		 */
		err = visorchipset_device_pause(dev_info);
	if (err)
		goto err_respond;
	return 0;

err_respond:
	dev_err(&chipset_dev->acpi_device->dev, "failed: %d\n", err);
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
783
/*
 * visorbus_device_destroy() - handle a CONTROLVM device-destroy message
 * @inmsg: the message naming the device to remove
 *
 * On success, remove_visor_device() is expected to send the response using
 * the stashed pending_msg_hdr; error paths respond here.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int visorbus_device_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = cmd->destroy_device.bus_no;
	u32 dev_no = cmd->destroy_device.dev_no;
	struct visor_device *dev_info;
	int err;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info) {
		err = -ENODEV;
		goto err_respond;
	}
	if (dev_info->state.created == 0) {
		err = -EINVAL;
		goto err_respond;
	}
	if (dev_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		err = -EIO;
		goto err_respond;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_respond;
		}

		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}
	/* free the name set by visorbus_configure() before teardown */
	kfree(dev_info->name);
	remove_visor_device(dev_info);
	return 0;

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
827
828/*
829 * The general parahotplug flow works as follows. The visorchipset receives
830 * a DEVICE_CHANGESTATE message from Command specifying a physical device
831 * to enable or disable. The CONTROLVM message handler calls
832 * parahotplug_process_message, which then adds the message to a global list
833 * and kicks off a udev event which causes a user level script to enable or
834 * disable the specified device. The udev script then writes to
835 * /sys/devices/platform/visorchipset/parahotplug, which causes the
836 * parahotplug store functions to get called, at which point the
837 * appropriate CONTROLVM message is retrieved from the list and responded to.
838 */
839
840#define PARAHOTPLUG_TIMEOUT_MS 2000
841
/*
 * parahotplug_next_id() - generate unique int to match an outstanding
 *                         CONTROLVM message with a udev script /sys
 *                         response
 *
 * Return: a unique integer value
 */
static int parahotplug_next_id(void)
{
	/* monotonically increasing; wrap-around is harmless for matching */
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}
855
/*
 * parahotplug_next_expiration() - returns the time (in jiffies) when a
 *                                 CONTROLVM message on the list should expire
 *                                 -- PARAHOTPLUG_TIMEOUT_MS in the future
 *
 * Return: expected expiration time (in jiffies)
 */
static unsigned long parahotplug_next_expiration(void)
{
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}
867
/*
 * parahotplug_request_create() - create a parahotplug_request, which is
 *                                basically a wrapper for a CONTROLVM_MESSAGE
 *                                that we can stick on a list
 * @msg: the message to insert in the request
 *
 * Return: the request containing the provided message, or NULL on allocation
 *         failure; caller owns the request (see parahotplug_request_destroy())
 */
static struct parahotplug_request *parahotplug_request_create(
						struct controlvm_message *msg)
{
	struct parahotplug_request *req;

	req = kmalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;
	/* unique id lets the udev script's /sys write find this request */
	req->id = parahotplug_next_id();
	req->expiration = parahotplug_next_expiration();
	req->msg = *msg;
	return req;
}
889
/*
 * parahotplug_request_destroy() - free a parahotplug_request
 * @req: the request to deallocate (must already be off the request list)
 */
static void parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}
898
899static LIST_HEAD(parahotplug_request_list);
900/* lock for above */
901static DEFINE_SPINLOCK(parahotplug_request_list_lock);
902
903/*
904 * parahotplug_request_complete() - mark request as complete
905 * @id:     the id of the request
906 * @active: indicates whether the request is assigned to active partition
907 *
908 * Called from the /sys handler, which means the user script has
909 * finished the enable/disable. Find the matching identifier, and
910 * respond to the CONTROLVM message with success.
911 *
912 * Return: 0 on success or -EINVAL on failure
913 */
914static int parahotplug_request_complete(int id, u16 active)
915{
916	struct list_head *pos;
917	struct list_head *tmp;
918	struct parahotplug_request *req;
919
920	spin_lock(&parahotplug_request_list_lock);
921	/* Look for a request matching "id". */
922	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
923		req = list_entry(pos, struct parahotplug_request, list);
924		if (req->id == id) {
925			/*
926			 * Found a match. Remove it from the list and
927			 * respond.
928			 */
929			list_del(pos);
930			spin_unlock(&parahotplug_request_list_lock);
931			req->msg.cmd.device_change_state.state.active = active;
932			if (req->msg.hdr.flags.response_expected)
933				controlvm_respond(
934				       &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
935				       &req->msg.cmd.device_change_state.state);
936			parahotplug_request_destroy(req);
937			return 0;
938		}
939	}
940	spin_unlock(&parahotplug_request_list_lock);
941	return -EINVAL;
942}
943
944/*
945 * devicedisabled_store() - disables the hotplug device
946 * @dev:   sysfs interface variable not utilized in this function
947 * @attr:  sysfs interface variable not utilized in this function
948 * @buf:   buffer containing the device id
949 * @count: the size of the buffer
950 *
951 * The parahotplug/devicedisabled interface gets called by our support script
952 * when an SR-IOV device has been shut down. The ID is passed to the script
953 * and then passed back when the device has been removed.
954 *
955 * Return: the size of the buffer for success or negative for error
956 */
957static ssize_t devicedisabled_store(struct device *dev,
958				    struct device_attribute *attr,
959				    const char *buf, size_t count)
960{
961	unsigned int id;
962	int err;
963
964	if (kstrtouint(buf, 10, &id))
965		return -EINVAL;
966	err = parahotplug_request_complete(id, 0);
967	if (err < 0)
968		return err;
969	return count;
970}
971static DEVICE_ATTR_WO(devicedisabled);
972
973/*
974 * deviceenabled_store() - enables the hotplug device
975 * @dev:   sysfs interface variable not utilized in this function
976 * @attr:  sysfs interface variable not utilized in this function
977 * @buf:   buffer containing the device id
978 * @count: the size of the buffer
979 *
980 * The parahotplug/deviceenabled interface gets called by our support script
981 * when an SR-IOV device has been recovered. The ID is passed to the script
982 * and then passed back when the device has been brought back up.
983 *
984 * Return: the size of the buffer for success or negative for error
985 */
986static ssize_t deviceenabled_store(struct device *dev,
987				   struct device_attribute *attr,
988				   const char *buf, size_t count)
989{
990	unsigned int id;
991
992	if (kstrtouint(buf, 10, &id))
993		return -EINVAL;
994	parahotplug_request_complete(id, 1);
995	return count;
996}
997static DEVICE_ATTR_WO(deviceenabled);
998
/* Attributes exposed under the "install" sysfs group. */
static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static const struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

/* Attributes exposed under the "parahotplug" sysfs group (udev script API). */
static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static const struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

/* All sysfs groups registered for the visorchipset device. */
static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_parahotplug_group,
	NULL
};
1029
1030/*
1031 * parahotplug_request_kickoff() - initiate parahotplug request
1032 * @req: the request to initiate
1033 *
1034 * Cause uevent to run the user level script to do the disable/enable specified
1035 * in the parahotplug_request.
1036 */
1037static int parahotplug_request_kickoff(struct parahotplug_request *req)
1038{
1039	struct controlvm_message_packet *cmd = &req->msg.cmd;
1040	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1041	     env_func[40];
1042	char *envp[] = { env_cmd, env_id, env_state, env_bus, env_dev,
1043			 env_func, NULL
1044	};
1045
1046	sprintf(env_cmd, "VISOR_PARAHOTPLUG=1");
1047	sprintf(env_id, "VISOR_PARAHOTPLUG_ID=%d", req->id);
1048	sprintf(env_state, "VISOR_PARAHOTPLUG_STATE=%d",
1049		cmd->device_change_state.state.active);
1050	sprintf(env_bus, "VISOR_PARAHOTPLUG_BUS=%d",
1051		cmd->device_change_state.bus_no);
1052	sprintf(env_dev, "VISOR_PARAHOTPLUG_DEVICE=%d",
1053		cmd->device_change_state.dev_no >> 3);
1054	sprintf(env_func, "VISOR_PARAHOTPLUG_FUNCTION=%d",
1055		cmd->device_change_state.dev_no & 0x7);
1056	return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1057				  KOBJ_CHANGE, envp);
1058}
1059
1060/*
1061 * parahotplug_process_message() - enables or disables a PCI device by kicking
1062 *                                 off a udev script
1063 * @inmsg: the message indicating whether to enable or disable
1064 */
1065static int parahotplug_process_message(struct controlvm_message *inmsg)
1066{
1067	struct parahotplug_request *req;
1068	int err;
1069
1070	req = parahotplug_request_create(inmsg);
1071	if (!req)
1072		return -ENOMEM;
1073	/*
1074	 * For enable messages, just respond with success right away, we don't
1075	 * need to wait to see if the enable was successful.
1076	 */
1077	if (inmsg->cmd.device_change_state.state.active) {
1078		err = parahotplug_request_kickoff(req);
1079		if (err)
1080			goto err_respond;
1081		controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS,
1082				  &inmsg->cmd.device_change_state.state);
1083		parahotplug_request_destroy(req);
1084		return 0;
1085	}
1086	/*
1087	 * For disable messages, add the request to the request list before
1088	 * kicking off the udev script. It won't get responded to until the
1089	 * script has indicated it's done.
1090	 */
1091	spin_lock(&parahotplug_request_list_lock);
1092	list_add_tail(&req->list, &parahotplug_request_list);
1093	spin_unlock(&parahotplug_request_list_lock);
1094	err = parahotplug_request_kickoff(req);
1095	if (err)
1096		goto err_respond;
1097	return 0;
1098
1099err_respond:
1100	controlvm_respond(&inmsg->hdr, err,
1101			  &inmsg->cmd.device_change_state.state);
1102	return err;
1103}
1104
1105/*
1106 * chipset_ready_uevent() - sends chipset_ready action
1107 *
1108 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1109 *
1110 * Return: 0 on success, negative on failure
1111 */
1112static int chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
1113{
1114	int res;
1115
1116	res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj, KOBJ_ONLINE);
1117	if (msg_hdr->flags.response_expected)
1118		controlvm_respond(msg_hdr, res, NULL);
1119	return res;
1120}
1121
1122/*
1123 * chipset_selftest_uevent() - sends chipset_selftest action
1124 *
1125 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1126 *
1127 * Return: 0 on success, negative on failure
1128 */
1129static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
1130{
1131	char env_selftest[20];
1132	char *envp[] = { env_selftest, NULL };
1133	int res;
1134
1135	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1136	res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1137				 KOBJ_CHANGE, envp);
1138	if (msg_hdr->flags.response_expected)
1139		controlvm_respond(msg_hdr, res, NULL);
1140	return res;
1141}
1142
1143/*
1144 * chipset_notready_uevent() - sends chipset_notready action
1145 *
1146 * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1147 *
1148 * Return: 0 on success, negative on failure
1149 */
1150static int chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
1151{
1152	int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
1153				 KOBJ_OFFLINE);
1154
1155	if (msg_hdr->flags.response_expected)
1156		controlvm_respond(msg_hdr, res, NULL);
1157	return res;
1158}
1159
1160static int unisys_vmcall(unsigned long tuple, unsigned long param)
1161{
1162	int result = 0;
1163	unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
1164	unsigned long reg_ebx;
1165	unsigned long reg_ecx;
1166
1167	reg_ebx = param & 0xFFFFFFFF;
1168	reg_ecx = param >> 32;
1169	cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
1170	if (!(cpuid_ecx & 0x80000000))
1171		return -EPERM;
1172	__asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
1173			     "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
1174	if (result)
1175		goto error;
1176	return 0;
1177
1178/* Need to convert from VMCALL error codes to Linux */
1179error:
1180	switch (result) {
1181	case VMCALL_RESULT_INVALID_PARAM:
1182		return -EINVAL;
1183	case VMCALL_RESULT_DATA_UNAVAILABLE:
1184		return -ENODEV;
1185	default:
1186		return -EFAULT;
1187	}
1188}
1189
1190static int controlvm_channel_create(struct visorchipset_device *dev)
1191{
1192	struct visorchannel *chan;
1193	u64 addr;
1194	int err;
1195
1196	err = unisys_vmcall(VMCALL_CONTROLVM_ADDR,
1197			    virt_to_phys(&dev->controlvm_params));
1198	if (err)
1199		return err;
1200	addr = dev->controlvm_params.address;
1201	chan = visorchannel_create(addr, GFP_KERNEL,
1202				   &visor_controlvm_channel_guid, true);
1203	if (!chan)
1204		return -ENOMEM;
1205	dev->controlvm_channel = chan;
1206	return 0;
1207}
1208
/*
 * setup_crash_devices_work_queue() - recreate the storage bus and device when
 *                                    booting in a kdump (crash) kernel
 * @work: work_struct used to schedule this routine (not otherwise used)
 *
 * Sends CONTROLVM_CHIPSET_INIT, then replays the create-bus and create-device
 * messages previously saved in the controlvm channel. Any failure to read the
 * channel or an invalid saved message aborts the setup with an error log.
 */
static void setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg = {
		.hdr.id = CONTROLVM_CHIPSET_INIT,
		.cmd.init_chipset = {
			.bus_count = 23,
			.switch_count = 0,
		},
	};
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	/* send init chipset msg */
	chipset_init(&msg);
	/* get saved message count */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      offsetof(struct visor_controlvm_channel,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to read channel\n");
		return;
	}
	/* exactly CONTROLVM_CRASHMSG_MAX messages must have been saved */
	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		dev_err(&chipset_dev->acpi_device->dev, "invalid count\n");
		return;
	}
	/* get saved crash message offset */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      offsetof(struct visor_controlvm_channel,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to read channel\n");
		return;
	}
	/* read create device message for storage bus offset */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to read channel\n");
		return;
	}
	/* read create device message for storage device */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to read channel\n");
		return;
	}
	/* reuse IOVM create bus message */
	if (!local_crash_bus_msg.cmd.create_bus.channel_addr) {
		dev_err(&chipset_dev->acpi_device->dev,
			"no valid create_bus message\n");
		return;
	}
	visorbus_create(&local_crash_bus_msg);
	/* reuse create device message for storage device */
	if (!local_crash_dev_msg.cmd.create_device.channel_addr) {
		dev_err(&chipset_dev->acpi_device->dev,
			"no valid create_device message\n");
		return;
	}
	visorbus_device_create(&local_crash_dev_msg);
}
1281
1282void visorbus_response(struct visor_device *bus_info, int response,
1283		       int controlvm_id)
1284{
1285	if (!bus_info->pending_msg_hdr)
1286		return;
1287
1288	controlvm_responder(controlvm_id, bus_info->pending_msg_hdr, response);
1289	kfree(bus_info->pending_msg_hdr);
1290	bus_info->pending_msg_hdr = NULL;
1291}
1292
1293void visorbus_device_changestate_response(struct visor_device *dev_info,
1294					  int response,
1295					  struct visor_segment_state state)
1296{
1297	if (!dev_info->pending_msg_hdr)
1298		return;
1299
1300	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE, dev_info,
1301				     response, state);
1302	kfree(dev_info->pending_msg_hdr);
1303	dev_info->pending_msg_hdr = NULL;
1304}
1305
1306static void parser_done(struct parser_context *ctx)
1307{
1308	chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
1309	kfree(ctx);
1310}
1311
1312static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
1313						 bool *retry)
1314{
1315	unsigned long allocbytes;
1316	struct parser_context *ctx;
1317	void *mapping;
1318
1319	*retry = false;
1320	/* alloc an extra byte to ensure payload is \0 terminated */
1321	allocbytes = (unsigned long)bytes + 1 + (sizeof(struct parser_context) -
1322		     sizeof(struct visor_controlvm_parameters_header));
1323	if ((chipset_dev->controlvm_payload_bytes_buffered + bytes) >
1324	     MAX_CONTROLVM_PAYLOAD_BYTES) {
1325		*retry = true;
1326		return NULL;
1327	}
1328	ctx = kzalloc(allocbytes, GFP_KERNEL);
1329	if (!ctx) {
1330		*retry = true;
1331		return NULL;
1332	}
1333	ctx->allocbytes = allocbytes;
1334	ctx->param_bytes = bytes;
1335	mapping = memremap(addr, bytes, MEMREMAP_WB);
1336	if (!mapping)
1337		goto err_finish_ctx;
1338	memcpy(&ctx->data, mapping, bytes);
1339	memunmap(mapping);
1340	ctx->byte_stream = true;
1341	chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
1342	return ctx;
1343
1344err_finish_ctx:
1345	kfree(ctx);
1346	return NULL;
1347}
1348
1349/*
1350 * handle_command() - process a controlvm message
1351 * @inmsg:        the message to process
1352 * @channel_addr: address of the controlvm channel
1353 *
1354 * Return:
1355 *	0	- Successfully processed the message
1356 *	-EAGAIN - ControlVM message was not processed and should be retried
1357 *		  reading the next controlvm message; a scenario where this can
1358 *		  occur is when we need to throttle the allocation of memory in
1359 *		  which to copy out controlvm payload data.
1360 *	< 0	- error: ControlVM message was processed but an error occurred.
1361 */
1362static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
1363{
1364	struct controlvm_message_packet *cmd = &inmsg.cmd;
1365	u64 parm_addr;
1366	u32 parm_bytes;
1367	struct parser_context *parser_ctx = NULL;
1368	struct controlvm_message ackmsg;
1369	int err = 0;
1370
1371	/* create parsing context if necessary */
1372	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1373	parm_bytes = inmsg.hdr.payload_bytes;
1374	/*
1375	 * Parameter and channel addresses within test messages actually lie
1376	 * within our OS-controlled memory. We need to know that, because it
1377	 * makes a difference in how we compute the virtual address.
1378	 */
1379	if (parm_bytes) {
1380		bool retry;
1381
1382		parser_ctx = parser_init_stream(parm_addr, parm_bytes, &retry);
1383		if (!parser_ctx && retry)
1384			return -EAGAIN;
1385	}
1386	controlvm_init_response(&ackmsg, &inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1387	err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
1388					CONTROLVM_QUEUE_ACK, &ackmsg);
1389	if (err)
1390		return err;
1391	switch (inmsg.hdr.id) {
1392	case CONTROLVM_CHIPSET_INIT:
1393		err = chipset_init(&inmsg);
1394		break;
1395	case CONTROLVM_BUS_CREATE:
1396		err = visorbus_create(&inmsg);
1397		break;
1398	case CONTROLVM_BUS_DESTROY:
1399		err = visorbus_destroy(&inmsg);
1400		break;
1401	case CONTROLVM_BUS_CONFIGURE:
1402		err = visorbus_configure(&inmsg, parser_ctx);
1403		break;
1404	case CONTROLVM_DEVICE_CREATE:
1405		err = visorbus_device_create(&inmsg);
1406		break;
1407	case CONTROLVM_DEVICE_CHANGESTATE:
1408		if (cmd->device_change_state.flags.phys_device) {
1409			err = parahotplug_process_message(&inmsg);
1410		} else {
1411			/*
1412			 * save the hdr and cmd structures for later use when
1413			 * sending back the response to Command
1414			 */
1415			err = visorbus_device_changestate(&inmsg);
1416			break;
1417		}
1418		break;
1419	case CONTROLVM_DEVICE_DESTROY:
1420		err = visorbus_device_destroy(&inmsg);
1421		break;
1422	case CONTROLVM_DEVICE_CONFIGURE:
1423		/* no op just send a respond that we passed */
1424		if (inmsg.hdr.flags.response_expected)
1425			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS,
1426					  NULL);
1427		break;
1428	case CONTROLVM_CHIPSET_READY:
1429		err = chipset_ready_uevent(&inmsg.hdr);
1430		break;
1431	case CONTROLVM_CHIPSET_SELFTEST:
1432		err = chipset_selftest_uevent(&inmsg.hdr);
1433		break;
1434	case CONTROLVM_CHIPSET_STOP:
1435		err = chipset_notready_uevent(&inmsg.hdr);
1436		break;
1437	default:
1438		err = -ENOMSG;
1439		if (inmsg.hdr.flags.response_expected)
1440			controlvm_respond(&inmsg.hdr,
1441					  -CONTROLVM_RESP_ID_UNKNOWN, NULL);
1442		break;
1443	}
1444	if (parser_ctx) {
1445		parser_done(parser_ctx);
1446		parser_ctx = NULL;
1447	}
1448	return err;
1449}
1450
1451/*
1452 * read_controlvm_event() - retreives the next message from the
1453 *                          CONTROLVM_QUEUE_EVENT queue in the controlvm
1454 *                          channel
1455 * @msg: pointer to the retrieved message
1456 *
1457 * Return: 0 if valid message was retrieved or -error
1458 */
1459static int read_controlvm_event(struct controlvm_message *msg)
1460{
1461	int err = visorchannel_signalremove(chipset_dev->controlvm_channel,
1462					    CONTROLVM_QUEUE_EVENT, msg);
1463
1464	if (err)
1465		return err;
1466	/* got a message */
1467	if (msg->hdr.flags.test_message == 1)
1468		return -EINVAL;
1469	return 0;
1470}
1471
1472/*
1473 * parahotplug_process_list() - remove any request from the list that's been on
1474 *                              there too long and respond with an error
1475 */
1476static void parahotplug_process_list(void)
1477{
1478	struct list_head *pos;
1479	struct list_head *tmp;
1480
1481	spin_lock(&parahotplug_request_list_lock);
1482	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1483		struct parahotplug_request *req =
1484		    list_entry(pos, struct parahotplug_request, list);
1485
1486		if (!time_after_eq(jiffies, req->expiration))
1487			continue;
1488		list_del(pos);
1489		if (req->msg.hdr.flags.response_expected)
1490			controlvm_respond(
1491				&req->msg.hdr,
1492				CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
1493				&req->msg.cmd.device_change_state.state);
1494		parahotplug_request_destroy(req);
1495	}
1496	spin_unlock(&parahotplug_request_list_lock);
1497}
1498
/*
 * controlvm_periodic_work() - poll the controlvm channel for work
 * @work: work_struct used to reschedule this routine
 *
 * Drains the RESPONSE queue, processes a previously-throttled message or new
 * EVENT-queue messages via handle_command(), expires stale parahotplug
 * requests, then reschedules itself at a fast or slow interval depending on
 * how recently a message arrived.
 */
static void controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	int count = 0;
	int err;

	/* Drain the RESPONSE queue make it empty */
	do {
		err = visorchannel_signalremove(chipset_dev->controlvm_channel,
						CONTROLVM_QUEUE_RESPONSE,
						&inmsg);
	} while ((!err) && (++count < CONTROLVM_MESSAGE_MAX));
	/* -EAGAIN means the queue is empty; anything else is unexpected */
	if (err != -EAGAIN)
		goto schedule_out;
	if (chipset_dev->controlvm_pending_msg_valid) {
		/*
		 * we throttled processing of a prior msg, so try to process
		 * it again rather than reading a new one
		 */
		inmsg = chipset_dev->controlvm_pending_msg;
		chipset_dev->controlvm_pending_msg_valid = false;
		err = 0;
	} else {
		err = read_controlvm_event(&inmsg);
	}
	while (!err) {
		chipset_dev->most_recent_message_jiffies = jiffies;
		err = handle_command(inmsg,
				     visorchannel_get_physaddr
				     (chipset_dev->controlvm_channel));
		if (err == -EAGAIN) {
			/* save the throttled message and retry next pass */
			chipset_dev->controlvm_pending_msg = inmsg;
			chipset_dev->controlvm_pending_msg_valid = true;
			break;
		}

		err = read_controlvm_event(&inmsg);
	}
	/* parahotplug_worker */
	parahotplug_process_list();

/*
 * The controlvm messages are sent in a bulk. If we start receiving messages, we
 * want the polling to be fast. If we do not receive any message for
 * MIN_IDLE_SECONDS, we can slow down the polling.
 */
schedule_out:
	if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
				(HZ * MIN_IDLE_SECONDS))) {
		/*
		 * it's been longer than MIN_IDLE_SECONDS since we processed
		 * our last controlvm message; slow down the polling
		 */
		if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_SLOW)
			chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_SLOW;
	} else {
		if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_FAST)
			chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
	}
	schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
			      chipset_dev->poll_jiffies);
}
1561
/*
 * visorchipset_init() - ACPI .add callback; bring up the visorchipset device
 * @acpi_device: the matched ACPI device
 *
 * Allocates the chipset state, creates and validates the controlvm channel,
 * registers the sysfs groups, schedules either the crash-device setup (kdump
 * kernel) or the periodic controlvm poll, and initializes visorbus. Failures
 * unwind in reverse order via the goto chain below.
 *
 * Return: 0 on success, negative errno on failure
 */
static int visorchipset_init(struct acpi_device *acpi_device)
{
	int err = -ENOMEM;
	struct visorchannel *controlvm_channel;

	chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
	if (!chipset_dev)
		goto error;
	err = controlvm_channel_create(chipset_dev);
	if (err)
		goto error_free_chipset_dev;
	acpi_device->driver_data = chipset_dev;
	chipset_dev->acpi_device = acpi_device;
	chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
	err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
				  visorchipset_dev_groups);
	if (err < 0)
		goto error_destroy_channel;
	controlvm_channel = chipset_dev->controlvm_channel;
	/* sanity-check the channel header before trusting its contents */
	if (!visor_check_channel(visorchannel_get_header(controlvm_channel),
				 &chipset_dev->acpi_device->dev,
				 &visor_controlvm_channel_guid,
				 "controlvm",
				 sizeof(struct visor_controlvm_channel),
				 VISOR_CONTROLVM_CHANNEL_VERSIONID,
				 VISOR_CHANNEL_SIGNATURE)) {
		err = -ENODEV;
		goto error_delete_groups;
	}
	/* if booting in a crash kernel */
	if (is_kdump_kernel())
		INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
				  setup_crash_devices_work_queue);
	else
		INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
				  controlvm_periodic_work);
	chipset_dev->most_recent_message_jiffies = jiffies;
	chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
	schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
			      chipset_dev->poll_jiffies);
	err = visorbus_init();
	if (err < 0)
		goto error_cancel_work;
	return 0;

error_cancel_work:
	cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);

error_delete_groups:
	sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
			    visorchipset_dev_groups);

error_destroy_channel:
	visorchannel_destroy(chipset_dev->controlvm_channel);

error_free_chipset_dev:
	kfree(chipset_dev);

error:
	dev_err(&acpi_device->dev, "failed with error %d\n", err);
	return err;
}
1624
/*
 * visorchipset_exit() - ACPI .remove callback; tear down everything that
 *                       visorchipset_init() set up, in reverse order
 */
static int visorchipset_exit(struct acpi_device *acpi_device)
{
	visorbus_exit();
	cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
	sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
			    visorchipset_dev_groups);
	visorchannel_destroy(chipset_dev->controlvm_channel);
	kfree(chipset_dev);
	return 0;
}
1635
/* ACPI IDs this driver binds to; the empty entry terminates the table. */
static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};

/* ACPI driver glue: .add/.remove map to chipset bring-up/tear-down. */
static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};

MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
1653
1654static __init int visorutil_spar_detect(void)
1655{
1656	unsigned int eax, ebx, ecx, edx;
1657
1658	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1659		/* check the ID */
1660		cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx);
1661		return  (ebx == UNISYS_VISOR_ID_EBX) &&
1662			(ecx == UNISYS_VISOR_ID_ECX) &&
1663			(edx == UNISYS_VISOR_ID_EDX);
1664	}
1665	return 0;
1666}
1667
1668static int __init init_unisys(void)
1669{
1670	int result;
1671
1672	if (!visorutil_spar_detect())
1673		return -ENODEV;
1674	result = acpi_bus_register_driver(&unisys_acpi_driver);
1675	if (result)
1676		return -ENODEV;
1677	pr_info("Unisys Visorchipset Driver Loaded.\n");
1678	return 0;
1679};
1680
/* Module exit: unregister the ACPI driver registered in init_unisys(). */
static void __exit exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}
1685
1686module_init(init_unisys);
1687module_exit(exit_unisys);
1688
1689MODULE_AUTHOR("Unisys");
1690MODULE_LICENSE("GPL");
1691MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");
1692