 1// SPDX-License-Identifier: GPL-2.0
 2/*
 3 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
 4 * All rights reserved.
 5 */
 6
 7#include <linux/acpi.h>
 8#include <linux/crash_dump.h>
 9#include <linux/visorbus.h>
 10
 11#include "visorbus_private.h"
 12
 13/* {72120008-4AAB-11DC-8530-444553544200} */
 14#define VISOR_SIOVM_GUID GUID_INIT(0x72120008, 0x4AAB, 0x11DC, 0x85, 0x30, \
 15 0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
 16
 17static const guid_t visor_vhba_channel_guid = VISOR_VHBA_CHANNEL_GUID;
 18static const guid_t visor_siovm_guid = VISOR_SIOVM_GUID;
 19static const guid_t visor_controlvm_channel_guid = VISOR_CONTROLVM_CHANNEL_GUID;
 20
 21#define POLLJIFFIES_CONTROLVM_FAST 1
 22#define POLLJIFFIES_CONTROLVM_SLOW 100
 23
 24#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
 25
 26#define UNISYS_VISOR_LEAF_ID 0x40000000
 27
 28/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
 29#define UNISYS_VISOR_ID_EBX 0x73696e55
 30#define UNISYS_VISOR_ID_ECX 0x70537379
 31#define UNISYS_VISOR_ID_EDX 0x34367261
 32
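/*
 * Editorial illustration (not part of the original driver): the three ID
 * values above are the ASCII string "UnisysSpar64" split across EBX/ECX/EDX
 * as little-endian 32-bit words. A hypothetical standalone check could
 * therefore rebuild the signature like this:
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	char sig[13];
 *
 *	cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx);
 *	memcpy(&sig[0], &ebx, 4);
 *	memcpy(&sig[4], &ecx, 4);
 *	memcpy(&sig[8], &edx, 4);
 *	sig[12] = '\0';
 *
 * sig then compares equal to "UnisysSpar64" when running on s-Par.
 * visorutil_spar_detect() at the bottom of this file does the equivalent
 * word-by-word comparison against these constants.
 */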
 33/*
 34 * When the controlvm channel is idle for at least MIN_IDLE_SECONDS, we switch
 35 * to slow polling mode. As soon as we get a controlvm message, we switch back
 36 * to fast polling mode.
 37 */
 38#define MIN_IDLE_SECONDS 10
 39
 40struct parser_context {
 41 unsigned long allocbytes;
 42 unsigned long param_bytes;
 43 u8 *curr;
 44 unsigned long bytes_remaining;
 45 bool byte_stream;
 46 struct visor_controlvm_parameters_header data;
 47};
 48
 49/* VMCALL_CONTROLVM_ADDR: Used by all guests, not just IO. */
 50#define VMCALL_CONTROLVM_ADDR 0x0501
 51
 52enum vmcall_result {
 53 VMCALL_RESULT_SUCCESS = 0,
 54 VMCALL_RESULT_INVALID_PARAM = 1,
 55 VMCALL_RESULT_DATA_UNAVAILABLE = 2,
 56 VMCALL_RESULT_FAILURE_UNAVAILABLE = 3,
 57 VMCALL_RESULT_DEVICE_ERROR = 4,
 58 VMCALL_RESULT_DEVICE_NOT_READY = 5
 59};
 60
 61/*
 62 * struct vmcall_io_controlvm_addr_params - Structure for IO VMCALLs. Has
 63 *                                          parameters to the
 64 *                                          VMCALL_CONTROLVM_ADDR interface.
 65 * @address: The guest-relative physical address of the ControlVm channel.
 66 *           This VMCALL fills this in with the appropriate address.
 67 *           Contents provided by this VMCALL (OUT).
 68 * @channel_bytes: The size of the ControlVm channel in bytes. This VMCALL
 69 *                 fills this in with the appropriate size. Contents provided
 70 *                 by this VMCALL (OUT).
 71 * @unused: Unused bytes in the 64-bit aligned struct.
 72 */
 73struct vmcall_io_controlvm_addr_params {
 74 u64 address;
 75 u32 channel_bytes;
 76 u8 unused[4];
 77} __packed;
78
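/*
 * Usage sketch (editorial, hedged): controlvm_channel_create() later in this
 * file hands the physical address of one of these structures to the
 * hypervisor, roughly:
 *
 *	struct vmcall_io_controlvm_addr_params params;
 *	int err;
 *
 *	err = unisys_vmcall(VMCALL_CONTROLVM_ADDR, virt_to_phys(&params));
 *	if (!err)
 *		visorchannel_create(params.address, ...);
 *
 * On success the hypervisor has filled in params.address and
 * params.channel_bytes (the OUT fields documented above).
 */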
 79struct visorchipset_device {
 80 struct acpi_device *acpi_device;
 81 unsigned long poll_jiffies;
 82 /* when we got our last controlvm message */
 83 unsigned long most_recent_message_jiffies;
 84 struct delayed_work periodic_controlvm_work;
 85 struct visorchannel *controlvm_channel;
 86 unsigned long controlvm_payload_bytes_buffered;
 87 /*
 88 * The following variables are used to handle the scenario where we are
 89 * unable to offload the payload from a controlvm message due to memory
 90 * requirements. In this scenario, we simply stash the controlvm
 91 * message, then attempt to process it again the next time
 92 * controlvm_periodic_work() runs.
 93 */
 94 struct controlvm_message controlvm_pending_msg;
 95 bool controlvm_pending_msg_valid;
 96 struct vmcall_io_controlvm_addr_params controlvm_params;
 97};
 98
 99static struct visorchipset_device *chipset_dev;
 100
 101struct parahotplug_request {
 102 struct list_head list;
 103 int id;
 104 unsigned long expiration;
 105 struct controlvm_message msg;
 106};
107
19f6634f
BR
108/* prototypes for attributes */
109static ssize_t toolaction_show(struct device *dev,
84efd207
DK
110 struct device_attribute *attr,
111 char *buf)
112{
113 u8 tool_action = 0;
002a5abb
DK
114 int err;
115
116 err = visorchannel_read(chipset_dev->controlvm_channel,
545f0913 117 offsetof(struct visor_controlvm_channel,
002a5abb
DK
118 tool_action),
119 &tool_action, sizeof(u8));
120 if (err)
121 return err;
746fb137 122 return sprintf(buf, "%u\n", tool_action);
84efd207
DK
123}
124
19f6634f 125static ssize_t toolaction_store(struct device *dev,
8e76e695 126 struct device_attribute *attr,
84efd207
DK
127 const char *buf, size_t count)
128{
129 u8 tool_action;
dc35cdf3 130 int err;
84efd207
DK
131
132 if (kstrtou8(buf, 10, &tool_action))
133 return -EINVAL;
545f0913
SW
134 err = visorchannel_write(chipset_dev->controlvm_channel,
135 offsetof(struct visor_controlvm_channel,
136 tool_action),
137 &tool_action, sizeof(u8));
dc35cdf3
DK
138 if (err)
139 return err;
84efd207
DK
140 return count;
141}
19f6634f
BR
142static DEVICE_ATTR_RW(toolaction);
143
54b31229 144static ssize_t boottotool_show(struct device *dev,
1b1d463d
DK
145 struct device_attribute *attr,
146 char *buf)
147{
545f0913 148 struct efi_visor_indication efi_visor_indication;
0b01c6ce
DK
149 int err;
150
151 err = visorchannel_read(chipset_dev->controlvm_channel,
545f0913
SW
152 offsetof(struct visor_controlvm_channel,
153 efi_visor_ind),
154 &efi_visor_indication,
155 sizeof(struct efi_visor_indication));
0b01c6ce
DK
156 if (err)
157 return err;
545f0913 158 return sprintf(buf, "%u\n", efi_visor_indication.boot_to_tool);
1b1d463d
DK
159}
160
54b31229 161static ssize_t boottotool_store(struct device *dev,
1b1d463d
DK
162 struct device_attribute *attr,
163 const char *buf, size_t count)
164{
b309266e 165 int val, err;
545f0913 166 struct efi_visor_indication efi_visor_indication;
1b1d463d
DK
167
168 if (kstrtoint(buf, 10, &val))
169 return -EINVAL;
545f0913
SW
170 efi_visor_indication.boot_to_tool = val;
171 err = visorchannel_write(chipset_dev->controlvm_channel,
172 offsetof(struct visor_controlvm_channel,
173 efi_visor_ind),
174 &(efi_visor_indication),
175 sizeof(struct efi_visor_indication));
b309266e
DK
176 if (err)
177 return err;
1b1d463d
DK
178 return count;
179}
54b31229
BR
180static DEVICE_ATTR_RW(boottotool);
181
422af17c 182static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8a4a8a03
DK
183 char *buf)
184{
185 u32 error = 0;
d9857c79 186 int err;
8a4a8a03 187
d9857c79 188 err = visorchannel_read(chipset_dev->controlvm_channel,
545f0913 189 offsetof(struct visor_controlvm_channel,
d9857c79
DK
190 installation_error),
191 &error, sizeof(u32));
192 if (err)
193 return err;
6df555c1 194 return sprintf(buf, "%u\n", error);
8a4a8a03
DK
195}
196
422af17c 197static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8a4a8a03
DK
198 const char *buf, size_t count)
199{
200 u32 error;
ea295857 201 int err;
8a4a8a03
DK
202
203 if (kstrtou32(buf, 10, &error))
204 return -EINVAL;
545f0913
SW
205 err = visorchannel_write(chipset_dev->controlvm_channel,
206 offsetof(struct visor_controlvm_channel,
207 installation_error),
208 &error, sizeof(u32));
ea295857
DK
209 if (err)
210 return err;
8a4a8a03
DK
211 return count;
212}
422af17c
BR
213static DEVICE_ATTR_RW(error);
214
215static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
79730c7c
DK
216 char *buf)
217{
218 u32 text_id = 0;
0d406436
DK
219 int err;
220
545f0913
SW
221 err = visorchannel_read(chipset_dev->controlvm_channel,
222 offsetof(struct visor_controlvm_channel,
223 installation_text_id),
224 &text_id, sizeof(u32));
0d406436
DK
225 if (err)
226 return err;
6df555c1 227 return sprintf(buf, "%u\n", text_id);
79730c7c
DK
228}
229
422af17c 230static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
79730c7c
DK
231 const char *buf, size_t count)
232{
233 u32 text_id;
08a55d2d 234 int err;
79730c7c
DK
235
236 if (kstrtou32(buf, 10, &text_id))
237 return -EINVAL;
545f0913
SW
238 err = visorchannel_write(chipset_dev->controlvm_channel,
239 offsetof(struct visor_controlvm_channel,
240 installation_text_id),
241 &text_id, sizeof(u32));
08a55d2d
DK
242 if (err)
243 return err;
79730c7c
DK
244 return count;
245}
422af17c
BR
246static DEVICE_ATTR_RW(textid);
247
248static ssize_t remaining_steps_show(struct device *dev,
97f792ee
DK
249 struct device_attribute *attr, char *buf)
250{
251 u16 remaining_steps = 0;
c53578bd
DK
252 int err;
253
254 err = visorchannel_read(chipset_dev->controlvm_channel,
545f0913 255 offsetof(struct visor_controlvm_channel,
c53578bd
DK
256 installation_remaining_steps),
257 &remaining_steps, sizeof(u16));
258 if (err)
259 return err;
746fb137 260 return sprintf(buf, "%hu\n", remaining_steps);
97f792ee
DK
261}
262
422af17c 263static ssize_t remaining_steps_store(struct device *dev,
8e76e695 264 struct device_attribute *attr,
97f792ee
DK
265 const char *buf, size_t count)
266{
267 u16 remaining_steps;
e030d39d 268 int err;
97f792ee
DK
269
270 if (kstrtou16(buf, 10, &remaining_steps))
271 return -EINVAL;
545f0913
SW
272 err = visorchannel_write(chipset_dev->controlvm_channel,
273 offsetof(struct visor_controlvm_channel,
274 installation_remaining_steps),
275 &remaining_steps, sizeof(u16));
e030d39d
DK
276 if (err)
277 return err;
97f792ee
DK
278 return count;
279}
422af17c
BR
280static DEVICE_ATTR_RW(remaining_steps);
281
e80ffd4b
CD
282static void controlvm_init_response(struct controlvm_message *msg,
283 struct controlvm_message_header *msg_hdr,
284 int response)
5f251395
DK
285{
286 memset(msg, 0, sizeof(struct controlvm_message));
287 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
288 msg->hdr.payload_bytes = 0;
289 msg->hdr.payload_vm_offset = 0;
290 msg->hdr.payload_max_bytes = 0;
291 if (response < 0) {
292 msg->hdr.flags.failed = 1;
293 msg->hdr.completion_status = (u32)(-response);
294 }
295}
296
e80ffd4b
CD
297static int controlvm_respond_chipset_init(
298 struct controlvm_message_header *msg_hdr,
299 int response,
300 enum visor_chipset_feature features)
5f251395
DK
301{
302 struct controlvm_message outmsg;
303
304 controlvm_init_response(&outmsg, msg_hdr, response);
305 outmsg.cmd.init_chipset.features = features;
765b2f82 306 return visorchannel_signalinsert(chipset_dev->controlvm_channel,
1d7f5522 307 CONTROLVM_QUEUE_REQUEST, &outmsg);
5f251395
DK
308}
309
e80ffd4b 310static int chipset_init(struct controlvm_message *inmsg)
12e364b9
KC
311{
312 static int chipset_inited;
d3ad6e69 313 enum visor_chipset_feature features = 0;
12e364b9 314 int rc = CONTROLVM_RESP_SUCCESS;
79c3f971 315 int res = 0;
12e364b9 316
12e364b9 317 if (chipset_inited) {
98f9ed9e 318 rc = -CONTROLVM_RESP_ALREADY_DONE;
79c3f971 319 res = -EIO;
5233d1eb 320 goto out_respond;
12e364b9
KC
321 }
322 chipset_inited = 1;
ec17f452 323 /*
6577cbf1 324 * Set features to indicate we support parahotplug (if Command also
977980ac
DK
325 * supports it). Set the "reply" bit so Command knows this is a
326 * features-aware driver.
2ee0d052 327 */
0762188b 328 features = inmsg->cmd.init_chipset.features &
d3ad6e69 329 VISOR_CHIPSET_FEATURE_PARA_HOTPLUG;
d3ad6e69 330 features |= VISOR_CHIPSET_FEATURE_REPLY;
12e364b9 331
5233d1eb 332out_respond:
98d7b594 333 if (inmsg->hdr.flags.response_expected)
79c3f971
DK
334 res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
335
336 return res;
12e364b9
KC
337}
338
e80ffd4b 339static int controlvm_respond(struct controlvm_message_header *msg_hdr,
040b78f7 340 int response, struct visor_segment_state *state)
12e364b9 341{
3ab47701 342 struct controlvm_message outmsg;
26eb2c0c 343
b3168c70 344 controlvm_init_response(&outmsg, msg_hdr, response);
2098dbd1 345 if (outmsg.hdr.flags.test_message == 1)
2d26aeb7 346 return -EINVAL;
4c0e65f8
DK
347 if (state) {
348 outmsg.cmd.device_change_state.state = *state;
349 outmsg.cmd.device_change_state.flags.phys_device = 1;
350 }
765b2f82 351 return visorchannel_signalinsert(chipset_dev->controlvm_channel,
2c4ef563 352 CONTROLVM_QUEUE_REQUEST, &outmsg);
12e364b9
KC
353}
354
2ee0deec
PB
355enum crash_obj_type {
356 CRASH_DEV,
357 CRASH_BUS,
358};
359
e80ffd4b
CD
360static int save_crash_message(struct controlvm_message *msg,
361 enum crash_obj_type cr_type)
12c957dc
TS
362{
363 u32 local_crash_msg_offset;
364 u16 local_crash_msg_count;
8dff01f7 365 int err;
12c957dc 366
765b2f82 367 err = visorchannel_read(chipset_dev->controlvm_channel,
545f0913 368 offsetof(struct visor_controlvm_channel,
8dff01f7
DK
369 saved_crash_message_count),
370 &local_crash_msg_count, sizeof(u16));
371 if (err) {
35301b87
DK
372 dev_err(&chipset_dev->acpi_device->dev,
373 "failed to read message count\n");
8dff01f7 374 return err;
12c957dc 375 }
12c957dc 376 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
35301b87
DK
377 dev_err(&chipset_dev->acpi_device->dev,
378 "invalid number of messages\n");
8dff01f7 379 return -EIO;
12c957dc 380 }
765b2f82 381 err = visorchannel_read(chipset_dev->controlvm_channel,
545f0913 382 offsetof(struct visor_controlvm_channel,
8dff01f7
DK
383 saved_crash_message_offset),
384 &local_crash_msg_offset, sizeof(u32));
385 if (err) {
35301b87
DK
386 dev_err(&chipset_dev->acpi_device->dev,
387 "failed to read offset\n");
8dff01f7 388 return err;
12c957dc 389 }
603a1989 390 switch (cr_type) {
36309d3b
DB
391 case CRASH_DEV:
392 local_crash_msg_offset += sizeof(struct controlvm_message);
765b2f82 393 err = visorchannel_write(chipset_dev->controlvm_channel,
040b78f7 394 local_crash_msg_offset, msg,
36309d3b 395 sizeof(struct controlvm_message));
8dff01f7 396 if (err) {
35301b87
DK
397 dev_err(&chipset_dev->acpi_device->dev,
398 "failed to write dev msg\n");
8dff01f7 399 return err;
12c957dc 400 }
36309d3b
DB
401 break;
402 case CRASH_BUS:
765b2f82 403 err = visorchannel_write(chipset_dev->controlvm_channel,
040b78f7 404 local_crash_msg_offset, msg,
8dff01f7
DK
405 sizeof(struct controlvm_message));
406 if (err) {
35301b87
DK
407 dev_err(&chipset_dev->acpi_device->dev,
408 "failed to write bus msg\n");
8dff01f7 409 return err;
12c957dc 410 }
36309d3b
DB
411 break;
412 default:
35301b87
DK
413 dev_err(&chipset_dev->acpi_device->dev,
414 "Invalid crash_obj_type\n");
36309d3b 415 break;
12c957dc 416 }
8dff01f7 417 return 0;
12c957dc
TS
418}
419
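/*
 * Editorial note on the layout used above: the controlvm channel reserves
 * room for CONTROLVM_CRASHMSG_MAX saved messages starting at
 * saved_crash_message_offset. The bus CREATE message is written at that
 * offset and the device CREATE message immediately after it
 * (offset + sizeof(struct controlvm_message)), which is the same order in
 * which setup_crash_devices_work_queue() reads them back after a crash.
 */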
e80ffd4b
CD
420static int controlvm_responder(enum controlvm_id cmd_id,
421 struct controlvm_message_header *pending_msg_hdr,
422 int response)
12e364b9 423{
0274b5ae 424 if (pending_msg_hdr->id != (u32)cmd_id)
734ad93a 425 return -EINVAL;
0aca7844 426
4c0e65f8 427 return controlvm_respond(pending_msg_hdr, response, NULL);
12e364b9
KC
428}
429
da56cb04
DK
430static int device_changestate_responder(enum controlvm_id cmd_id,
431 struct visor_device *p, int response,
432 struct visor_segment_state state)
12e364b9 433{
3ab47701 434 struct controlvm_message outmsg;
12e364b9 435
0274b5ae 436 if (p->pending_msg_hdr->id != cmd_id)
68f99d49 437 return -EINVAL;
12e364b9 438
0274b5ae 439 controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
b253ff5b
DK
440 outmsg.cmd.device_change_state.bus_no = p->chipset_bus_no;
441 outmsg.cmd.device_change_state.dev_no = p->chipset_dev_no;
da56cb04 442 outmsg.cmd.device_change_state.state = state;
765b2f82 443 return visorchannel_signalinsert(chipset_dev->controlvm_channel,
68f99d49 444 CONTROLVM_QUEUE_REQUEST, &outmsg);
12e364b9
KC
445}
446
e80ffd4b 447static int visorbus_create(struct controlvm_message *inmsg)
12e364b9 448{
2ea5117b 449 struct controlvm_message_packet *cmd = &inmsg->cmd;
ef7b9dcb 450 struct controlvm_message_header *pmsg_hdr;
52063eca 451 u32 bus_no = cmd->create_bus.bus_no;
d32517e3 452 struct visor_device *bus_info;
b32c4997 453 struct visorchannel *visorchannel;
33161a29 454 int err;
12e364b9 455
d32517e3 456 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
614b083d 457 if (bus_info && bus_info->state.created == 1) {
055bc909 458 dev_err(&chipset_dev->acpi_device->dev,
87408fe0 459 "failed %s: already exists\n", __func__);
33161a29
DK
460 err = -EEXIST;
461 goto err_respond;
12e364b9 462 }
6c5fed35
BR
463 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
464 if (!bus_info) {
33161a29
DK
465 err = -ENOMEM;
466 goto err_respond;
12e364b9 467 }
4abce83d 468 INIT_LIST_HEAD(&bus_info->list_all);
d32517e3
DZ
469 bus_info->chipset_bus_no = bus_no;
470 bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
b32c5cb8 471 if (guid_equal(&cmd->create_bus.bus_inst_guid, &visor_siovm_guid)) {
300ed612
DK
472 err = save_crash_message(inmsg, CRASH_BUS);
473 if (err)
474 goto err_free_bus_info;
475 }
8f334e30 476 if (inmsg->hdr.flags.response_expected == 1) {
040b78f7 477 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
8f334e30 478 if (!pmsg_hdr) {
33161a29
DK
479 err = -ENOMEM;
480 goto err_free_bus_info;
8f334e30 481 }
8f334e30
DK
482 memcpy(pmsg_hdr, &inmsg->hdr,
483 sizeof(struct controlvm_message_header));
484 bus_info->pending_msg_hdr = pmsg_hdr;
485 }
33161a29 486 visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
33161a29 487 GFP_KERNEL,
90476670
SW
488 &cmd->create_bus.bus_data_type_guid,
489 false);
33161a29 490 if (!visorchannel) {
33161a29
DK
491 err = -ENOMEM;
492 goto err_free_pending_msg;
493 }
494 bus_info->visorchannel = visorchannel;
fdf5b9ac
DK
495 /* Response will be handled by visorbus_create_instance on success */
496 err = visorbus_create_instance(bus_info);
621f5e18
DK
497 if (err)
498 goto err_destroy_channel;
33161a29
DK
499 return 0;
500
621f5e18
DK
501err_destroy_channel:
502 visorchannel_destroy(visorchannel);
503
33161a29
DK
504err_free_pending_msg:
505 kfree(bus_info->pending_msg_hdr);
8f334e30 506
33161a29 507err_free_bus_info:
8f334e30 508 kfree(bus_info);
12e364b9 509
33161a29 510err_respond:
8f334e30 511 if (inmsg->hdr.flags.response_expected == 1)
4fb2539c 512 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
33161a29 513 return err;
12e364b9
KC
514}
515
e80ffd4b 516static int visorbus_destroy(struct controlvm_message *inmsg)
12e364b9 517{
ef7b9dcb 518 struct controlvm_message_header *pmsg_hdr;
3f5a562b 519 u32 bus_no = inmsg->cmd.destroy_bus.bus_no;
d32517e3 520 struct visor_device *bus_info;
30f6c3f5 521 int err;
12e364b9 522
d32517e3 523 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
3e0e8db9 524 if (!bus_info) {
30f6c3f5
DK
525 err = -ENODEV;
526 goto err_respond;
3e0e8db9
DK
527 }
528 if (bus_info->state.created == 0) {
30f6c3f5
DK
529 err = -ENOENT;
530 goto err_respond;
3e0e8db9
DK
531 }
532 if (bus_info->pending_msg_hdr) {
533 /* only non-NULL if dev is still waiting on a response */
30f6c3f5
DK
534 err = -EEXIST;
535 goto err_respond;
3e0e8db9
DK
536 }
537 if (inmsg->hdr.flags.response_expected == 1) {
538 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
539 if (!pmsg_hdr) {
30f6c3f5
DK
540 err = -ENOMEM;
541 goto err_respond;
3e0e8db9 542 }
3e0e8db9
DK
543 memcpy(pmsg_hdr, &inmsg->hdr,
544 sizeof(struct controlvm_message_header));
545 bus_info->pending_msg_hdr = pmsg_hdr;
546 }
a7093ba1
SW
547 /* Response will be handled by visorbus_remove_instance */
548 visorbus_remove_instance(bus_info);
30f6c3f5 549 return 0;
3e0e8db9 550
30f6c3f5 551err_respond:
3e0e8db9 552 if (inmsg->hdr.flags.response_expected == 1)
4fb2539c 553 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
30f6c3f5 554 return err;
12e364b9
KC
555}
556
39b486d6
DK
557static const guid_t *parser_id_get(struct parser_context *ctx)
558{
559 return &ctx->data.id;
560}
561
90d1ecf0 562static void *parser_string_get(u8 *pscan, int nscan)
39b486d6 563{
39b486d6
DK
564 int value_length;
565 void *value;
39b486d6 566
39b486d6
DK
567 if (nscan == 0)
568 return NULL;
569
90d1ecf0
DK
570 value_length = strnlen(pscan, nscan);
571 value = kzalloc(value_length + 1, GFP_KERNEL);
39b486d6
DK
572 if (!value)
573 return NULL;
574 if (value_length > 0)
575 memcpy(value, pscan, value_length);
39b486d6
DK
576 return value;
577}
578
 579static void *parser_name_get(struct parser_context *ctx)
 580{
 581 struct visor_controlvm_parameters_header *phdr;
 582
 583 phdr = &ctx->data;
 584 if ((unsigned long)phdr->name_offset +
 585     (unsigned long)phdr->name_length > ctx->param_bytes)
 586 return NULL;
 587 ctx->curr = (char *)phdr + phdr->name_offset;
 588 ctx->bytes_remaining = phdr->name_length;
 589 return parser_string_get(ctx->curr, phdr->name_length);
 590}
591
e80ffd4b
CD
592static int visorbus_configure(struct controlvm_message *inmsg,
593 struct parser_context *parser_ctx)
12e364b9 594{
2ea5117b 595 struct controlvm_message_packet *cmd = &inmsg->cmd;
e82ba62e 596 u32 bus_no;
d32517e3 597 struct visor_device *bus_info;
c71529fe 598 int err = 0;
12e364b9 599
654bada0 600 bus_no = cmd->configure_bus.bus_no;
d32517e3 601 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
654bada0 602 if (!bus_info) {
c71529fe
DK
603 err = -EINVAL;
604 goto err_respond;
af53ce41
DK
605 }
606 if (bus_info->state.created == 0) {
c71529fe
DK
607 err = -EINVAL;
608 goto err_respond;
af53ce41
DK
609 }
610 if (bus_info->pending_msg_hdr) {
c71529fe
DK
611 err = -EIO;
612 goto err_respond;
12e364b9 613 }
34fbf6a0
DK
614 err = visorchannel_set_clientpartition(bus_info->visorchannel,
615 cmd->configure_bus.guest_handle);
c71529fe
DK
616 if (err)
617 goto err_respond;
046f93dc 618 if (parser_ctx) {
b32c5cb8
AS
619 const guid_t *partition_guid = parser_id_get(parser_ctx);
620
621 guid_copy(&bus_info->partition_guid, partition_guid);
046f93dc
DK
622 bus_info->name = parser_name_get(parser_ctx);
623 }
c71529fe 624 if (inmsg->hdr.flags.response_expected == 1)
4fb2539c 625 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
c71529fe
DK
626 return 0;
627
628err_respond:
71a0265d 629 dev_err(&chipset_dev->acpi_device->dev,
9a8dc900 630 "%s exited with err: %d\n", __func__, err);
b6b057d8 631 if (inmsg->hdr.flags.response_expected == 1)
4fb2539c 632 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
c71529fe 633 return err;
12e364b9
KC
634}
635
e80ffd4b 636static int visorbus_device_create(struct controlvm_message *inmsg)
12e364b9 637{
2ea5117b 638 struct controlvm_message_packet *cmd = &inmsg->cmd;
ef7b9dcb 639 struct controlvm_message_header *pmsg_hdr;
52063eca
JS
640 u32 bus_no = cmd->create_device.bus_no;
641 u32 dev_no = cmd->create_device.dev_no;
ef7b9dcb 642 struct visor_device *dev_info;
d32517e3 643 struct visor_device *bus_info;
b32c4997 644 struct visorchannel *visorchannel;
ad2a7d65 645 int err;
12e364b9 646
a298bc0b
DZ
647 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
648 if (!bus_info) {
a8c26e4b
DK
649 dev_err(&chipset_dev->acpi_device->dev,
650 "failed to get bus by id: %d\n", bus_no);
ad2a7d65
DK
651 err = -ENODEV;
652 goto err_respond;
12e364b9 653 }
a298bc0b 654 if (bus_info->state.created == 0) {
a8c26e4b
DK
655 dev_err(&chipset_dev->acpi_device->dev,
656 "bus not created, id: %d\n", bus_no);
ad2a7d65
DK
657 err = -EINVAL;
658 goto err_respond;
12e364b9 659 }
a298bc0b 660 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
614b083d 661 if (dev_info && dev_info->state.created == 1) {
a8c26e4b
DK
662 dev_err(&chipset_dev->acpi_device->dev,
663 "failed to get bus by id: %d/%d\n", bus_no, dev_no);
ad2a7d65
DK
664 err = -EEXIST;
665 goto err_respond;
12e364b9 666 }
a298bc0b 667
c60c8e26
BR
668 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
669 if (!dev_info) {
ad2a7d65
DK
670 err = -ENOMEM;
671 goto err_respond;
12e364b9 672 }
a298bc0b
DZ
673 dev_info->chipset_bus_no = bus_no;
674 dev_info->chipset_dev_no = dev_no;
b32c5cb8 675 guid_copy(&dev_info->inst, &cmd->create_device.dev_inst_guid);
a298bc0b 676 dev_info->device.parent = &bus_info->device;
90476670
SW
677 visorchannel = visorchannel_create(cmd->create_device.channel_addr,
678 GFP_KERNEL,
679 &cmd->create_device.data_type_guid,
680 true);
b32c4997 681 if (!visorchannel) {
a8c26e4b
DK
682 dev_err(&chipset_dev->acpi_device->dev,
683 "failed to create visorchannel: %d/%d\n",
684 bus_no, dev_no);
ad2a7d65
DK
685 err = -ENOMEM;
686 goto err_free_dev_info;
b32c4997
DZ
687 }
688 dev_info->visorchannel = visorchannel;
fe9f4b53
SW
689 guid_copy(&dev_info->channel_type_guid,
690 &cmd->create_device.data_type_guid);
691 if (guid_equal(&cmd->create_device.data_type_guid,
692 &visor_vhba_channel_guid)) {
ad2a7d65
DK
693 err = save_crash_message(inmsg, CRASH_DEV);
694 if (err)
3f49a21d 695 goto err_destroy_visorchannel;
ad2a7d65 696 }
5a80e98a
DK
697 if (inmsg->hdr.flags.response_expected == 1) {
698 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
699 if (!pmsg_hdr) {
ad2a7d65 700 err = -ENOMEM;
3f49a21d 701 goto err_destroy_visorchannel;
5a80e98a 702 }
5a80e98a
DK
703 memcpy(pmsg_hdr, &inmsg->hdr,
704 sizeof(struct controlvm_message_header));
705 dev_info->pending_msg_hdr = pmsg_hdr;
706 }
51c0f81c
SW
707 /* create_visor_device will send response */
708 err = create_visor_device(dev_info);
3f49a21d
DK
709 if (err)
710 goto err_destroy_visorchannel;
711
ad2a7d65 712 return 0;
5a80e98a 713
3f49a21d
DK
714err_destroy_visorchannel:
715 visorchannel_destroy(visorchannel);
716
ad2a7d65 717err_free_dev_info:
5a80e98a
DK
718 kfree(dev_info);
719
ad2a7d65 720err_respond:
5a80e98a 721 if (inmsg->hdr.flags.response_expected == 1)
4fb2539c 722 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
ad2a7d65 723 return err;
12e364b9
KC
724}
725
e80ffd4b 726static int visorbus_device_changestate(struct controlvm_message *inmsg)
12e364b9 727{
2ea5117b 728 struct controlvm_message_packet *cmd = &inmsg->cmd;
ef7b9dcb 729 struct controlvm_message_header *pmsg_hdr;
52063eca
JS
730 u32 bus_no = cmd->device_change_state.bus_no;
731 u32 dev_no = cmd->device_change_state.dev_no;
545f0913 732 struct visor_segment_state state = cmd->device_change_state.state;
a298bc0b 733 struct visor_device *dev_info;
b4a8e6ae 734 int err = 0;
12e364b9 735
a298bc0b 736 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
0278a905 737 if (!dev_info) {
40fc79f9 738 err = -ENODEV;
0825f191
DK
739 goto err_respond;
740 }
741 if (dev_info->state.created == 0) {
40fc79f9 742 err = -EINVAL;
0825f191 743 goto err_respond;
12e364b9 744 }
8e609b5b
DK
745 if (dev_info->pending_msg_hdr) {
746 /* only non-NULL if dev is still waiting on a response */
40fc79f9 747 err = -EIO;
8e609b5b
DK
748 goto err_respond;
749 }
9116ae7a 750
8e609b5b
DK
751 if (inmsg->hdr.flags.response_expected == 1) {
752 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
753 if (!pmsg_hdr) {
40fc79f9 754 err = -ENOMEM;
8e609b5b
DK
755 goto err_respond;
756 }
8e609b5b
DK
757 memcpy(pmsg_hdr, &inmsg->hdr,
758 sizeof(struct controlvm_message_header));
759 dev_info->pending_msg_hdr = pmsg_hdr;
760 }
8e609b5b
DK
761 if (state.alive == segment_state_running.alive &&
762 state.operating == segment_state_running.operating)
c0b44136
SW
763 /* Response will be sent from visorchipset_device_resume */
764 err = visorchipset_device_resume(dev_info);
 761 if (state.alive == segment_state_running.alive &&
 762     state.operating == segment_state_running.operating)
 763 /* Response will be sent from visorchipset_device_resume */
 764 err = visorchipset_device_resume(dev_info);
 765 /* ServerNotReady / ServerLost / SegmentStateStandby */
 766 else if (state.alive == segment_state_standby.alive &&
 767          state.operating == segment_state_standby.operating)
 768 /*
 769 * Technically this is the standby case, where the server is lost.
 770 * Response will be sent from visorchipset_device_pause.
 771 */
 772 err = visorchipset_device_pause(dev_info);
777err_respond:
03662df8 778 dev_err(&chipset_dev->acpi_device->dev, "failed: %d\n", err);
8e609b5b 779 if (inmsg->hdr.flags.response_expected == 1)
4fb2539c 780 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
40fc79f9 781 return err;
12e364b9
KC
782}
783
e80ffd4b 784static int visorbus_device_destroy(struct controlvm_message *inmsg)
12e364b9 785{
2ea5117b 786 struct controlvm_message_packet *cmd = &inmsg->cmd;
ef7b9dcb 787 struct controlvm_message_header *pmsg_hdr;
52063eca
JS
788 u32 bus_no = cmd->destroy_device.bus_no;
789 u32 dev_no = cmd->destroy_device.dev_no;
a298bc0b 790 struct visor_device *dev_info;
e7954918 791 int err;
12e364b9 792
a298bc0b 793 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
9e9eec6b 794 if (!dev_info) {
e7954918 795 err = -ENODEV;
9e9eec6b
DK
796 goto err_respond;
797 }
798 if (dev_info->state.created == 0) {
e7954918 799 err = -EINVAL;
9e9eec6b
DK
800 goto err_respond;
801 }
9e9eec6b
DK
802 if (dev_info->pending_msg_hdr) {
803 /* only non-NULL if dev is still waiting on a response */
e7954918 804 err = -EIO;
9e9eec6b
DK
805 goto err_respond;
806 }
807 if (inmsg->hdr.flags.response_expected == 1) {
808 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
809 if (!pmsg_hdr) {
e7954918 810 err = -ENOMEM;
9e9eec6b
DK
811 goto err_respond;
812 }
813
814 memcpy(pmsg_hdr, &inmsg->hdr,
815 sizeof(struct controlvm_message_header));
816 dev_info->pending_msg_hdr = pmsg_hdr;
817 }
661a215b 818 kfree(dev_info->name);
b74856b4 819 remove_visor_device(dev_info);
e7954918 820 return 0;
9e9eec6b
DK
821
822err_respond:
823 if (inmsg->hdr.flags.response_expected == 1)
4fb2539c 824 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
e7954918 825 return err;
12e364b9
KC
826}
827
 828/*
 829 * The general parahotplug flow works as follows. The visorchipset receives
 830 * a DEVICE_CHANGESTATE message from Command specifying a physical device
 831 * to enable or disable. The CONTROLVM message handler calls
 832 * parahotplug_process_message, which then adds the message to a global list
 833 * and kicks off a udev event which causes a user level script to enable or
 834 * disable the specified device. The udev script then writes to
 835 * /sys/devices/platform/visorchipset/parahotplug, which causes the
 836 * parahotplug store functions to get called, at which point the
 837 * appropriate CONTROLVM message is retrieved from the list and responded to.
 838 */
839
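/*
 * Illustration (editorial, not part of the driver): given the flow above, a
 * user-level hotplug script reacting to the uevent could complete a disable
 * request by writing the request id back through sysfs, e.g.
 *
 *	echo "$VISOR_PARAHOTPLUG_ID" > \
 *		/sys/devices/platform/visorchipset/parahotplug/devicedisabled
 *
 * which reaches devicedisabled_store() below and resolves the matching entry
 * via parahotplug_request_complete().
 */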
840#define PARAHOTPLUG_TIMEOUT_MS 2000
841
04dbfea6 842/*
5d501ef4
DB
843 * parahotplug_next_id() - generate unique int to match an outstanding
844 * CONTROLVM message with a udev script /sys
845 * response
ec17f452
DB
846 *
847 * Return: a unique integer value
12e364b9 848 */
e80ffd4b 849static int parahotplug_next_id(void)
12e364b9
KC
850{
851 static atomic_t id = ATOMIC_INIT(0);
26eb2c0c 852
12e364b9
KC
853 return atomic_inc_return(&id);
854}
855
04dbfea6 856/*
ec17f452
DB
857 * parahotplug_next_expiration() - returns the time (in jiffies) when a
858 * CONTROLVM message on the list should expire
859 * -- PARAHOTPLUG_TIMEOUT_MS in the future
860 *
861 * Return: expected expiration time (in jiffies)
12e364b9 862 */
e80ffd4b 863static unsigned long parahotplug_next_expiration(void)
12e364b9 864{
2cc1a1b3 865 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
12e364b9
KC
866}
867
04dbfea6 868/*
ec17f452
DB
869 * parahotplug_request_create() - create a parahotplug_request, which is
870 * basically a wrapper for a CONTROLVM_MESSAGE
871 * that we can stick on a list
872 * @msg: the message to insert in the request
873 *
874 * Return: the request containing the provided message
12e364b9 875 */
e80ffd4b
CD
876static struct parahotplug_request *parahotplug_request_create(
877 struct controlvm_message *msg)
12e364b9 878{
ea0dcfcf
QL
879 struct parahotplug_request *req;
880
8c8c975f 881 req = kmalloc(sizeof(*req), GFP_KERNEL);
38f736e9 882 if (!req)
12e364b9 883 return NULL;
12e364b9
KC
884 req->id = parahotplug_next_id();
885 req->expiration = parahotplug_next_expiration();
886 req->msg = *msg;
12e364b9
KC
887 return req;
888}
889
04dbfea6 890/*
ec17f452
DB
891 * parahotplug_request_destroy() - free a parahotplug_request
892 * @req: the request to deallocate
12e364b9 893 */
e80ffd4b 894static void parahotplug_request_destroy(struct parahotplug_request *req)
12e364b9
KC
895{
896 kfree(req);
897}
898
51319662 899static LIST_HEAD(parahotplug_request_list);
ac0aba67
SW
900/* lock for above */
901static DEFINE_SPINLOCK(parahotplug_request_list_lock);
51319662 902
04dbfea6 903/*
ec17f452
DB
904 * parahotplug_request_complete() - mark request as complete
905 * @id: the id of the request
906 * @active: indicates whether the request is assigned to active partition
907 *
5d501ef4 908 * Called from the /sys handler, which means the user script has
ec17f452 909 * finished the enable/disable. Find the matching identifier, and
12e364b9 910 * respond to the CONTROLVM message with success.
ec17f452
DB
911 *
912 * Return: 0 on success or -EINVAL on failure
12e364b9 913 */
e80ffd4b 914static int parahotplug_request_complete(int id, u16 active)
12e364b9 915{
e82ba62e
JS
916 struct list_head *pos;
917 struct list_head *tmp;
040b78f7 918 struct parahotplug_request *req;
12e364b9 919
ddf5de53 920 spin_lock(&parahotplug_request_list_lock);
12e364b9 921 /* Look for a request matching "id". */
ddf5de53 922 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
040b78f7 923 req = list_entry(pos, struct parahotplug_request, list);
12e364b9 924 if (req->id == id) {
ec17f452
DB
925 /*
926 * Found a match. Remove it from the list and
12e364b9
KC
927 * respond.
928 */
929 list_del(pos);
ddf5de53 930 spin_unlock(&parahotplug_request_list_lock);
2ea5117b 931 req->msg.cmd.device_change_state.state.active = active;
98d7b594 932 if (req->msg.hdr.flags.response_expected)
4c0e65f8
DK
933 controlvm_respond(
934 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
935 &req->msg.cmd.device_change_state.state);
12e364b9
KC
936 parahotplug_request_destroy(req);
937 return 0;
938 }
939 }
ddf5de53 940 spin_unlock(&parahotplug_request_list_lock);
119296ea 941 return -EINVAL;
12e364b9
KC
942}
943
04dbfea6 944/*
ebeff055
DK
945 * devicedisabled_store() - disables the hotplug device
946 * @dev: sysfs interface variable not utilized in this function
947 * @attr: sysfs interface variable not utilized in this function
948 * @buf: buffer containing the device id
949 * @count: the size of the buffer
950 *
951 * The parahotplug/devicedisabled interface gets called by our support script
952 * when an SR-IOV device has been shut down. The ID is passed to the script
953 * and then passed back when the device has been removed.
954 *
955 * Return: the size of the buffer for success or negative for error
956 */
957static ssize_t devicedisabled_store(struct device *dev,
958 struct device_attribute *attr,
959 const char *buf, size_t count)
960{
961 unsigned int id;
962 int err;
963
964 if (kstrtouint(buf, 10, &id))
965 return -EINVAL;
ebeff055
DK
966 err = parahotplug_request_complete(id, 0);
967 if (err < 0)
968 return err;
969 return count;
970}
971static DEVICE_ATTR_WO(devicedisabled);
972
04dbfea6 973/*
ebeff055
DK
974 * deviceenabled_store() - enables the hotplug device
975 * @dev: sysfs interface variable not utilized in this function
976 * @attr: sysfs interface variable not utilized in this function
977 * @buf: buffer containing the device id
978 * @count: the size of the buffer
979 *
980 * The parahotplug/deviceenabled interface gets called by our support script
981 * when an SR-IOV device has been recovered. The ID is passed to the script
982 * and then passed back when the device has been brought back up.
983 *
984 * Return: the size of the buffer for success or negative for error
985 */
986static ssize_t deviceenabled_store(struct device *dev,
987 struct device_attribute *attr,
988 const char *buf, size_t count)
989{
990 unsigned int id;
991
992 if (kstrtouint(buf, 10, &id))
993 return -EINVAL;
ebeff055
DK
994 parahotplug_request_complete(id, 1);
995 return count;
996}
997static DEVICE_ATTR_WO(deviceenabled);
998
999static struct attribute *visorchipset_install_attrs[] = {
1000 &dev_attr_toolaction.attr,
1001 &dev_attr_boottotool.attr,
1002 &dev_attr_error.attr,
1003 &dev_attr_textid.attr,
1004 &dev_attr_remaining_steps.attr,
1005 NULL
1006};
1007
a2d1e428 1008static const struct attribute_group visorchipset_install_group = {
ebeff055
DK
1009 .name = "install",
1010 .attrs = visorchipset_install_attrs
1011};
1012
1013static struct attribute *visorchipset_parahotplug_attrs[] = {
1014 &dev_attr_devicedisabled.attr,
1015 &dev_attr_deviceenabled.attr,
1016 NULL
1017};
1018
1722270b 1019static const struct attribute_group visorchipset_parahotplug_group = {
ebeff055
DK
1020 .name = "parahotplug",
1021 .attrs = visorchipset_parahotplug_attrs
1022};
1023
1024static const struct attribute_group *visorchipset_dev_groups[] = {
1025 &visorchipset_install_group,
1026 &visorchipset_parahotplug_group,
1027 NULL
1028};
1029
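/*
 * Editorial note: once these groups are registered in visorchipset_init(),
 * the attributes defined earlier appear (assuming the DEVPATH documented for
 * the chipset uevents) as
 *
 *	.../visorchipset/install/{toolaction,boottotool,error,textid,remaining_steps}
 *	.../visorchipset/parahotplug/{devicedisabled,deviceenabled}
 *
 * which are the files the s-Par support scripts read and write.
 */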
04dbfea6 1030/*
ebeff055
DK
1031 * parahotplug_request_kickoff() - initiate parahotplug request
1032 * @req: the request to initiate
1033 *
1034 * Cause uevent to run the user level script to do the disable/enable specified
1035 * in the parahotplug_request.
1036 */
e80ffd4b 1037static int parahotplug_request_kickoff(struct parahotplug_request *req)
ebeff055
DK
1038{
1039 struct controlvm_message_packet *cmd = &req->msg.cmd;
1040 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
da56cb04
DK
1041 env_func[40];
1042 char *envp[] = { env_cmd, env_id, env_state, env_bus, env_dev,
1043 env_func, NULL
ebeff055
DK
1044 };
1045
c5a28902
SW
1046 sprintf(env_cmd, "VISOR_PARAHOTPLUG=1");
1047 sprintf(env_id, "VISOR_PARAHOTPLUG_ID=%d", req->id);
1048 sprintf(env_state, "VISOR_PARAHOTPLUG_STATE=%d",
ebeff055 1049 cmd->device_change_state.state.active);
c5a28902 1050 sprintf(env_bus, "VISOR_PARAHOTPLUG_BUS=%d",
ebeff055 1051 cmd->device_change_state.bus_no);
c5a28902 1052 sprintf(env_dev, "VISOR_PARAHOTPLUG_DEVICE=%d",
ebeff055 1053 cmd->device_change_state.dev_no >> 3);
c5a28902 1054 sprintf(env_func, "VISOR_PARAHOTPLUG_FUNCTION=%d",
ebeff055 1055 cmd->device_change_state.dev_no & 0x7);
ae0fa822
DK
1056 return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1057 KOBJ_CHANGE, envp);
ebeff055
DK
1058}
1059
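/*
 * Editorial note: the uevent environment handed to the user-level script by
 * the function above therefore looks like
 *
 *	VISOR_PARAHOTPLUG=1
 *	VISOR_PARAHOTPLUG_ID=<request id>
 *	VISOR_PARAHOTPLUG_STATE=<0 disable | 1 enable>
 *	VISOR_PARAHOTPLUG_BUS=<bus_no>
 *	VISOR_PARAHOTPLUG_DEVICE=<dev_no >> 3>
 *	VISOR_PARAHOTPLUG_FUNCTION=<dev_no & 0x7>
 */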
04dbfea6 1060/*
ec17f452
DB
1061 * parahotplug_process_message() - enables or disables a PCI device by kicking
1062 * off a udev script
1063 * @inmsg: the message indicating whether to enable or disable
12e364b9 1064 */
e80ffd4b 1065static int parahotplug_process_message(struct controlvm_message *inmsg)
12e364b9
KC
1066{
1067 struct parahotplug_request *req;
ae0fa822 1068 int err;
12e364b9
KC
1069
1070 req = parahotplug_request_create(inmsg);
38f736e9 1071 if (!req)
114d5dcf 1072 return -ENOMEM;
d02bde9d
DK
1073 /*
1074 * For enable messages, just respond with success right away, we don't
1075 * need to wait to see if the enable was successful.
1076 */
2ea5117b 1077 if (inmsg->cmd.device_change_state.state.active) {
ae0fa822
DK
1078 err = parahotplug_request_kickoff(req);
1079 if (err)
1080 goto err_respond;
4c0e65f8
DK
1081 controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS,
1082 &inmsg->cmd.device_change_state.state);
12e364b9 1083 parahotplug_request_destroy(req);
ae0fa822 1084 return 0;
12e364b9 1085 }
ae0fa822 1086 /*
6577cbf1
DK
1087 * For disable messages, add the request to the request list before
1088 * kicking off the udev script. It won't get responded to until the
1089 * script has indicated it's done.
ae0fa822
DK
1090 */
1091 spin_lock(&parahotplug_request_list_lock);
1092 list_add_tail(&req->list, &parahotplug_request_list);
1093 spin_unlock(&parahotplug_request_list_lock);
ae0fa822
DK
1094 err = parahotplug_request_kickoff(req);
1095 if (err)
1096 goto err_respond;
114d5dcf 1097 return 0;
ae0fa822
DK
1098
1099err_respond:
4c0e65f8
DK
1100 controlvm_respond(&inmsg->hdr, err,
1101 &inmsg->cmd.device_change_state.state);
ae0fa822 1102 return err;
12e364b9
KC
1103}
1104
7289a8dd
DB
1105/*
1106 * chipset_ready_uevent() - sends chipset_ready action
ebeff055
DK
1107 *
1108 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1109 *
7289a8dd 1110 * Return: 0 on success, negative on failure
ebeff055 1111 */
e80ffd4b 1112static int chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
ebeff055 1113{
deeeca6d
DK
1114 int res;
1115
040b78f7 1116 res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj, KOBJ_ONLINE);
7289a8dd 1117 if (msg_hdr->flags.response_expected)
4c0e65f8 1118 controlvm_respond(msg_hdr, res, NULL);
deeeca6d 1119 return res;
ebeff055
DK
1120}
1121
7289a8dd
DB
1122/*
1123 * chipset_selftest_uevent() - sends chipset_selftest action
1124 *
1125 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1126 *
1127 * Return: 0 on success, negative on failure
1128 */
e80ffd4b 1129static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
ebeff055
DK
1130{
1131 char env_selftest[20];
1132 char *envp[] = { env_selftest, NULL };
deeeca6d 1133 int res;
ebeff055
DK
1134
1135 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
deeeca6d
DK
1136 res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1137 KOBJ_CHANGE, envp);
7289a8dd 1138 if (msg_hdr->flags.response_expected)
4c0e65f8 1139 controlvm_respond(msg_hdr, res, NULL);
deeeca6d 1140 return res;
ebeff055
DK
1141}
1142
7289a8dd
DB
1143/*
1144 * chipset_notready_uevent() - sends chipset_notready action
ebeff055
DK
1145 *
1146 * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1147 *
7289a8dd 1148 * Return: 0 on success, negative on failure
ebeff055 1149 */
e80ffd4b 1150static int chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
ebeff055 1151{
904ee62a 1152 int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
34fbf6a0 1153 KOBJ_OFFLINE);
904ee62a 1154
ebeff055 1155 if (msg_hdr->flags.response_expected)
4c0e65f8 1156 controlvm_respond(msg_hdr, res, NULL);
deeeca6d 1157 return res;
ebeff055
DK
1158}
1159
88845f40
DK
1160static int unisys_vmcall(unsigned long tuple, unsigned long param)
1161{
1162 int result = 0;
1163 unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
1164 unsigned long reg_ebx;
1165 unsigned long reg_ecx;
1166
1167 reg_ebx = param & 0xFFFFFFFF;
1168 reg_ecx = param >> 32;
88845f40
DK
1169 cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
1170 if (!(cpuid_ecx & 0x80000000))
1171 return -EPERM;
88845f40 1172 __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
34fbf6a0 1173 "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
bd801a07
DK
1174 if (result)
1175 goto error;
bd801a07 1176 return 0;
9116ae7a 1177
ac0aba67
SW
1178/* Need to convert from VMCALL error codes to Linux */
1179error:
bd801a07
DK
1180 switch (result) {
1181 case VMCALL_RESULT_INVALID_PARAM:
1182 return -EINVAL;
1183 case VMCALL_RESULT_DATA_UNAVAILABLE:
1184 return -ENODEV;
1185 default:
1186 return -EFAULT;
1187 }
88845f40 1188}
ab61097c 1189
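/*
 * Editorial note: the ".byte 0x00f, 0x001, 0x0c1" sequence above emits the
 * raw VMCALL instruction; tuple is passed in EAX, the low and high 32 bits
 * of param in EBX and ECX, and the hypervisor's status comes back in EAX,
 * which the switch at the end of the function maps onto Linux errno values.
 */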
f1f537c2 1190static int controlvm_channel_create(struct visorchipset_device *dev)
5f3a7e36 1191{
f1f537c2
DK
1192 struct visorchannel *chan;
1193 u64 addr;
800da5fb
DK
1194 int err;
1195
f1f537c2
DK
1196 err = unisys_vmcall(VMCALL_CONTROLVM_ADDR,
1197 virt_to_phys(&dev->controlvm_params));
800da5fb
DK
1198 if (err)
1199 return err;
f1f537c2 1200 addr = dev->controlvm_params.address;
90476670
SW
1201 chan = visorchannel_create(addr, GFP_KERNEL,
1202 &visor_controlvm_channel_guid, true);
f1f537c2
DK
1203 if (!chan)
1204 return -ENOMEM;
1205 dev->controlvm_channel = chan;
bd801a07 1206 return 0;
5f3a7e36
DK
1207}
1208
e80ffd4b 1209static void setup_crash_devices_work_queue(struct work_struct *work)
12e364b9 1210{
e6bdb904
BR
1211 struct controlvm_message local_crash_bus_msg;
1212 struct controlvm_message local_crash_dev_msg;
3ab47701 1213 struct controlvm_message msg;
e6bdb904
BR
1214 u32 local_crash_msg_offset;
1215 u16 local_crash_msg_count;
12e364b9 1216
12e364b9 1217 /* send init chipset msg */
98d7b594 1218 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2ea5117b
BR
1219 msg.cmd.init_chipset.bus_count = 23;
1220 msg.cmd.init_chipset.switch_count = 0;
12e364b9 1221 chipset_init(&msg);
12e364b9 1222 /* get saved message count */
765b2f82 1223 if (visorchannel_read(chipset_dev->controlvm_channel,
545f0913 1224 offsetof(struct visor_controlvm_channel,
d19642f6 1225 saved_crash_message_count),
e6bdb904 1226 &local_crash_msg_count, sizeof(u16)) < 0) {
0f7453af
DK
1227 dev_err(&chipset_dev->acpi_device->dev,
1228 "failed to read channel\n");
12e364b9
KC
1229 return;
1230 }
e6bdb904 1231 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
040b78f7 1232 dev_err(&chipset_dev->acpi_device->dev, "invalid count\n");
12e364b9
KC
1233 return;
1234 }
12e364b9 1235 /* get saved crash message offset */
765b2f82 1236 if (visorchannel_read(chipset_dev->controlvm_channel,
545f0913 1237 offsetof(struct visor_controlvm_channel,
d19642f6 1238 saved_crash_message_offset),
e6bdb904 1239 &local_crash_msg_offset, sizeof(u32)) < 0) {
0f7453af
DK
1240 dev_err(&chipset_dev->acpi_device->dev,
1241 "failed to read channel\n");
12e364b9
KC
1242 return;
1243 }
12e364b9 1244 /* read create device message for storage bus offset */
765b2f82 1245 if (visorchannel_read(chipset_dev->controlvm_channel,
e6bdb904
BR
1246 local_crash_msg_offset,
1247 &local_crash_bus_msg,
3ab47701 1248 sizeof(struct controlvm_message)) < 0) {
0f7453af
DK
1249 dev_err(&chipset_dev->acpi_device->dev,
1250 "failed to read channel\n");
12e364b9
KC
1251 return;
1252 }
12e364b9 1253 /* read create device message for storage device */
765b2f82 1254 if (visorchannel_read(chipset_dev->controlvm_channel,
e6bdb904 1255 local_crash_msg_offset +
3ab47701 1256 sizeof(struct controlvm_message),
e6bdb904 1257 &local_crash_dev_msg,
3ab47701 1258 sizeof(struct controlvm_message)) < 0) {
0f7453af
DK
1259 dev_err(&chipset_dev->acpi_device->dev,
1260 "failed to read channel\n");
12e364b9
KC
1261 return;
1262 }
12e364b9 1263 /* reuse IOVM create bus message */
d9b89ef1 1264 if (!local_crash_bus_msg.cmd.create_bus.channel_addr) {
0f7453af
DK
1265 dev_err(&chipset_dev->acpi_device->dev,
1266 "no valid create_bus message\n");
12e364b9
KC
1267 return;
1268 }
ec17cb8a 1269 visorbus_create(&local_crash_bus_msg);
12e364b9 1270 /* reuse create device message for storage device */
d9b89ef1 1271 if (!local_crash_dev_msg.cmd.create_device.channel_addr) {
0f7453af
DK
1272 dev_err(&chipset_dev->acpi_device->dev,
1273 "no valid create_device message\n");
12e364b9
KC
1274 return;
1275 }
8b0a6cfa 1276 visorbus_device_create(&local_crash_dev_msg);
12e364b9
KC
1277}
1278
76956aa7
SW
1279void visorbus_response(struct visor_device *bus_info, int response,
1280 int controlvm_id)
12e364b9 1281{
fd9e450c
DK
1282 if (!bus_info->pending_msg_hdr)
1283 return;
0274b5ae 1284
fd9e450c 1285 controlvm_responder(controlvm_id, bus_info->pending_msg_hdr, response);
0274b5ae
DZ
1286 kfree(bus_info->pending_msg_hdr);
1287 bus_info->pending_msg_hdr = NULL;
12e364b9
KC
1288}
1289
722e73d5
SW
1290void visorbus_device_changestate_response(struct visor_device *dev_info,
1291 int response,
1292 struct visor_segment_state state)
12e364b9 1293{
fd9e450c
DK
1294 if (!dev_info->pending_msg_hdr)
1295 return;
1296
040b78f7
DK
1297 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE, dev_info,
1298 response, state);
0274b5ae
DZ
1299 kfree(dev_info->pending_msg_hdr);
1300 dev_info->pending_msg_hdr = NULL;
12e364b9
KC
1301}
1302
39b486d6
DK
1303static void parser_done(struct parser_context *ctx)
1304{
1305 chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
1306 kfree(ctx);
1307}
1308
45311439
DK
1309static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
1310 bool *retry)
612b81c9 1311{
a5eb2188 1312 unsigned long allocbytes;
612b81c9 1313 struct parser_context *ctx;
a35e3268 1314 void *mapping;
612b81c9 1315
3e4273db 1316 *retry = false;
26a42c25 1317 /* alloc an extra byte to ensure payload is \0 terminated */
a5eb2188 1318 allocbytes = (unsigned long)bytes + 1 + (sizeof(struct parser_context) -
26a42c25 1319 sizeof(struct visor_controlvm_parameters_header));
040b78f7
DK
1320 if ((chipset_dev->controlvm_payload_bytes_buffered + bytes) >
1321 MAX_CONTROLVM_PAYLOAD_BYTES) {
3e4273db 1322 *retry = true;
612b81c9
DK
1323 return NULL;
1324 }
8c8c975f 1325 ctx = kzalloc(allocbytes, GFP_KERNEL);
612b81c9 1326 if (!ctx) {
3e4273db 1327 *retry = true;
612b81c9
DK
1328 return NULL;
1329 }
612b81c9
DK
1330 ctx->allocbytes = allocbytes;
1331 ctx->param_bytes = bytes;
a35e3268
EA
1332 mapping = memremap(addr, bytes, MEMREMAP_WB);
1333 if (!mapping)
1334 goto err_finish_ctx;
26a42c25 1335 memcpy(&ctx->data, mapping, bytes);
a35e3268 1336 memunmap(mapping);
612b81c9 1337 ctx->byte_stream = true;
765b2f82 1338 chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
612b81c9
DK
1339 return ctx;
1340
1341err_finish_ctx:
90544cb1 1342 kfree(ctx);
612b81c9
DK
1343 return NULL;
1344}
1345
04dbfea6 1346/*
511474a5
DK
1347 * handle_command() - process a controlvm message
1348 * @inmsg: the message to process
1349 * @channel_addr: address of the controlvm channel
1350 *
1351 * Return:
25a5128e
DK
1352 * 0 - Successfully processed the message
1353 * -EAGAIN - ControlVM message was not processed and should be retried
1354 * reading the next controlvm message; a scenario where this can
1355 * occur is when we need to throttle the allocation of memory in
1356 * which to copy out controlvm payload data.
1357 * < 0 - error: ControlVM message was processed but an error occurred.
511474a5 1358 */
e80ffd4b 1359static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
511474a5
DK
1360{
1361 struct controlvm_message_packet *cmd = &inmsg.cmd;
1362 u64 parm_addr;
1363 u32 parm_bytes;
1364 struct parser_context *parser_ctx = NULL;
511474a5 1365 struct controlvm_message ackmsg;
25a5128e 1366 int err = 0;
511474a5
DK
1367
1368 /* create parsing context if necessary */
511474a5
DK
1369 parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1370 parm_bytes = inmsg.hdr.payload_bytes;
511474a5
DK
1371 /*
1372 * Parameter and channel addresses within test messages actually lie
1373 * within our OS-controlled memory. We need to know that, because it
1374 * makes a difference in how we compute the virtual address.
1375 */
4d77e606 1376 if (parm_bytes) {
ef7b9dcb 1377 bool retry;
511474a5 1378
45311439 1379 parser_ctx = parser_init_stream(parm_addr, parm_bytes, &retry);
511474a5 1380 if (!parser_ctx && retry)
25a5128e 1381 return -EAGAIN;
511474a5 1382 }
a35e3268
EA
1383 controlvm_init_response(&ackmsg, &inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1384 err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
1385 CONTROLVM_QUEUE_ACK, &ackmsg);
1386 if (err)
1387 return err;
511474a5
DK
1388 switch (inmsg.hdr.id) {
1389 case CONTROLVM_CHIPSET_INIT:
25a5128e 1390 err = chipset_init(&inmsg);
511474a5
DK
1391 break;
1392 case CONTROLVM_BUS_CREATE:
ec17cb8a 1393 err = visorbus_create(&inmsg);
511474a5
DK
1394 break;
1395 case CONTROLVM_BUS_DESTROY:
ec17cb8a 1396 err = visorbus_destroy(&inmsg);
511474a5
DK
1397 break;
1398 case CONTROLVM_BUS_CONFIGURE:
ec17cb8a 1399 err = visorbus_configure(&inmsg, parser_ctx);
511474a5
DK
1400 break;
1401 case CONTROLVM_DEVICE_CREATE:
8b0a6cfa 1402 err = visorbus_device_create(&inmsg);
511474a5
DK
1403 break;
1404 case CONTROLVM_DEVICE_CHANGESTATE:
1405 if (cmd->device_change_state.flags.phys_device) {
25a5128e 1406 err = parahotplug_process_message(&inmsg);
511474a5
DK
1407 } else {
1408 /*
6577cbf1
DK
1409 * save the hdr and cmd structures for later use when
1410 * sending back the response to Command
511474a5 1411 */
8b0a6cfa 1412 err = visorbus_device_changestate(&inmsg);
511474a5
DK
1413 break;
1414 }
1415 break;
1416 case CONTROLVM_DEVICE_DESTROY:
8b0a6cfa 1417 err = visorbus_device_destroy(&inmsg);
511474a5
DK
1418 break;
1419 case CONTROLVM_DEVICE_CONFIGURE:
25a5128e 1420 /* no op just send a respond that we passed */
511474a5 1421 if (inmsg.hdr.flags.response_expected)
4c0e65f8
DK
1422 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS,
1423 NULL);
511474a5
DK
1424 break;
1425 case CONTROLVM_CHIPSET_READY:
25a5128e 1426 err = chipset_ready_uevent(&inmsg.hdr);
511474a5
DK
1427 break;
1428 case CONTROLVM_CHIPSET_SELFTEST:
25a5128e 1429 err = chipset_selftest_uevent(&inmsg.hdr);
511474a5
DK
1430 break;
1431 case CONTROLVM_CHIPSET_STOP:
25a5128e 1432 err = chipset_notready_uevent(&inmsg.hdr);
511474a5
DK
1433 break;
1434 default:
25a5128e 1435 err = -ENOMSG;
511474a5 1436 if (inmsg.hdr.flags.response_expected)
25a5128e 1437 controlvm_respond(&inmsg.hdr,
4c0e65f8 1438 -CONTROLVM_RESP_ID_UNKNOWN, NULL);
511474a5
DK
1439 break;
1440 }
511474a5
DK
1441 if (parser_ctx) {
1442 parser_done(parser_ctx);
1443 parser_ctx = NULL;
1444 }
25a5128e 1445 return err;
511474a5
DK
1446}
1447
 1448/*
 1449 * read_controlvm_event() - retrieves the next message from the
 1450 *                          CONTROLVM_QUEUE_EVENT queue in the controlvm
 1451 *                          channel
 1452 * @msg: pointer to the retrieved message
 1453 *
 1454 * Return: 0 if a valid message was retrieved or a negative error code
 1455 */
e80ffd4b 1456static int read_controlvm_event(struct controlvm_message *msg)
8a285327 1457{
904ee62a 1458 int err = visorchannel_signalremove(chipset_dev->controlvm_channel,
da56cb04 1459 CONTROLVM_QUEUE_EVENT, msg);
9116ae7a 1460
25a5128e
DK
1461 if (err)
1462 return err;
25a5128e
DK
1463 /* got a message */
1464 if (msg->hdr.flags.test_message == 1)
1465 return -EINVAL;
25a5128e 1466 return 0;
8a285327
DK
1467}
1468
04dbfea6 1469/*
a9c73937
DK
1470 * parahotplug_process_list() - remove any request from the list that's been on
1471 * there too long and respond with an error
1472 */
e80ffd4b 1473static void parahotplug_process_list(void)
a9c73937
DK
1474{
1475 struct list_head *pos;
1476 struct list_head *tmp;
1477
1478 spin_lock(&parahotplug_request_list_lock);
a9c73937
DK
1479 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1480 struct parahotplug_request *req =
1481 list_entry(pos, struct parahotplug_request, list);
1482
1483 if (!time_after_eq(jiffies, req->expiration))
1484 continue;
a9c73937
DK
1485 list_del(pos);
1486 if (req->msg.hdr.flags.response_expected)
4c0e65f8 1487 controlvm_respond(
a9c73937 1488 &req->msg.hdr,
98f9ed9e 1489 CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
4c0e65f8 1490 &req->msg.cmd.device_change_state.state);
a9c73937
DK
1491 parahotplug_request_destroy(req);
1492 }
a9c73937
DK
1493 spin_unlock(&parahotplug_request_list_lock);
1494}
1495
e80ffd4b 1496static void controlvm_periodic_work(struct work_struct *work)
3d8394c8
DK
1497{
1498 struct controlvm_message inmsg;
04dbc09b 1499 int count = 0;
fbc1023a
DK
1500 int err;
1501
1502 /* Drain the RESPONSE queue make it empty */
1503 do {
1504 err = visorchannel_signalremove(chipset_dev->controlvm_channel,
1505 CONTROLVM_QUEUE_RESPONSE,
1506 &inmsg);
04dbc09b 1507 } while ((!err) && (++count < CONTROLVM_MESSAGE_MAX));
fbc1023a
DK
1508 if (err != -EAGAIN)
1509 goto schedule_out;
fbc1023a
DK
1510 if (chipset_dev->controlvm_pending_msg_valid) {
1511 /*
6577cbf1
DK
1512 * we throttled processing of a prior msg, so try to process
1513 * it again rather than reading a new one
fbc1023a
DK
1514 */
1515 inmsg = chipset_dev->controlvm_pending_msg;
1516 chipset_dev->controlvm_pending_msg_valid = false;
1517 err = 0;
1518 } else {
1519 err = read_controlvm_event(&inmsg);
3d8394c8 1520 }
fbc1023a 1521 while (!err) {
765b2f82 1522 chipset_dev->most_recent_message_jiffies = jiffies;
fbc1023a
DK
1523 err = handle_command(inmsg,
1524 visorchannel_get_physaddr
1525 (chipset_dev->controlvm_channel));
1526 if (err == -EAGAIN) {
765b2f82
SW
1527 chipset_dev->controlvm_pending_msg = inmsg;
1528 chipset_dev->controlvm_pending_msg_valid = true;
fbc1023a 1529 break;
3d8394c8 1530 }
fbc1023a
DK
1531
1532 err = read_controlvm_event(&inmsg);
3d8394c8 1533 }
3d8394c8
DK
1534 /* parahotplug_worker */
1535 parahotplug_process_list();
1536
d36c4857
SW
1537/*
1538 * The controlvm messages are sent in a bulk. If we start receiving messages, we
1539 * want the polling to be fast. If we do not receive any message for
1540 * MIN_IDLE_SECONDS, we can slow down the polling.
1541 */
fbc1023a 1542schedule_out:
765b2f82
SW
1543 if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
1544 (HZ * MIN_IDLE_SECONDS))) {
3d8394c8 1545 /*
6577cbf1
DK
1546 * it's been longer than MIN_IDLE_SECONDS since we processed
1547 * our last controlvm message; slow down the polling
3d8394c8 1548 */
3fbee197
DK
1549 if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_SLOW)
1550 chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_SLOW;
3d8394c8 1551 } else {
3fbee197
DK
1552 if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_FAST)
1553 chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
3d8394c8 1554 }
765b2f82
SW
1555 schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
1556 chipset_dev->poll_jiffies);
3d8394c8
DK
1557}
1558
e80ffd4b 1559static int visorchipset_init(struct acpi_device *acpi_device)
12e364b9 1560{
1366a3db 1561 int err = -ENODEV;
765b2f82 1562 struct visorchannel *controlvm_channel;
d3368a58 1563
765b2f82
SW
1564 chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
1565 if (!chipset_dev)
1366a3db 1566 goto error;
f1f537c2
DK
1567 err = controlvm_channel_create(chipset_dev);
1568 if (err)
1569 goto error_free_chipset_dev;
765b2f82 1570 acpi_device->driver_data = chipset_dev;
765b2f82 1571 chipset_dev->acpi_device = acpi_device;
3fbee197 1572 chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
15c012d5
SW
1573 err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
1574 visorchipset_dev_groups);
1575 if (err < 0)
1576 goto error_destroy_channel;
f1f537c2 1577 controlvm_channel = chipset_dev->controlvm_channel;
403043c4 1578 if (!visor_check_channel(visorchannel_get_header(controlvm_channel),
e25201d6 1579 &chipset_dev->acpi_device->dev,
b32c5cb8 1580 &visor_controlvm_channel_guid,
403043c4
SW
1581 "controlvm",
1582 sizeof(struct visor_controlvm_channel),
1583 VISOR_CONTROLVM_CHANNEL_VERSIONID,
1584 VISOR_CHANNEL_SIGNATURE))
15c012d5 1585 goto error_delete_groups;
4da3336c
DK
1586 /* if booting in a crash kernel */
1587 if (is_kdump_kernel())
765b2f82 1588 INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
4da3336c
DK
1589 setup_crash_devices_work_queue);
1590 else
765b2f82 1591 INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
4da3336c 1592 controlvm_periodic_work);
765b2f82 1593 chipset_dev->most_recent_message_jiffies = jiffies;
3fbee197 1594 chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
765b2f82
SW
1595 schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
1596 chipset_dev->poll_jiffies);
1366a3db
DK
1597 err = visorbus_init();
1598 if (err < 0)
15c012d5 1599 goto error_cancel_work;
1366a3db
DK
1600 return 0;
1601
1366a3db 1602error_cancel_work:
765b2f82 1603 cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
1366a3db 1604
15c012d5
SW
1605error_delete_groups:
1606 sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
1607 visorchipset_dev_groups);
1608
1366a3db 1609error_destroy_channel:
765b2f82
SW
1610 visorchannel_destroy(chipset_dev->controlvm_channel);
1611
1612error_free_chipset_dev:
1613 kfree(chipset_dev);
1366a3db
DK
1614
1615error:
372b9f22 1616 dev_err(&acpi_device->dev, "failed with error %d\n", err);
1366a3db 1617 return err;
e3420ed6
EA
1618}
1619
e80ffd4b 1620static int visorchipset_exit(struct acpi_device *acpi_device)
12e364b9 1621{
c79b28f7 1622 visorbus_exit();
765b2f82 1623 cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
15c012d5
SW
1624 sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
1625 visorchipset_dev_groups);
765b2f82 1626 visorchannel_destroy(chipset_dev->controlvm_channel);
765b2f82 1627 kfree(chipset_dev);
55c67dca
PB
1628 return 0;
1629}
1630
1631static const struct acpi_device_id unisys_device_ids[] = {
1632 {"PNP0A07", 0},
1633 {"", 0},
1634};
55c67dca
PB
1635
1636static struct acpi_driver unisys_acpi_driver = {
1637 .name = "unisys_acpi",
1638 .class = "unisys_acpi_class",
1639 .owner = THIS_MODULE,
1640 .ids = unisys_device_ids,
1641 .ops = {
1642 .add = visorchipset_init,
1643 .remove = visorchipset_exit,
027b03e7 1644 },
55c67dca 1645};
1fc07f99
DK
1646
1647MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
1648
c1d28da7 1649static __init int visorutil_spar_detect(void)
d5b3f1dc
EA
1650{
1651 unsigned int eax, ebx, ecx, edx;
1652
0c9f3536 1653 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
d5b3f1dc 1654 /* check the ID */
a27ded92
SW
1655 cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx);
1656 return (ebx == UNISYS_VISOR_ID_EBX) &&
1657 (ecx == UNISYS_VISOR_ID_ECX) &&
1658 (edx == UNISYS_VISOR_ID_EDX);
d5b3f1dc 1659 }
e4a06430 1660 return 0;
d5b3f1dc 1661}
55c67dca 1662
056e4fc2 1663static int __init init_unisys(void)
55c67dca
PB
1664{
1665 int result;
35e606de 1666
d5b3f1dc 1667 if (!visorutil_spar_detect())
55c67dca 1668 return -ENODEV;
55c67dca
PB
1669 result = acpi_bus_register_driver(&unisys_acpi_driver);
1670 if (result)
1671 return -ENODEV;
55c67dca
PB
1672 pr_info("Unisys Visorchipset Driver Loaded.\n");
1673 return 0;
1674};
1675
056e4fc2 1676static void __exit exit_unisys(void)
55c67dca
PB
1677{
1678 acpi_bus_unregister_driver(&unisys_acpi_driver);
12e364b9
KC
1679}
1680
55c67dca
PB
1681module_init(init_unisys);
1682module_exit(exit_unisys);
12e364b9
KC
1683
1684MODULE_AUTHOR("Unisys");
1685MODULE_LICENSE("GPL");
bff8c1a1 1686MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");