/[base]/stable/10/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c
ViewVC logotype

Contents of /stable/10/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 304581 - (show annotations) (download)
Mon Aug 22 02:11:30 2016 UTC (7 years, 10 months ago) by sephe
File MIME type: text/plain
File size: 57866 byte(s)
MFC 304251

    hyperv/storvsc: Deliver CAM_SEL_TIMEOUT upon SRB status error.

    SRB status is set to 0x20 by the hypervisor, if the specified LUN is
    inaccessible, and even worse the INQUIRY response will not be set by
    the hypervisor at all under this situation.  Additionally, SRB status
    is 0x20 too, for TUR on an inaccessible LUN.

    Deliver CAM_SEL_TIMEOUT to CAM upon SRB status errors as suggested by
    Scott Long; other values seem improper.

    This commit fixes the Hyper-V disk hotplug support.

    Submitted by:   Hongjiang Zhang <honzhan microsoft com>
    Sponsored by:   Microsoft
    Differential Revision:  https://reviews.freebsd.org/D7521

1 /*-
2 * Copyright (c) 2009-2012,2016 Microsoft Corp.
3 * Copyright (c) 2012 NetApp Inc.
4 * Copyright (c) 2012 Citrix Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /**
30 * StorVSC driver for Hyper-V. This driver presents a SCSI HBA interface
31 * to the Common Access Method (CAM) layer. CAM control blocks (CCBs) are
32 * converted into VSCSI protocol messages which are delivered to the parent
33 * partition StorVSP driver over the Hyper-V VMBUS.
34 */
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37
38 #include <sys/param.h>
39 #include <sys/proc.h>
40 #include <sys/condvar.h>
41 #include <sys/time.h>
42 #include <sys/systm.h>
43 #include <sys/sockio.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/module.h>
47 #include <sys/kernel.h>
48 #include <sys/queue.h>
49 #include <sys/lock.h>
50 #include <sys/sx.h>
51 #include <sys/taskqueue.h>
52 #include <sys/bus.h>
53 #include <sys/mutex.h>
54 #include <sys/callout.h>
55 #include <vm/vm.h>
56 #include <vm/pmap.h>
57 #include <vm/uma.h>
58 #include <sys/lock.h>
59 #include <sys/sema.h>
60 #include <sys/sglist.h>
61 #include <machine/bus.h>
62 #include <sys/bus_dma.h>
63
64 #include <cam/cam.h>
65 #include <cam/cam_ccb.h>
66 #include <cam/cam_periph.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_xpt_sim.h>
69 #include <cam/cam_xpt_internal.h>
70 #include <cam/cam_debug.h>
71 #include <cam/scsi/scsi_all.h>
72 #include <cam/scsi/scsi_message.h>
73
74 #include <dev/hyperv/include/hyperv.h>
75 #include "hv_vstorage.h"
76
/* Size of the VMBUS ring buffer (each direction) for every channel. */
#define STORVSC_RINGBUFFER_SIZE		(20*PAGE_SIZE)
#define STORVSC_MAX_LUNS_PER_TARGET	(64)
#define STORVSC_MAX_IO_REQUESTS		(STORVSC_MAX_LUNS_PER_TARGET * 2)
#define BLKVSC_MAX_IDE_DISKS_PER_TARGET	(1)
#define BLKVSC_MAX_IO_REQUESTS		STORVSC_MAX_IO_REQUESTS
#define STORVSC_MAX_TARGETS		(2)

/*
 * On-the-wire size of a vstor packet: pre-win8 hosts do not understand
 * the win8 extension, so the run-time vmscsi_size_delta is subtracted.
 */
#define VSTOR_PKT_SIZE	(sizeof(struct vstor_packet) - vmscsi_size_delta)

#define HV_ALIGN(x, a) roundup2(x, a)
87
88 struct storvsc_softc;
89
/* One pre-allocated scatter/gather list in the bounce-buffer pool. */
struct hv_sgl_node {
	LIST_ENTRY(hv_sgl_node) link;	/* linkage on free/in-use list */
	struct sglist *sgl_data;	/* the SG list of bounce pages */
};

/*
 * Global pool of pre-allocated SG bounce buffers, shared by all
 * storvsc adapters; used for I/O whose segments are not page aligned.
 */
struct hv_sgl_page_pool{
	LIST_HEAD(, hv_sgl_node) in_use_sgl_list;
	LIST_HEAD(, hv_sgl_node) free_sgl_list;
	boolean_t is_init;	/* TRUE once populated by first attach */
} g_hv_sgl_page_pool;
100
/*
 * Upper bound on the number of bounce pages kept in the global SG pool.
 * Parenthesized so the macro expands safely inside larger expressions
 * (the original unparenthesized `A * B` form would mis-associate when
 * used next to operators of different precedence).
 */
#define STORVSC_MAX_SG_PAGE_CNT	\
	(STORVSC_MAX_IO_REQUESTS * HV_MAX_MULTIPAGE_BUFFER_COUNT)
102
/* Data-transfer direction of a storvsc I/O request. */
enum storvsc_request_type {
	WRITE_TYPE,
	READ_TYPE,
	UNKNOWN_TYPE
};
108
/*
 * Per-I/O request state.  A fixed set of these is allocated at attach
 * time and kept on the softc's free list.
 */
struct hv_storvsc_request {
	LIST_ENTRY(hv_storvsc_request) link;	/* free-list linkage */
	struct vstor_packet vstor_packet;	/* wire packet exchanged with host */
	hv_vmbus_multipage_buffer data_buf;	/* data pages for this I/O */
	void *sense_data;			/* destination for autosense bytes */
	uint8_t sense_info_len;			/* valid length of sense_data */
	uint8_t retries;
	union ccb *ccb;				/* originating CAM CCB, if any */
	struct storvsc_softc *softc;		/* owning adapter */
	struct callout callout;			/* per-request timeout timer */
	struct sema synch_sema; /*Synchronize the request/response if needed */
	struct sglist *bounce_sgl;		/* bounce SG list for unaligned I/O */
	unsigned int bounce_sgl_count;		/* segments in bounce_sgl */
	uint64_t not_aligned_seg_bits;		/* bitmap of unaligned segments */
};
124
/*
 * Per-adapter (per VMBUS device) software state.
 */
struct storvsc_softc {
	struct hv_device *hs_dev;		/* backing VMBUS device */
	LIST_HEAD(, hv_storvsc_request) hs_free_list; /* idle request pool */
	struct mtx hs_lock;			/* protects SIM and request state */
	struct storvsc_driver_props *hs_drv_props; /* blkvsc vs storvsc limits */
	int hs_unit;				/* device unit number */
	uint32_t hs_frozen;			/* non-zero while SIM queue frozen */
	struct cam_sim *hs_sim;			/* CAM SIM for this adapter */
	struct cam_path *hs_path;		/* wildcard path for this bus */
	uint32_t hs_num_out_reqs;		/* I/Os currently at the host */
	boolean_t hs_destroy;			/* teardown started; refuse new I/O */
	boolean_t hs_drain_notify;		/* detach is waiting for drain */
	struct sema hs_drain_sema;		/* posted when last I/O completes */
	struct hv_storvsc_request hs_init_req;	/* dedicated handshake request */
	struct hv_storvsc_request hs_reset_req;	/* dedicated bus-reset request */
};
141
142
143 /**
144 * HyperV storvsc timeout testing cases:
145 * a. IO returned after first timeout;
146 * b. IO returned after second timeout and queue freeze;
147 * c. IO returned while timer handler is running
148 * The first can be tested by "sg_senddiag -vv /dev/daX",
149 * and the second and third can be done by
150 * "sg_wr_mode -v -p 08 -c 0,1a -m 0,ff /dev/daX".
151 */
152 #define HVS_TIMEOUT_TEST 0
153
154 /*
155 * Bus/adapter reset functionality on the Hyper-V host is
156 * buggy and it will be disabled until
157 * it can be further tested.
158 */
159 #define HVS_HOST_RESET 0
160
/* Static limits and naming for one storage flavor (blkvsc/storvsc). */
struct storvsc_driver_props {
	char *drv_name;				/* short driver name */
	char *drv_desc;				/* human-readable description */
	uint8_t drv_max_luns_per_target;	/* LUN limit per target */
	uint8_t drv_max_ios_per_target;		/* outstanding-I/O limit */
	uint32_t drv_ringbuffer_size;		/* VMBUS ring size to open with */
};
168
/* Storage flavor of a VMBUS device; also indexes g_drv_props_table. */
enum hv_storage_type {
	DRIVER_BLKVSC,		/* paravirtual IDE */
	DRIVER_STORVSC,		/* paravirtual SCSI */
	DRIVER_UNKNOWN
};
174
#define HS_MAX_ADAPTERS 10

/* chan_props.flags bit: host supports multiple channels per device. */
#define HV_STORAGE_SUPPORTS_MULTI_CHANNEL 0x1

/* VMBUS device-type GUID identifying a synthetic SCSI (storvsc) device. */
/* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
static const hv_guid gStorVscDeviceType={
	.data = {0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
		 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f}
};

/* VMBUS device-type GUID identifying a synthetic IDE (blkvsc) device. */
/* {32412632-86cb-44a2-9b5c-50d1417354f5} */
static const hv_guid gBlkVscDeviceType={
	.data = {0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
		 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5}
};

/* Per-flavor driver properties, indexed by enum hv_storage_type. */
static struct storvsc_driver_props g_drv_props_table[] = {
	{"blkvsc", "Hyper-V IDE Storage Interface",
	 BLKVSC_MAX_IDE_DISKS_PER_TARGET, BLKVSC_MAX_IO_REQUESTS,
	 STORVSC_RINGBUFFER_SIZE},
	{"storvsc", "Hyper-V SCSI Storage Interface",
	 STORVSC_MAX_LUNS_PER_TARGET, STORVSC_MAX_IO_REQUESTS,
	 STORVSC_RINGBUFFER_SIZE}
};
199
200 /*
201 * Sense buffer size changed in win8; have a run-time
202 * variable to track the size we should use.
203 */
204 static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
205
206 /*
207 * The size of the vmscsi_request has changed in win8. The
208 * additional size is for the newly added elements in the
209 * structure. These elements are valid only when we are talking
210 * to a win8 host.
211 * Track the correct size we need to apply.
212 */
213 static int vmscsi_size_delta;
214 /*
215 * The storage protocol version is determined during the
216 * initial exchange with the host. It will indicate which
217 * storage functionality is available in the host.
218 */
219 static int vmstor_proto_version;
220
/*
 * One negotiable protocol version, with the sense-buffer size and
 * vmscsi_request size delta that apply when that version is in use.
 */
struct vmstor_proto {
	int proto_version;
	int sense_buffer_size;
	int vmscsi_size_delta;
};

/*
 * Protocol versions we can negotiate, newest first;
 * hv_storvsc_channel_init() walks this list until the host accepts one.
 */
static const struct vmstor_proto vmstor_proto_list[] = {
	{
		VMSTOR_PROTOCOL_VERSION_WIN10,
		POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
		0
	},
	{
		VMSTOR_PROTOCOL_VERSION_WIN8_1,
		POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
		0
	},
	{
		VMSTOR_PROTOCOL_VERSION_WIN8,
		POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
		0
	},
	{
		/* pre-win8 hosts: smaller sense buffer, no win8 extension */
		VMSTOR_PROTOCOL_VERSION_WIN7,
		PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
		sizeof(struct vmscsi_win8_extension),
	},
	{
		VMSTOR_PROTOCOL_VERSION_WIN6,
		PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
		sizeof(struct vmscsi_win8_extension),
	}
};
254
255 /* static functions */
256 static int storvsc_probe(device_t dev);
257 static int storvsc_attach(device_t dev);
258 static int storvsc_detach(device_t dev);
259 static void storvsc_poll(struct cam_sim * sim);
260 static void storvsc_action(struct cam_sim * sim, union ccb * ccb);
261 static int create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp);
262 static void storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp);
263 static enum hv_storage_type storvsc_get_storage_type(device_t dev);
264 static void hv_storvsc_rescan_target(struct storvsc_softc *sc);
265 static void hv_storvsc_on_channel_callback(void *context);
266 static void hv_storvsc_on_iocompletion( struct storvsc_softc *sc,
267 struct vstor_packet *vstor_packet,
268 struct hv_storvsc_request *request);
269 static int hv_storvsc_connect_vsp(struct hv_device *device);
270 static void storvsc_io_done(struct hv_storvsc_request *reqp);
271 static void storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl,
272 bus_dma_segment_t *orig_sgl,
273 unsigned int orig_sgl_count,
274 uint64_t seg_bits);
275 void storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl,
276 unsigned int dest_sgl_count,
277 struct sglist* src_sgl,
278 uint64_t seg_bits);
279
280 static device_method_t storvsc_methods[] = {
281 /* Device interface */
282 DEVMETHOD(device_probe, storvsc_probe),
283 DEVMETHOD(device_attach, storvsc_attach),
284 DEVMETHOD(device_detach, storvsc_detach),
285 DEVMETHOD(device_shutdown, bus_generic_shutdown),
286 DEVMETHOD_END
287 };
288
289 static driver_t storvsc_driver = {
290 "storvsc", storvsc_methods, sizeof(struct storvsc_softc),
291 };
292
293 static devclass_t storvsc_devclass;
294 DRIVER_MODULE(storvsc, vmbus, storvsc_driver, storvsc_devclass, 0, 0);
295 MODULE_VERSION(storvsc, 1);
296 MODULE_DEPEND(storvsc, vmbus, 1, 1, 1);
297
298
299 /**
300 * The host is capable of sending messages to us that are
301 * completely unsolicited. So, we need to address the race
302 * condition where we may be in the process of unloading the
303 * driver when the host may send us an unsolicited message.
304 * We address this issue by implementing a sequentially
305 * consistent protocol:
306 *
307 * 1. Channel callback is invoked while holding the the channel lock
308 * and an unloading driver will reset the channel callback under
309 * the protection of this channel lock.
310 *
311 * 2. To ensure bounded wait time for unloading a driver, we don't
312 * permit outgoing traffic once the device is marked as being
313 * destroyed.
314 *
315 * 3. Once the device is marked as being destroyed, we only
316 * permit incoming traffic to properly account for
317 * packets already sent out.
318 */
319 static inline struct storvsc_softc *
320 get_stor_device(struct hv_device *device,
321 boolean_t outbound)
322 {
323 struct storvsc_softc *sc;
324
325 sc = device_get_softc(device->device);
326
327 if (outbound) {
328 /*
329 * Here we permit outgoing I/O only
330 * if the device is not being destroyed.
331 */
332
333 if (sc->hs_destroy) {
334 sc = NULL;
335 }
336 } else {
337 /*
338 * inbound case; if being destroyed
339 * only permit to account for
340 * messages already sent out.
341 */
342 if (sc->hs_destroy && (sc->hs_num_out_reqs == 0)) {
343 sc = NULL;
344 }
345 }
346 return sc;
347 }
348
/*
 * Open a newly offered VMBUS sub-channel so it can carry storvsc I/O,
 * using the same ring-buffer sizes and callback as the primary channel.
 */
static void
storvsc_subchan_attach(struct hv_vmbus_channel *new_channel)
{
	struct hv_device *device;
	struct storvsc_softc *sc;
	struct vmstor_chan_props props;
	int ret = 0;

	device = new_channel->device;
	sc = get_stor_device(device, TRUE);
	if (sc == NULL)
		return;

	memset(&props, 0, sizeof(props));

	/*
	 * NOTE(review): the return value of hv_vmbus_channel_open() is
	 * assigned but never checked; a sub-channel that fails to open
	 * is silently dropped -- confirm this is intentional.
	 */
	ret = hv_vmbus_channel_open(new_channel,
	    sc->hs_drv_props->drv_ringbuffer_size,
	    sc->hs_drv_props->drv_ringbuffer_size,
	    (void *)&props,
	    sizeof(struct vmstor_chan_props),
	    hv_storvsc_on_channel_callback,
	    new_channel);

	return;
}
374
375 /**
376 * @brief Send multi-channel creation request to host
377 *
378 * @param device a Hyper-V device pointer
379 * @param max_chans the max channels supported by vmbus
380 */
381 static void
382 storvsc_send_multichannel_request(struct hv_device *dev, int max_chans)
383 {
384 struct hv_vmbus_channel **subchan;
385 struct storvsc_softc *sc;
386 struct hv_storvsc_request *request;
387 struct vstor_packet *vstor_packet;
388 int request_channels_cnt = 0;
389 int ret, i;
390
391 /* get multichannels count that need to create */
392 request_channels_cnt = MIN(max_chans, mp_ncpus);
393
394 sc = get_stor_device(dev, TRUE);
395 if (sc == NULL) {
396 printf("Storvsc_error: get sc failed while send mutilchannel "
397 "request\n");
398 return;
399 }
400
401 request = &sc->hs_init_req;
402
403 /* request the host to create multi-channel */
404 memset(request, 0, sizeof(struct hv_storvsc_request));
405
406 sema_init(&request->synch_sema, 0, ("stor_synch_sema"));
407
408 vstor_packet = &request->vstor_packet;
409
410 vstor_packet->operation = VSTOR_OPERATION_CREATE_MULTI_CHANNELS;
411 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
412 vstor_packet->u.multi_channels_cnt = request_channels_cnt;
413
414 ret = hv_vmbus_channel_send_packet(
415 dev->channel,
416 vstor_packet,
417 VSTOR_PKT_SIZE,
418 (uint64_t)(uintptr_t)request,
419 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
420 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
421
422 /* wait for 5 seconds */
423 ret = sema_timedwait(&request->synch_sema, 5 * hz);
424 if (ret != 0) {
425 printf("Storvsc_error: create multi-channel timeout, %d\n",
426 ret);
427 return;
428 }
429
430 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
431 vstor_packet->status != 0) {
432 printf("Storvsc_error: create multi-channel invalid operation "
433 "(%d) or statue (%u)\n",
434 vstor_packet->operation, vstor_packet->status);
435 return;
436 }
437
438 /* Wait for sub-channels setup to complete. */
439 subchan = vmbus_get_subchan(dev->channel, request_channels_cnt);
440
441 /* Attach the sub-channels. */
442 for (i = 0; i < request_channels_cnt; ++i)
443 storvsc_subchan_attach(subchan[i]);
444
445 /* Release the sub-channels. */
446 vmbus_rel_subchan(subchan, request_channels_cnt);
447
448 if (bootverbose)
449 printf("Storvsc create multi-channel success!\n");
450 }
451
452 /**
453 * @brief initialize channel connection to parent partition
454 *
455 * @param dev a Hyper-V device pointer
456 * @returns 0 on success, non-zero error on failure
457 */
458 static int
459 hv_storvsc_channel_init(struct hv_device *dev)
460 {
461 int ret = 0, i;
462 struct hv_storvsc_request *request;
463 struct vstor_packet *vstor_packet;
464 struct storvsc_softc *sc;
465 uint16_t max_chans = 0;
466 boolean_t support_multichannel = FALSE;
467
468 max_chans = 0;
469 support_multichannel = FALSE;
470
471 sc = get_stor_device(dev, TRUE);
472 if (sc == NULL)
473 return (ENODEV);
474
475 request = &sc->hs_init_req;
476 memset(request, 0, sizeof(struct hv_storvsc_request));
477 vstor_packet = &request->vstor_packet;
478 request->softc = sc;
479
480 /**
481 * Initiate the vsc/vsp initialization protocol on the open channel
482 */
483 sema_init(&request->synch_sema, 0, ("stor_synch_sema"));
484
485 vstor_packet->operation = VSTOR_OPERATION_BEGININITIALIZATION;
486 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
487
488
489 ret = hv_vmbus_channel_send_packet(
490 dev->channel,
491 vstor_packet,
492 VSTOR_PKT_SIZE,
493 (uint64_t)(uintptr_t)request,
494 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
495 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
496
497 if (ret != 0)
498 goto cleanup;
499
500 /* wait 5 seconds */
501 ret = sema_timedwait(&request->synch_sema, 5 * hz);
502 if (ret != 0)
503 goto cleanup;
504
505 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
506 vstor_packet->status != 0) {
507 goto cleanup;
508 }
509
510 for (i = 0; i < nitems(vmstor_proto_list); i++) {
511 /* reuse the packet for version range supported */
512
513 memset(vstor_packet, 0, sizeof(struct vstor_packet));
514 vstor_packet->operation = VSTOR_OPERATION_QUERYPROTOCOLVERSION;
515 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
516
517 vstor_packet->u.version.major_minor =
518 vmstor_proto_list[i].proto_version;
519
520 /* revision is only significant for Windows guests */
521 vstor_packet->u.version.revision = 0;
522
523 ret = hv_vmbus_channel_send_packet(
524 dev->channel,
525 vstor_packet,
526 VSTOR_PKT_SIZE,
527 (uint64_t)(uintptr_t)request,
528 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
529 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
530
531 if (ret != 0)
532 goto cleanup;
533
534 /* wait 5 seconds */
535 ret = sema_timedwait(&request->synch_sema, 5 * hz);
536
537 if (ret)
538 goto cleanup;
539
540 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO) {
541 ret = EINVAL;
542 goto cleanup;
543 }
544 if (vstor_packet->status == 0) {
545 vmstor_proto_version =
546 vmstor_proto_list[i].proto_version;
547 sense_buffer_size =
548 vmstor_proto_list[i].sense_buffer_size;
549 vmscsi_size_delta =
550 vmstor_proto_list[i].vmscsi_size_delta;
551 break;
552 }
553 }
554
555 if (vstor_packet->status != 0) {
556 ret = EINVAL;
557 goto cleanup;
558 }
559 /**
560 * Query channel properties
561 */
562 memset(vstor_packet, 0, sizeof(struct vstor_packet));
563 vstor_packet->operation = VSTOR_OPERATION_QUERYPROPERTIES;
564 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
565
566 ret = hv_vmbus_channel_send_packet(
567 dev->channel,
568 vstor_packet,
569 VSTOR_PKT_SIZE,
570 (uint64_t)(uintptr_t)request,
571 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
572 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
573
574 if ( ret != 0)
575 goto cleanup;
576
577 /* wait 5 seconds */
578 ret = sema_timedwait(&request->synch_sema, 5 * hz);
579
580 if (ret != 0)
581 goto cleanup;
582
583 /* TODO: Check returned version */
584 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
585 vstor_packet->status != 0) {
586 goto cleanup;
587 }
588
589 /* multi-channels feature is supported by WIN8 and above version */
590 max_chans = vstor_packet->u.chan_props.max_channel_cnt;
591 if ((hv_vmbus_protocal_version != HV_VMBUS_VERSION_WIN7) &&
592 (hv_vmbus_protocal_version != HV_VMBUS_VERSION_WS2008) &&
593 (vstor_packet->u.chan_props.flags &
594 HV_STORAGE_SUPPORTS_MULTI_CHANNEL)) {
595 support_multichannel = TRUE;
596 }
597
598 memset(vstor_packet, 0, sizeof(struct vstor_packet));
599 vstor_packet->operation = VSTOR_OPERATION_ENDINITIALIZATION;
600 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
601
602 ret = hv_vmbus_channel_send_packet(
603 dev->channel,
604 vstor_packet,
605 VSTOR_PKT_SIZE,
606 (uint64_t)(uintptr_t)request,
607 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
608 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
609
610 if (ret != 0) {
611 goto cleanup;
612 }
613
614 /* wait 5 seconds */
615 ret = sema_timedwait(&request->synch_sema, 5 * hz);
616
617 if (ret != 0)
618 goto cleanup;
619
620 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
621 vstor_packet->status != 0)
622 goto cleanup;
623
624 /*
625 * If multi-channel is supported, send multichannel create
626 * request to host.
627 */
628 if (support_multichannel)
629 storvsc_send_multichannel_request(dev, max_chans);
630
631 cleanup:
632 sema_destroy(&request->synch_sema);
633 return (ret);
634 }
635
636 /**
637 * @brief Open channel connection to paraent partition StorVSP driver
638 *
639 * Open and initialize channel connection to parent partition StorVSP driver.
640 *
641 * @param pointer to a Hyper-V device
642 * @returns 0 on success, non-zero error on failure
643 */
644 static int
645 hv_storvsc_connect_vsp(struct hv_device *dev)
646 {
647 int ret = 0;
648 struct vmstor_chan_props props;
649 struct storvsc_softc *sc;
650
651 sc = device_get_softc(dev->device);
652
653 memset(&props, 0, sizeof(struct vmstor_chan_props));
654
655 /*
656 * Open the channel
657 */
658
659 ret = hv_vmbus_channel_open(
660 dev->channel,
661 sc->hs_drv_props->drv_ringbuffer_size,
662 sc->hs_drv_props->drv_ringbuffer_size,
663 (void *)&props,
664 sizeof(struct vmstor_chan_props),
665 hv_storvsc_on_channel_callback,
666 dev->channel);
667
668 if (ret != 0) {
669 return ret;
670 }
671
672 ret = hv_storvsc_channel_init(dev);
673
674 return (ret);
675 }
676
#if HVS_HOST_RESET
/*
 * Issue a bus (adapter) reset request to the host and wait up to
 * 5 seconds for its completion.  Compiled out by default
 * (HVS_HOST_RESET == 0) because host-side reset support is buggy;
 * see the comment at the HVS_HOST_RESET definition.
 * Returns 0 on success or an errno value.
 */
static int
hv_storvsc_host_reset(struct hv_device *dev)
{
	int ret = 0;
	struct storvsc_softc *sc;

	struct hv_storvsc_request *request;
	struct vstor_packet *vstor_packet;

	sc = get_stor_device(dev, TRUE);
	if (sc == NULL) {
		return ENODEV;
	}

	/* Use the dedicated reset-request slot in the softc. */
	request = &sc->hs_reset_req;
	request->softc = sc;
	vstor_packet = &request->vstor_packet;

	sema_init(&request->synch_sema, 0, "stor synch sema");

	vstor_packet->operation = VSTOR_OPERATION_RESETBUS;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = hv_vmbus_channel_send_packet(dev->channel,
			vstor_packet,
			VSTOR_PKT_SIZE,
			(uint64_t)(uintptr_t)&sc->hs_reset_req,
			HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
			HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0) {
		goto cleanup;
	}

	ret = sema_timedwait(&request->synch_sema, 5 * hz); /* KYS 5 seconds */

	if (ret) {
		goto cleanup;
	}


	/*
	 * At this point, all outstanding requests in the adapter
	 * should have been flushed out and return to us
	 */

cleanup:
	sema_destroy(&request->synch_sema);
	return (ret);
}
#endif /* HVS_HOST_RESET */
729
730 /**
731 * @brief Function to initiate an I/O request
732 *
733 * @param device Hyper-V device pointer
734 * @param request pointer to a request structure
735 * @returns 0 on success, non-zero error on failure
736 */
737 static int
738 hv_storvsc_io_request(struct hv_device *device,
739 struct hv_storvsc_request *request)
740 {
741 struct storvsc_softc *sc;
742 struct vstor_packet *vstor_packet = &request->vstor_packet;
743 struct hv_vmbus_channel* outgoing_channel = NULL;
744 int ret = 0;
745
746 sc = get_stor_device(device, TRUE);
747
748 if (sc == NULL) {
749 return ENODEV;
750 }
751
752 vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
753
754 vstor_packet->u.vm_srb.length = VSTOR_PKT_SIZE;
755
756 vstor_packet->u.vm_srb.sense_info_len = sense_buffer_size;
757
758 vstor_packet->u.vm_srb.transfer_len = request->data_buf.length;
759
760 vstor_packet->operation = VSTOR_OPERATION_EXECUTESRB;
761
762 outgoing_channel = vmbus_select_outgoing_channel(device->channel);
763
764 mtx_unlock(&request->softc->hs_lock);
765 if (request->data_buf.length) {
766 ret = hv_vmbus_channel_send_packet_multipagebuffer(
767 outgoing_channel,
768 &request->data_buf,
769 vstor_packet,
770 VSTOR_PKT_SIZE,
771 (uint64_t)(uintptr_t)request);
772
773 } else {
774 ret = hv_vmbus_channel_send_packet(
775 outgoing_channel,
776 vstor_packet,
777 VSTOR_PKT_SIZE,
778 (uint64_t)(uintptr_t)request,
779 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
780 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
781 }
782 mtx_lock(&request->softc->hs_lock);
783
784 if (ret != 0) {
785 printf("Unable to send packet %p ret %d", vstor_packet, ret);
786 } else {
787 atomic_add_int(&sc->hs_num_out_reqs, 1);
788 }
789
790 return (ret);
791 }
792
793
/**
 * Process IO_COMPLETION_OPERATION and ready
 * the result to be completed for upper layer
 * processing by the CAM layer.
 * Also copies autosense data when the host reports CHECK CONDITION,
 * decrements the outstanding-request count, and wakes a draining
 * detach when the last outstanding I/O completes.
 */
static void
hv_storvsc_on_iocompletion(struct storvsc_softc *sc,
			   struct vstor_packet *vstor_packet,
			   struct hv_storvsc_request *request)
{
	struct vmscsi_req *vm_srb;

	vm_srb = &vstor_packet->u.vm_srb;

	/*
	 * Copy some fields of the host's response into the request structure,
	 * because the fields will be used later in storvsc_io_done().
	 */
	request->vstor_packet.u.vm_srb.scsi_status = vm_srb->scsi_status;
	request->vstor_packet.u.vm_srb.srb_status = vm_srb->srb_status;
	request->vstor_packet.u.vm_srb.transfer_len = vm_srb->transfer_len;

	if (((vm_srb->scsi_status & 0xFF) == SCSI_STATUS_CHECK_COND) &&
	    (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)) {
		/* Autosense data available */

		KASSERT(vm_srb->sense_info_len <= request->sense_info_len,
		    ("vm_srb->sense_info_len <= "
		     "request->sense_info_len"));

		memcpy(request->sense_data, vm_srb->u.sense_data,
		    vm_srb->sense_info_len);

		request->sense_info_len = vm_srb->sense_info_len;
	}

	/* Complete request by passing to the CAM layer */
	storvsc_io_done(request);
	atomic_subtract_int(&sc->hs_num_out_reqs, 1);
	/* Wake a drain waiter once the last outstanding I/O is back. */
	if (sc->hs_drain_notify && (sc->hs_num_out_reqs == 0)) {
		sema_post(&sc->hs_drain_sema);
	}
}
837
838 static void
839 hv_storvsc_rescan_target(struct storvsc_softc *sc)
840 {
841 path_id_t pathid;
842 target_id_t targetid;
843 union ccb *ccb;
844
845 pathid = cam_sim_path(sc->hs_sim);
846 targetid = CAM_TARGET_WILDCARD;
847
848 /*
849 * Allocate a CCB and schedule a rescan.
850 */
851 ccb = xpt_alloc_ccb_nowait();
852 if (ccb == NULL) {
853 printf("unable to alloc CCB for rescan\n");
854 return;
855 }
856
857 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
858 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
859 printf("unable to create path for rescan, pathid: %u,"
860 "targetid: %u\n", pathid, targetid);
861 xpt_free_ccb(ccb);
862 return;
863 }
864
865 if (targetid == CAM_TARGET_WILDCARD)
866 ccb->ccb_h.func_code = XPT_SCAN_BUS;
867 else
868 ccb->ccb_h.func_code = XPT_SCAN_TGT;
869
870 xpt_rescan(ccb);
871 }
872
/*
 * VMBUS channel callback: drain every pending packet from the channel.
 * The transaction id of each received packet is the pointer to the
 * hv_storvsc_request supplied at send time; synchronous control
 * requests (init/reset) wake their waiter, while EXECUTESRB
 * completions are handed to hv_storvsc_on_iocompletion().
 */
static void
hv_storvsc_on_channel_callback(void *context)
{
	int ret = 0;
	hv_vmbus_channel *channel = (hv_vmbus_channel *)context;
	struct hv_device *device = NULL;
	struct storvsc_softc *sc;
	uint32_t bytes_recvd;
	uint64_t request_id;
	/* Receive buffer; 8-byte aligned size as required by the ring. */
	uint8_t packet[roundup2(sizeof(struct vstor_packet), 8)];
	struct hv_storvsc_request *request;
	struct vstor_packet *vstor_packet;

	device = channel->device;
	KASSERT(device, ("device is NULL"));

	/* Inbound path: FALSE permits draining during destroy. */
	sc = get_stor_device(device, FALSE);
	if (sc == NULL) {
		printf("Storvsc_error: get stor device failed.\n");
		return;
	}

	ret = hv_vmbus_channel_recv_packet(
		channel,
		packet,
		roundup2(VSTOR_PKT_SIZE, 8),
		&bytes_recvd,
		&request_id);

	while ((ret == 0) && (bytes_recvd > 0)) {
		/* request_id is the pointer we attached when sending. */
		request = (struct hv_storvsc_request *)(uintptr_t)request_id;

		if ((request == &sc->hs_init_req) ||
			(request == &sc->hs_reset_req)) {
			/* Synchronous control request: wake the waiter. */
			memcpy(&request->vstor_packet, packet,
				sizeof(struct vstor_packet));
			sema_post(&request->synch_sema);
		} else {
			vstor_packet = (struct vstor_packet *)packet;
			switch(vstor_packet->operation) {
			case VSTOR_OPERATION_COMPLETEIO:
				if (request == NULL)
					panic("VMBUS: storvsc received a "
						"packet with NULL request id in "
						"COMPLETEIO operation.");

				hv_storvsc_on_iocompletion(sc,
					vstor_packet, request);
				break;
			case VSTOR_OPERATION_REMOVEDEVICE:
				printf("VMBUS: storvsc operation %d not "
					"implemented.\n", vstor_packet->operation);
				/* TODO: implement */
				break;
			case VSTOR_OPERATION_ENUMERATE_BUS:
				/* Host-side bus change (hotplug): rescan. */
				hv_storvsc_rescan_target(sc);
				break;
			default:
				break;
			}
		}
		ret = hv_vmbus_channel_recv_packet(
			channel,
			packet,
			roundup2(VSTOR_PKT_SIZE, 8),
			&bytes_recvd,
			&request_id);
	}
}
942
943 /**
944 * @brief StorVSC probe function
945 *
946 * Device probe function. Returns 0 if the input device is a StorVSC
947 * device. Otherwise, a ENXIO is returned. If the input device is
948 * for BlkVSC (paravirtual IDE) device and this support is disabled in
949 * favor of the emulated ATA/IDE device, return ENXIO.
950 *
951 * @param a device
952 * @returns 0 on success, ENXIO if not a matcing StorVSC device
953 */
954 static int
955 storvsc_probe(device_t dev)
956 {
957 int ata_disk_enable = 0;
958 int ret = ENXIO;
959
960 switch (storvsc_get_storage_type(dev)) {
961 case DRIVER_BLKVSC:
962 if(bootverbose)
963 device_printf(dev, "DRIVER_BLKVSC-Emulated ATA/IDE probe\n");
964 if (!getenv_int("hw.ata.disk_enable", &ata_disk_enable)) {
965 if(bootverbose)
966 device_printf(dev,
967 "Enlightened ATA/IDE detected\n");
968 device_set_desc(dev, g_drv_props_table[DRIVER_BLKVSC].drv_desc);
969 ret = BUS_PROBE_DEFAULT;
970 } else if(bootverbose)
971 device_printf(dev, "Emulated ATA/IDE set (hw.ata.disk_enable set)\n");
972 break;
973 case DRIVER_STORVSC:
974 if(bootverbose)
975 device_printf(dev, "Enlightened SCSI device detected\n");
976 device_set_desc(dev, g_drv_props_table[DRIVER_STORVSC].drv_desc);
977 ret = BUS_PROBE_DEFAULT;
978 break;
979 default:
980 ret = ENXIO;
981 }
982 return (ret);
983 }
984
985 /**
986 * @brief StorVSC attach function
987 *
988 * Function responsible for allocating per-device structures,
989 * setting up CAM interfaces and scanning for available LUNs to
990 * be used for SCSI device peripherals.
991 *
992 * @param a device
993 * @returns 0 on success or an error on failure
994 */
995 static int
996 storvsc_attach(device_t dev)
997 {
998 struct hv_device *hv_dev = vmbus_get_devctx(dev);
999 enum hv_storage_type stor_type;
1000 struct storvsc_softc *sc;
1001 struct cam_devq *devq;
1002 int ret, i, j;
1003 struct hv_storvsc_request *reqp;
1004 struct root_hold_token *root_mount_token = NULL;
1005 struct hv_sgl_node *sgl_node = NULL;
1006 void *tmp_buff = NULL;
1007
1008 /*
1009 * We need to serialize storvsc attach calls.
1010 */
1011 root_mount_token = root_mount_hold("storvsc");
1012
1013 sc = device_get_softc(dev);
1014
1015 stor_type = storvsc_get_storage_type(dev);
1016
1017 if (stor_type == DRIVER_UNKNOWN) {
1018 ret = ENODEV;
1019 goto cleanup;
1020 }
1021
1022 /* fill in driver specific properties */
1023 sc->hs_drv_props = &g_drv_props_table[stor_type];
1024
1025 /* fill in device specific properties */
1026 sc->hs_unit = device_get_unit(dev);
1027 sc->hs_dev = hv_dev;
1028
1029 LIST_INIT(&sc->hs_free_list);
1030 mtx_init(&sc->hs_lock, "hvslck", NULL, MTX_DEF);
1031
1032 for (i = 0; i < sc->hs_drv_props->drv_max_ios_per_target; ++i) {
1033 reqp = malloc(sizeof(struct hv_storvsc_request),
1034 M_DEVBUF, M_WAITOK|M_ZERO);
1035 reqp->softc = sc;
1036
1037 LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
1038 }
1039
1040 /* create sg-list page pool */
1041 if (FALSE == g_hv_sgl_page_pool.is_init) {
1042 g_hv_sgl_page_pool.is_init = TRUE;
1043 LIST_INIT(&g_hv_sgl_page_pool.in_use_sgl_list);
1044 LIST_INIT(&g_hv_sgl_page_pool.free_sgl_list);
1045
1046 /*
1047 * Pre-create SG list, each SG list with
1048 * HV_MAX_MULTIPAGE_BUFFER_COUNT segments, each
1049 * segment has one page buffer
1050 */
1051 for (i = 0; i < STORVSC_MAX_IO_REQUESTS; i++) {
1052 sgl_node = malloc(sizeof(struct hv_sgl_node),
1053 M_DEVBUF, M_WAITOK|M_ZERO);
1054
1055 sgl_node->sgl_data =
1056 sglist_alloc(HV_MAX_MULTIPAGE_BUFFER_COUNT,
1057 M_WAITOK|M_ZERO);
1058
1059 for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++) {
1060 tmp_buff = malloc(PAGE_SIZE,
1061 M_DEVBUF, M_WAITOK|M_ZERO);
1062
1063 sgl_node->sgl_data->sg_segs[j].ss_paddr =
1064 (vm_paddr_t)tmp_buff;
1065 }
1066
1067 LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list,
1068 sgl_node, link);
1069 }
1070 }
1071
1072 sc->hs_destroy = FALSE;
1073 sc->hs_drain_notify = FALSE;
1074 sema_init(&sc->hs_drain_sema, 0, "Store Drain Sema");
1075
1076 ret = hv_storvsc_connect_vsp(hv_dev);
1077 if (ret != 0) {
1078 goto cleanup;
1079 }
1080
1081 /*
1082 * Create the device queue.
1083 * Hyper-V maps each target to one SCSI HBA
1084 */
1085 devq = cam_simq_alloc(sc->hs_drv_props->drv_max_ios_per_target);
1086 if (devq == NULL) {
1087 device_printf(dev, "Failed to alloc device queue\n");
1088 ret = ENOMEM;
1089 goto cleanup;
1090 }
1091
1092 sc->hs_sim = cam_sim_alloc(storvsc_action,
1093 storvsc_poll,
1094 sc->hs_drv_props->drv_name,
1095 sc,
1096 sc->hs_unit,
1097 &sc->hs_lock, 1,
1098 sc->hs_drv_props->drv_max_ios_per_target,
1099 devq);
1100
1101 if (sc->hs_sim == NULL) {
1102 device_printf(dev, "Failed to alloc sim\n");
1103 cam_simq_free(devq);
1104 ret = ENOMEM;
1105 goto cleanup;
1106 }
1107
1108 mtx_lock(&sc->hs_lock);
1109 /* bus_id is set to 0, need to get it from VMBUS channel query? */
1110 if (xpt_bus_register(sc->hs_sim, dev, 0) != CAM_SUCCESS) {
1111 cam_sim_free(sc->hs_sim, /*free_devq*/TRUE);
1112 mtx_unlock(&sc->hs_lock);
1113 device_printf(dev, "Unable to register SCSI bus\n");
1114 ret = ENXIO;
1115 goto cleanup;
1116 }
1117
1118 if (xpt_create_path(&sc->hs_path, /*periph*/NULL,
1119 cam_sim_path(sc->hs_sim),
1120 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1121 xpt_bus_deregister(cam_sim_path(sc->hs_sim));
1122 cam_sim_free(sc->hs_sim, /*free_devq*/TRUE);
1123 mtx_unlock(&sc->hs_lock);
1124 device_printf(dev, "Unable to create path\n");
1125 ret = ENXIO;
1126 goto cleanup;
1127 }
1128
1129 mtx_unlock(&sc->hs_lock);
1130
1131 root_mount_rel(root_mount_token);
1132 return (0);
1133
1134
1135 cleanup:
1136 root_mount_rel(root_mount_token);
1137 while (!LIST_EMPTY(&sc->hs_free_list)) {
1138 reqp = LIST_FIRST(&sc->hs_free_list);
1139 LIST_REMOVE(reqp, link);
1140 free(reqp, M_DEVBUF);
1141 }
1142
1143 while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
1144 sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
1145 LIST_REMOVE(sgl_node, link);
1146 for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++) {
1147 if (NULL !=
1148 (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) {
1149 free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF);
1150 }
1151 }
1152 sglist_free(sgl_node->sgl_data);
1153 free(sgl_node, M_DEVBUF);
1154 }
1155
1156 return (ret);
1157 }
1158
1159 /**
1160 * @brief StorVSC device detach function
1161 *
1162 * This function is responsible for safely detaching a
1163 * StorVSC device. This includes waiting for inbound responses
1164 * to complete and freeing associated per-device structures.
1165 *
1166 * @param dev a device
1167 * returns 0 on success
1168 */
static int
storvsc_detach(device_t dev)
{
	struct storvsc_softc *sc = device_get_softc(dev);
	struct hv_storvsc_request *reqp = NULL;
	struct hv_device *hv_device = vmbus_get_devctx(dev);
	struct hv_sgl_node *sgl_node = NULL;
	int j = 0;

	/* Mark the softc as going away; response path checks this flag. */
	sc->hs_destroy = TRUE;

	/*
	 * At this point, all outbound traffic should be disabled. We
	 * only allow inbound traffic (responses) to proceed so that
	 * outstanding requests can be completed.
	 */

	/*
	 * Block until the response path signals hs_drain_sema, i.e. all
	 * in-flight requests have been completed by the host.
	 */
	sc->hs_drain_notify = TRUE;
	sema_wait(&sc->hs_drain_sema);
	sc->hs_drain_notify = FALSE;

	/*
	 * Since we have already drained, we don't need to busy wait.
	 * The call to close the channel will reset the callback
	 * under the protection of the incoming channel lock.
	 */

	hv_vmbus_channel_close(hv_device->channel);

	/* Free the pre-allocated request pool (all requests are back on
	 * the free list after the drain above). */
	mtx_lock(&sc->hs_lock);
	while (!LIST_EMPTY(&sc->hs_free_list)) {
		reqp = LIST_FIRST(&sc->hs_free_list);
		LIST_REMOVE(reqp, link);

		free(reqp, M_DEVBUF);
	}
	mtx_unlock(&sc->hs_lock);

	/*
	 * Tear down the global bounce-buffer page pool: each node owns
	 * HV_MAX_MULTIPAGE_BUFFER_COUNT page buffers whose addresses are
	 * stashed in ss_paddr (see storvsc_attach), plus the sglist itself.
	 * NOTE(review): only the free list is walked here; in-use nodes
	 * (if any remained) would leak — presumably none can at this point.
	 */
	while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
		sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
		LIST_REMOVE(sgl_node, link);
		for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++){
			if (NULL !=
			    (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) {
				free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF);
			}
		}
		sglist_free(sgl_node->sgl_data);
		free(sgl_node, M_DEVBUF);
	}

	return (0);
}
1222
1223 #if HVS_TIMEOUT_TEST
1224 /**
1225 * @brief unit test for timed out operations
1226 *
1227 * This function provides unit testing capability to simulate
1228 * timed out operations. Recompilation with HV_TIMEOUT_TEST=1
1229 * is required.
1230 *
1231 * @param reqp pointer to a request structure
1232 * @param opcode SCSI operation being performed
1233 * @param wait if 1, wait for I/O to complete
1234 */
static void
storvsc_timeout_test(struct hv_storvsc_request *reqp,
		uint8_t opcode, int wait)
{
	int ret;
	union ccb *ccb = reqp->ccb;
	struct storvsc_softc *sc = reqp->softc;

	/* Only exercise the request whose CDB opcode matches the test. */
	if (reqp->vstor_packet.vm_srb.cdb[0] != opcode) {
		return;
	}

	/* Hold the event mutex so io-done cannot signal before we wait. */
	if (wait) {
		mtx_lock(&reqp->event.mtx);
	}
	ret = hv_storvsc_io_request(sc->hs_dev, reqp);
	if (ret != 0) {
		if (wait) {
			mtx_unlock(&reqp->event.mtx);
		}
		printf("%s: io_request failed with %d.\n",
				__func__, ret);
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		mtx_lock(&sc->hs_lock);
		storvsc_free_request(sc, reqp);
		xpt_done(ccb);
		mtx_unlock(&sc->hs_lock);
		return;
	}

	if (wait) {
		xpt_print(ccb->ccb_h.path,
			"%u: %s: waiting for IO return.\n",
			ticks, __func__);
		/* Wait up to 60 seconds for io-done to cv_signal us. */
		ret = cv_timedwait(&reqp->event.cv, &reqp->event.mtx, 60*hz);
		mtx_unlock(&reqp->event.mtx);
		xpt_print(ccb->ccb_h.path, "%u: %s: %s.\n",
			ticks, __func__, (ret == 0)?
			"IO return detected" :
			"IO return not detected");
		/*
		 * Now both the timer handler and io done are running
		 * simultaneously. We want to confirm the io done always
		 * finishes after the timer handler exits. So reqp used by
		 * timer handler is not freed or stale. Do busy loop for
		 * another 1/10 second to make sure io done does
		 * wait for the timer handler to complete.
		 */
		DELAY(100*1000);
		mtx_lock(&sc->hs_lock);
		xpt_print(ccb->ccb_h.path,
			"%u: %s: finishing, queue frozen %d, "
			"ccb status 0x%x scsi_status 0x%x.\n",
			ticks, __func__, sc->hs_frozen,
			ccb->ccb_h.status,
			ccb->csio.scsi_status);
		mtx_unlock(&sc->hs_lock);
	}
}
1294 #endif /* HVS_TIMEOUT_TEST */
1295
1296 #ifdef notyet
1297 /**
1298 * @brief timeout handler for requests
1299 *
1300 * This function is called as a result of a callout expiring.
1301 *
1302 * @param arg pointer to a request
1303 */
static void
storvsc_timeout(void *arg)
{
	struct hv_storvsc_request *reqp = arg;
	struct storvsc_softc *sc = reqp->softc;
	union ccb *ccb = reqp->ccb;

	/* First expiry: log, give the I/O one more timeout period. */
	if (reqp->retries == 0) {
		mtx_lock(&sc->hs_lock);
		xpt_print(ccb->ccb_h.path,
		    "%u: IO timed out (req=0x%p), wait for another %u secs.\n",
		    ticks, reqp, ccb->ccb_h.timeout / 1000);
		cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL);
		mtx_unlock(&sc->hs_lock);

		/* Re-arm the callout for a second (final) grace period. */
		reqp->retries++;
		callout_reset_sbt(&reqp->callout, SBT_1MS * ccb->ccb_h.timeout,
		    0, storvsc_timeout, reqp, 0);
#if HVS_TIMEOUT_TEST
		storvsc_timeout_test(reqp, SEND_DIAGNOSTIC, 0);
#endif
		return;
	}

	/*
	 * Second expiry: the I/O is considered stuck. Freeze the SIM
	 * queue (once) so CAM stops submitting new requests; the request
	 * itself is left outstanding for the host to complete eventually.
	 */
	mtx_lock(&sc->hs_lock);
	xpt_print(ccb->ccb_h.path,
	    "%u: IO (reqp = 0x%p) did not return for %u seconds, %s.\n",
	    ticks, reqp, ccb->ccb_h.timeout * (reqp->retries+1) / 1000,
	    (sc->hs_frozen == 0)?
	    "freezing the queue" : "the queue is already frozen");
	if (sc->hs_frozen == 0) {
		sc->hs_frozen = 1;
		xpt_freeze_simq(xpt_path_sim(ccb->ccb_h.path), 1);
	}
	mtx_unlock(&sc->hs_lock);

#if HVS_TIMEOUT_TEST
	storvsc_timeout_test(reqp, MODE_SELECT_10, 1);
#endif
}
1344 #endif
1345
1346 /**
1347 * @brief StorVSC device poll function
1348 *
1349 * This function is responsible for servicing requests when
1350 * interrupts are disabled (i.e when we are dumping core.)
1351 *
1352 * @param sim a pointer to a CAM SCSI interface module
1353 */
static void
storvsc_poll(struct cam_sim *sim)
{
	struct storvsc_softc *sc = cam_sim_softc(sim);

	/*
	 * CAM calls us with hs_lock held, but the channel callback
	 * acquires its own locks, so drop hs_lock around the call and
	 * re-take it before returning to preserve the caller's state.
	 */
	mtx_assert(&sc->hs_lock, MA_OWNED);
	mtx_unlock(&sc->hs_lock);
	hv_storvsc_on_channel_callback(sc->hs_dev->channel);
	mtx_lock(&sc->hs_lock);
}
1364
1365 /**
1366 * @brief StorVSC device action function
1367 *
1368 * This function is responsible for handling SCSI operations which
1369 * are passed from the CAM layer. The requests are in the form of
1370 * CAM control blocks which indicate the action being performed.
1371 * Not all actions require converting the request to a VSCSI protocol
1372 * message - these actions can be responded to by this driver.
1373 * Requests which are destined for a backend storage device are converted
1374 * to a VSCSI protocol message and sent on the channel connection associated
1375 * with this device.
1376 *
1377 * @param sim pointer to a CAM SCSI interface module
1378 * @param ccb pointer to a CAM control block
1379 */
static void
storvsc_action(struct cam_sim *sim, union ccb *ccb)
{
	struct storvsc_softc *sc = cam_sim_softc(sim);
	int res;

	mtx_assert(&sc->hs_lock, MA_OWNED);
	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ: {
		/* Report HBA capabilities; Hyper-V presents a SAS-like
		 * transport with one HBA per target. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_TAG_ABLE|PI_SDTR_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = STORVSC_MAX_TARGETS;
		cpi->max_lun = sc->hs_drv_props->drv_max_luns_per_target;
		cpi->initiator_id = cpi->max_target;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 300000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC2;
		/* NOTE(review): strncpy does not guarantee NUL termination
		 * if the source fills the buffer; the literals here fit, but
		 * drv_name/cam_sim_name are assumed to be short enough. */
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, sc->hs_drv_props->drv_name, HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	}
	case XPT_GET_TRAN_SETTINGS: {
		struct ccb_trans_settings *cts = &ccb->cts;

		cts->transport = XPORT_SAS;
		cts->transport_version = 0;
		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_SPC2;

		/* enable tag queuing and disconnected mode */
		cts->proto_specific.valid = CTS_SCSI_VALID_TQ;
		cts->proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
		cts->proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
		cts->xport_specific.valid = CTS_SPI_VALID_DISC;
		cts->xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	}
	case XPT_SET_TRAN_SETTINGS: {
		/* Settings are fixed by the host; accept silently. */
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	}
	case XPT_CALC_GEOMETRY:{
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		return;
	}
	case XPT_RESET_BUS:
	case XPT_RESET_DEV:{
#if HVS_HOST_RESET
		if ((res = hv_storvsc_host_reset(sc->hs_dev)) != 0) {
			xpt_print(ccb->ccb_h.path,
				"hv_storvsc_host_reset failed with %d\n", res);
			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
			xpt_done(ccb);
			return;
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
#else
		xpt_print(ccb->ccb_h.path,
			"%s reset not supported.\n",
			(ccb->ccb_h.func_code == XPT_RESET_BUS)?
			"bus" : "dev");
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
#endif	/* HVS_HOST_RESET */
	}
	case XPT_SCSI_IO:
	case XPT_IMMED_NOTIFY: {
		struct hv_storvsc_request *reqp = NULL;

		if (ccb->csio.cdb_len == 0) {
			panic("cdl_len is 0\n");
		}

		/* No free request slots: ask CAM to requeue and freeze
		 * the SIM queue until a request completes. */
		if (LIST_EMPTY(&sc->hs_free_list)) {
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			if (sc->hs_frozen == 0) {
				sc->hs_frozen = 1;
				xpt_freeze_simq(sim, /* count*/1);
			}
			xpt_done(ccb);
			return;
		}

		reqp = LIST_FIRST(&sc->hs_free_list);
		LIST_REMOVE(reqp, link);

		bzero(reqp, sizeof(struct hv_storvsc_request));
		reqp->softc = sc;

		ccb->ccb_h.status |= CAM_SIM_QUEUED;
		if ((res = create_storvsc_request(ccb, reqp)) != 0) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

#ifdef notyet
		if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
			callout_init(&reqp->callout, CALLOUT_MPSAFE);
			callout_reset_sbt(&reqp->callout,
			    SBT_1MS * ccb->ccb_h.timeout, 0,
			    storvsc_timeout, reqp, 0);
#if HVS_TIMEOUT_TEST
			cv_init(&reqp->event.cv, "storvsc timeout cv");
			mtx_init(&reqp->event.mtx, "storvsc timeout mutex",
					NULL, MTX_DEF);
			switch (reqp->vstor_packet.vm_srb.cdb[0]) {
				case MODE_SELECT_10:
				case SEND_DIAGNOSTIC:
					/* To have timer send the request. */
					return;
				default:
					break;
			}
#endif /* HVS_TIMEOUT_TEST */
		}
#endif

		/* Hand the request to the VMBus channel. */
		if ((res = hv_storvsc_io_request(sc->hs_dev, reqp)) != 0) {
			xpt_print(ccb->ccb_h.path,
				"hv_storvsc_io_request failed with %d\n", res);
			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
			storvsc_free_request(sc, reqp);
			xpt_done(ccb);
			return;
		}
		return;
	}

	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}
}
1536
1537 /**
1538 * @brief destroy bounce buffer
1539 *
1540 * This function is responsible for destroy a Scatter/Gather list
1541 * that create by storvsc_create_bounce_buffer()
1542 *
1543 * @param sgl- the Scatter/Gather need be destroy
1544 * @param sg_count- page count of the SG list.
1545 *
1546 */
1547 static void
1548 storvsc_destroy_bounce_buffer(struct sglist *sgl)
1549 {
1550 struct hv_sgl_node *sgl_node = NULL;
1551 if (LIST_EMPTY(&g_hv_sgl_page_pool.in_use_sgl_list)) {
1552 printf("storvsc error: not enough in use sgl\n");
1553 return;
1554 }
1555 sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.in_use_sgl_list);
1556 LIST_REMOVE(sgl_node, link);
1557 sgl_node->sgl_data = sgl;
1558 LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list, sgl_node, link);
1559 }
1560
1561 /**
1562 * @brief create bounce buffer
1563 *
1564 * This function is responsible for create a Scatter/Gather list,
1565 * which hold several pages that can be aligned with page size.
1566 *
1567 * @param seg_count- SG-list segments count
1568 * @param write - if WRITE_TYPE, set SG list page used size to 0,
1569 * otherwise set used size to page size.
1570 *
1571 * return NULL if create failed
1572 */
1573 static struct sglist *
1574 storvsc_create_bounce_buffer(uint16_t seg_count, int write)
1575 {
1576 int i = 0;
1577 struct sglist *bounce_sgl = NULL;
1578 unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
1579 struct hv_sgl_node *sgl_node = NULL;
1580
1581 /* get struct sglist from free_sgl_list */
1582 if (LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
1583 printf("storvsc error: not enough free sgl\n");
1584 return NULL;
1585 }
1586 sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
1587 LIST_REMOVE(sgl_node, link);
1588 bounce_sgl = sgl_node->sgl_data;
1589 LIST_INSERT_HEAD(&g_hv_sgl_page_pool.in_use_sgl_list, sgl_node, link);
1590
1591 bounce_sgl->sg_maxseg = seg_count;
1592
1593 if (write == WRITE_TYPE)
1594 bounce_sgl->sg_nseg = 0;
1595 else
1596 bounce_sgl->sg_nseg = seg_count;
1597
1598 for (i = 0; i < seg_count; i++)
1599 bounce_sgl->sg_segs[i].ss_len = buf_len;
1600
1601 return bounce_sgl;
1602 }
1603
1604 /**
1605 * @brief copy data from SG list to bounce buffer
1606 *
1607 * This function is responsible for copy data from one SG list's segments
1608 * to another SG list which used as bounce buffer.
1609 *
1610 * @param bounce_sgl - the destination SG list
1611 * @param orig_sgl - the segment of the source SG list.
1612 * @param orig_sgl_count - the count of segments.
1613 * @param orig_sgl_count - indicate which segment need bounce buffer,
1614 * set 1 means need.
1615 *
1616 */
1617 static void
1618 storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl,
1619 bus_dma_segment_t *orig_sgl,
1620 unsigned int orig_sgl_count,
1621 uint64_t seg_bits)
1622 {
1623 int src_sgl_idx = 0;
1624
1625 for (src_sgl_idx = 0; src_sgl_idx < orig_sgl_count; src_sgl_idx++) {
1626 if (seg_bits & (1 << src_sgl_idx)) {
1627 memcpy((void*)bounce_sgl->sg_segs[src_sgl_idx].ss_paddr,
1628 (void*)orig_sgl[src_sgl_idx].ds_addr,
1629 orig_sgl[src_sgl_idx].ds_len);
1630
1631 bounce_sgl->sg_segs[src_sgl_idx].ss_len =
1632 orig_sgl[src_sgl_idx].ds_len;
1633 }
1634 }
1635 }
1636
1637 /**
1638 * @brief copy data from SG list which used as bounce to another SG list
1639 *
1640 * This function is responsible for copy data from one SG list with bounce
1641 * buffer to another SG list's segments.
1642 *
1643 * @param dest_sgl - the destination SG list's segments
1644 * @param dest_sgl_count - the count of destination SG list's segment.
1645 * @param src_sgl - the source SG list.
1646 * @param seg_bits - indicate which segment used bounce buffer of src SG-list.
1647 *
1648 */
1649 void
1650 storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl,
1651 unsigned int dest_sgl_count,
1652 struct sglist* src_sgl,
1653 uint64_t seg_bits)
1654 {
1655 int sgl_idx = 0;
1656
1657 for (sgl_idx = 0; sgl_idx < dest_sgl_count; sgl_idx++) {
1658 if (seg_bits & (1 << sgl_idx)) {
1659 memcpy((void*)(dest_sgl[sgl_idx].ds_addr),
1660 (void*)(src_sgl->sg_segs[sgl_idx].ss_paddr),
1661 src_sgl->sg_segs[sgl_idx].ss_len);
1662 }
1663 }
1664 }
1665
1666 /**
1667 * @brief check SG list with bounce buffer or not
1668 *
1669 * This function is responsible for check if need bounce buffer for SG list.
1670 *
1671 * @param sgl - the SG list's segments
1672 * @param sg_count - the count of SG list's segment.
1673 * @param bits - segmengs number that need bounce buffer
1674 *
1675 * return -1 if SG list needless bounce buffer
1676 */
1677 static int
1678 storvsc_check_bounce_buffer_sgl(bus_dma_segment_t *sgl,
1679 unsigned int sg_count,
1680 uint64_t *bits)
1681 {
1682 int i = 0;
1683 int offset = 0;
1684 uint64_t phys_addr = 0;
1685 uint64_t tmp_bits = 0;
1686 boolean_t found_hole = FALSE;
1687 boolean_t pre_aligned = TRUE;
1688
1689 if (sg_count < 2){
1690 return -1;
1691 }
1692
1693 *bits = 0;
1694
1695 phys_addr = vtophys(sgl[0].ds_addr);
1696 offset = phys_addr - trunc_page(phys_addr);
1697
1698 if (offset != 0) {
1699 pre_aligned = FALSE;
1700 tmp_bits |= 1;
1701 }
1702
1703 for (i = 1; i < sg_count; i++) {
1704 phys_addr = vtophys(sgl[i].ds_addr);
1705 offset = phys_addr - trunc_page(phys_addr);
1706
1707 if (offset == 0) {
1708 if (FALSE == pre_aligned){
1709 /*
1710 * This segment is aligned, if the previous
1711 * one is not aligned, find a hole
1712 */
1713 found_hole = TRUE;
1714 }
1715 pre_aligned = TRUE;
1716 } else {
1717 tmp_bits |= 1 << i;
1718 if (!pre_aligned) {
1719 if (phys_addr != vtophys(sgl[i-1].ds_addr +
1720 sgl[i-1].ds_len)) {
1721 /*
1722 * Check whether connect to previous
1723 * segment,if not, find the hole
1724 */
1725 found_hole = TRUE;
1726 }
1727 } else {
1728 found_hole = TRUE;
1729 }
1730 pre_aligned = FALSE;
1731 }
1732 }
1733
1734 if (!found_hole) {
1735 return (-1);
1736 } else {
1737 *bits = tmp_bits;
1738 return 0;
1739 }
1740 }
1741
1742 /**
1743 * @brief Fill in a request structure based on a CAM control block
1744 *
1745 * Fills in a request structure based on the contents of a CAM control
1746 * block. The request structure holds the payload information for
1747 * VSCSI protocol request.
1748 *
1749  * @param ccb pointer to a CAM control block
1750 * @param reqp pointer to a request structure
1751 */
1752 static int
1753 create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp)
1754 {
1755 struct ccb_scsiio *csio = &ccb->csio;
1756 uint64_t phys_addr;
1757 uint32_t bytes_to_copy = 0;
1758 uint32_t pfn_num = 0;
1759 uint32_t pfn;
1760 uint64_t not_aligned_seg_bits = 0;
1761
1762 /* refer to struct vmscsi_req for meanings of these two fields */
1763 reqp->vstor_packet.u.vm_srb.port =
1764 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path));
1765 reqp->vstor_packet.u.vm_srb.path_id =
1766 cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1767
1768 reqp->vstor_packet.u.vm_srb.target_id = ccb->ccb_h.target_id;
1769 reqp->vstor_packet.u.vm_srb.lun = ccb->ccb_h.target_lun;
1770
1771 reqp->vstor_packet.u.vm_srb.cdb_len = csio->cdb_len;
1772 if(ccb->ccb_h.flags & CAM_CDB_POINTER) {
1773 memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_ptr,
1774 csio->cdb_len);
1775 } else {
1776 memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_bytes,
1777 csio->cdb_len);
1778 }
1779
1780 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
1781 case CAM_DIR_OUT:
1782 reqp->vstor_packet.u.vm_srb.data_in = WRITE_TYPE;
1783 break;
1784 case CAM_DIR_IN:
1785 reqp->vstor_packet.u.vm_srb.data_in = READ_TYPE;
1786 break;
1787 case CAM_DIR_NONE:
1788 reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE;
1789 break;
1790 default:
1791 reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE;
1792 break;
1793 }
1794
1795 reqp->sense_data = &csio->sense_data;
1796 reqp->sense_info_len = csio->sense_len;
1797
1798 reqp->ccb = ccb;
1799
1800 if (0 == csio->dxfer_len) {
1801 return (0);
1802 }
1803
1804 reqp->data_buf.length = csio->dxfer_len;
1805
1806 switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
1807 case CAM_DATA_VADDR:
1808 {
1809 bytes_to_copy = csio->dxfer_len;
1810 phys_addr = vtophys(csio->data_ptr);
1811 reqp->data_buf.offset = phys_addr & PAGE_MASK;
1812
1813 while (bytes_to_copy != 0) {
1814 int bytes, page_offset;
1815 phys_addr =
1816 vtophys(&csio->data_ptr[reqp->data_buf.length -
1817 bytes_to_copy]);
1818 pfn = phys_addr >> PAGE_SHIFT;
1819 reqp->data_buf.pfn_array[pfn_num] = pfn;
1820 page_offset = phys_addr & PAGE_MASK;
1821
1822 bytes = min(PAGE_SIZE - page_offset, bytes_to_copy);
1823
1824 bytes_to_copy -= bytes;
1825 pfn_num++;
1826 }
1827 break;
1828 }
1829
1830 case CAM_DATA_SG:
1831 {
1832 int i = 0;
1833 int offset = 0;
1834 int ret;
1835
1836 bus_dma_segment_t *storvsc_sglist =
1837 (bus_dma_segment_t *)ccb->csio.data_ptr;
1838 u_int16_t storvsc_sg_count = ccb->csio.sglist_cnt;
1839
1840 printf("Storvsc: get SG I/O operation, %d\n",
1841 reqp->vstor_packet.u.vm_srb.data_in);
1842
1843 if (storvsc_sg_count > HV_MAX_MULTIPAGE_BUFFER_COUNT){
1844 printf("Storvsc: %d segments is too much, "
1845 "only support %d segments\n",
1846 storvsc_sg_count, HV_MAX_MULTIPAGE_BUFFER_COUNT);
1847 return (EINVAL);
1848 }
1849
1850 /*
1851 * We create our own bounce buffer function currently. Idealy
1852 * we should use BUS_DMA(9) framework. But with current BUS_DMA
1853 * code there is no callback API to check the page alignment of
1854 * middle segments before busdma can decide if a bounce buffer
1855 * is needed for particular segment. There is callback,
1856 * "bus_dma_filter_t *filter", but the parrameters are not
1857 * sufficient for storvsc driver.
1858 * TODO:
1859 * Add page alignment check in BUS_DMA(9) callback. Once
1860 * this is complete, switch the following code to use
1861 * BUS_DMA(9) for storvsc bounce buffer support.
1862 */
1863 /* check if we need to create bounce buffer */
1864 ret = storvsc_check_bounce_buffer_sgl(storvsc_sglist,
1865 storvsc_sg_count, &not_aligned_seg_bits);
1866 if (ret != -1) {
1867 reqp->bounce_sgl =
1868 storvsc_create_bounce_buffer(storvsc_sg_count,
1869 reqp->vstor_packet.u.vm_srb.data_in);
1870 if (NULL == reqp->bounce_sgl) {
1871 printf("Storvsc_error: "
1872 "create bounce buffer failed.\n");
1873 return (ENOMEM);
1874 }
1875
1876 reqp->bounce_sgl_count = storvsc_sg_count;
1877 reqp->not_aligned_seg_bits = not_aligned_seg_bits;
1878
1879 /*
1880 * if it is write, we need copy the original data
1881 *to bounce buffer
1882 */
1883 if (WRITE_TYPE == reqp->vstor_packet.u.vm_srb.data_in) {
1884 storvsc_copy_sgl_to_bounce_buf(
1885 reqp->bounce_sgl,
1886 storvsc_sglist,
1887 storvsc_sg_count,
1888 reqp->not_aligned_seg_bits);
1889 }
1890
1891 /* transfer virtual address to physical frame number */
1892 if (reqp->not_aligned_seg_bits & 0x1){
1893 phys_addr =
1894 vtophys(reqp->bounce_sgl->sg_segs[0].ss_paddr);
1895 }else{
1896 phys_addr =
1897 vtophys(storvsc_sglist[0].ds_addr);
1898 }
1899 reqp->data_buf.offset = phys_addr & PAGE_MASK;
1900
1901 pfn = phys_addr >> PAGE_SHIFT;
1902 reqp->data_buf.pfn_array[0] = pfn;
1903
1904 for (i = 1; i < storvsc_sg_count; i++) {
1905 if (reqp->not_aligned_seg_bits & (1 << i)) {
1906 phys_addr =
1907 vtophys(reqp->bounce_sgl->sg_segs[i].ss_paddr);
1908 } else {
1909 phys_addr =
1910 vtophys(storvsc_sglist[i].ds_addr);
1911 }
1912
1913 pfn = phys_addr >> PAGE_SHIFT;
1914 reqp->data_buf.pfn_array[i] = pfn;
1915 }
1916 } else {
1917 phys_addr = vtophys(storvsc_sglist[0].ds_addr);
1918
1919 reqp->data_buf.offset = phys_addr & PAGE_MASK;
1920
1921 for (i = 0; i < storvsc_sg_count; i++) {
1922 phys_addr = vtophys(storvsc_sglist[i].ds_addr);
1923 pfn = phys_addr >> PAGE_SHIFT;
1924 reqp->data_buf.pfn_array[i] = pfn;
1925 }
1926
1927 /* check the last segment cross boundary or not */
1928 offset = phys_addr & PAGE_MASK;
1929 if (offset) {
1930 phys_addr =
1931 vtophys(storvsc_sglist[i-1].ds_addr +
1932 PAGE_SIZE - offset);
1933 pfn = phys_addr >> PAGE_SHIFT;
1934 reqp->data_buf.pfn_array[i] = pfn;
1935 }
1936
1937 reqp->bounce_sgl_count = 0;
1938 }
1939 break;
1940 }
1941 default:
1942 printf("Unknow flags: %d\n", ccb->ccb_h.flags);
1943 return(EINVAL);
1944 }
1945
1946 return(0);
1947 }
1948
1949 /**
1950 * @brief completion function before returning to CAM
1951 *
1952 * I/O process has been completed and the result needs
1953 * to be passed to the CAM layer.
1954 * Free resources related to this request.
1955 *
1956 * @param reqp pointer to a request structure
1957 */
static void
storvsc_io_done(struct hv_storvsc_request *reqp)
{
	union ccb *ccb = reqp->ccb;
	struct ccb_scsiio *csio = &ccb->csio;
	struct storvsc_softc *sc = reqp->softc;
	struct vmscsi_req *vm_srb = &reqp->vstor_packet.u.vm_srb;
	bus_dma_segment_t *ori_sglist = NULL;
	int ori_sg_count = 0;
	/* destroy bounce buffer if it is used */
	if (reqp->bounce_sgl_count) {
		ori_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr;
		ori_sg_count = ccb->csio.sglist_cnt;

		/*
		 * If it is READ operation, we should copy back the data
		 * to original SG list.
		 */
		if (READ_TYPE == reqp->vstor_packet.u.vm_srb.data_in) {
			storvsc_copy_from_bounce_buf_to_sgl(ori_sglist,
			    ori_sg_count,
			    reqp->bounce_sgl,
			    reqp->not_aligned_seg_bits);
		}

		storvsc_destroy_bounce_buffer(reqp->bounce_sgl);
		reqp->bounce_sgl_count = 0;
	}

	/* Late completion after a timeout fired: wake the (test) waiter
	 * and clear the retry counter. */
	if (reqp->retries > 0) {
		mtx_lock(&sc->hs_lock);
#if HVS_TIMEOUT_TEST
		xpt_print(ccb->ccb_h.path,
			"%u: IO returned after timeout, "
			"waking up timer handler if any.\n", ticks);
		mtx_lock(&reqp->event.mtx);
		cv_signal(&reqp->event.cv);
		mtx_unlock(&reqp->event.mtx);
#endif
		reqp->retries = 0;
		xpt_print(ccb->ccb_h.path,
			"%u: IO returned after timeout, "
			"stopping timer if any.\n", ticks);
		mtx_unlock(&sc->hs_lock);
	}

#ifdef notyet
	/*
	 * callout_drain() will wait for the timer handler to finish
	 * if it is running. So we don't need any lock to synchronize
	 * between this routine and the timer handler.
	 * Note that we need to make sure reqp is not freed when timer
	 * handler is using or will use it.
	 */
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		callout_drain(&reqp->callout);
	}
#endif

	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	if (vm_srb->scsi_status == SCSI_STATUS_OK) {
		const struct scsi_generic *cmd;

		if (vm_srb->srb_status != SRB_STATUS_SUCCESS) {
			if (vm_srb->srb_status == SRB_STATUS_INVALID_LUN) {
				xpt_print(ccb->ccb_h.path, "invalid LUN %d\n",
				    vm_srb->lun);
			} else {
				xpt_print(ccb->ccb_h.path, "Unknown SRB flag: %d\n",
				    vm_srb->srb_status);
			}
			/*
			 * If there are errors, for example, invalid LUN,
			 * host will inform VM through SRB status.
			 * CAM_SEL_TIMEOUT is delivered so CAM treats the
			 * LUN as absent (enables disk hot-unplug).
			 */
			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
		} else {
			ccb->ccb_h.status |= CAM_REQ_CMP;
		}

		cmd = (const struct scsi_generic *)
		    ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
		     csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes);
		if (cmd->opcode == INQUIRY) {
			struct scsi_inquiry_data *inq_data =
			    (struct scsi_inquiry_data *)csio->data_ptr;
			uint8_t *resp_buf = (uint8_t *)csio->data_ptr;
			int resp_xfer_len, resp_buf_len, data_len;

			/* Get the buffer length reported by host */
			resp_xfer_len = vm_srb->transfer_len;
			/* Get the available buffer length (additional
			 * length byte at offset 4, plus the 5-byte header) */
			resp_buf_len = resp_xfer_len >= 5 ? resp_buf[4] + 5 : 0;
			data_len = (resp_buf_len < resp_xfer_len) ?
			    resp_buf_len : resp_xfer_len;

			if (bootverbose && data_len >= 5) {
				xpt_print(ccb->ccb_h.path, "storvsc inquiry "
				    "(%d) [%x %x %x %x %x ... ]\n", data_len,
				    resp_buf[0], resp_buf[1], resp_buf[2],
				    resp_buf[3], resp_buf[4]);
			}
			if (vm_srb->srb_status == SRB_STATUS_SUCCESS &&
			    data_len > SHORT_INQUIRY_LENGTH) {
				char vendor[16];

				cam_strvis(vendor, inq_data->vendor,
				    sizeof(inq_data->vendor), sizeof(vendor));

				/*
				 * XXX: Upgrade SPC2 to SPC3 if host is WIN8 or
				 * WIN2012 R2 in order to support UNMAP feature.
				 */
				if (!strncmp(vendor, "Msft", 4) &&
				    SID_ANSI_REV(inq_data) == SCSI_REV_SPC2 &&
				    (vmstor_proto_version ==
				     VMSTOR_PROTOCOL_VERSION_WIN8_1 ||
				     vmstor_proto_version ==
				     VMSTOR_PROTOCOL_VERSION_WIN8)) {
					inq_data->version = SCSI_REV_SPC3;
					if (bootverbose) {
						xpt_print(ccb->ccb_h.path,
						    "storvsc upgrades "
						    "SPC2 to SPC3\n");
					}
				}
			}
		}
	} else {
		/* SCSI error: CAM will examine scsi_status/sense data. */
		mtx_lock(&sc->hs_lock);
		xpt_print(ccb->ccb_h.path,
			"storvsc scsi_status = %d\n",
			vm_srb->scsi_status);
		mtx_unlock(&sc->hs_lock);
		ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
	}

	ccb->csio.scsi_status = (vm_srb->scsi_status & 0xFF);
	ccb->csio.resid = ccb->csio.dxfer_len - vm_srb->transfer_len;

	if (reqp->sense_info_len != 0) {
		csio->sense_resid = csio->sense_len - reqp->sense_info_len;
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
	}

	/* Release a SIM-queue freeze taken when the free list ran dry. */
	mtx_lock(&sc->hs_lock);
	if (reqp->softc->hs_frozen == 1) {
		xpt_print(ccb->ccb_h.path,
			"%u: storvsc unfreezing softc 0x%p.\n",
			ticks, reqp->softc);
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		reqp->softc->hs_frozen = 0;
	}
	storvsc_free_request(sc, reqp);
	mtx_unlock(&sc->hs_lock);

	xpt_done_direct(ccb);
}
2117
2118 /**
2119 * @brief Free a request structure
2120 *
2121 * Free a request structure by returning it to the free list
2122 *
2123 * @param sc pointer to a softc
2124 * @param reqp pointer to a request structure
2125 */
static void
storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp)
{
	/* NOTE(review): callers in this file invoke this with hs_lock
	 * held — confirm before adding new call sites. */

	LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
}
2132
2133 /**
2134 * @brief Determine type of storage device from GUID
2135 *
2136 * Using the type GUID, determine if this is a StorVSC (paravirtual
2137 * SCSI or BlkVSC (paravirtual IDE) device.
2138 *
2139 * @param dev a device
2140 * returns an enum
2141 */
2142 static enum hv_storage_type
2143 storvsc_get_storage_type(device_t dev)
2144 {
2145 const char *p = vmbus_get_type(dev);
2146
2147 if (!memcmp(p, &gBlkVscDeviceType, sizeof(hv_guid))) {
2148 return DRIVER_BLKVSC;
2149 } else if (!memcmp(p, &gStorVscDeviceType, sizeof(hv_guid))) {
2150 return DRIVER_STORVSC;
2151 }
2152 return (DRIVER_UNKNOWN);
2153 }
2154

Properties

Name Value
svn:eol-style native
svn:keywords FreeBSD=%H
svn:mime-type text/plain

  ViewVC Help
Powered by ViewVC 1.1.27