From: Stephen Rothwell <sfr@canb.auug.org.au>
Date: 13 Feb 2012
Subject: linux-next: manual merge of the kmap_atomic tree with the staging tree
Hi Cong,

Today's linux-next merge of the kmap_atomic tree got a conflict in
drivers/staging/hv/storvsc_drv.c between various commits from the staging
tree and commit c41a603a7235 ("hv: remove the second argument of
k[un]map_atomic()") from the kmap_atomic tree.

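For reference, the API change behind the conflict is just the removal of
the km_type slot argument from kmap_atomic()/kunmap_atomic(). A minimal
before/after sketch (sgl and i stand in for any scatterlist and index):

	struct page *page = sg_page(&sgl[i]);
	void *addr;

	/* Old API: the caller passed an explicit slot such as KM_IRQ0. */
	addr = kmap_atomic(page, KM_IRQ0);
	/* ... access the mapping ... */
	kunmap_atomic(addr, KM_IRQ0);

	/* New API: the slot argument is gone; atomic kmap nesting is
	 * managed implicitly per-CPU. */
	addr = kmap_atomic(page);
	/* ... access the mapping ... */
	kunmap_atomic(addr);
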
I fixed it up (see below) and can carry the fix as necessary.
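As a reading aid: the resolution below is a combined diff ("diff --cc")
against both merge parents, so a line marked "-" is dropped relative to one
parent, and "++" marks lines present in neither parent, i.e. the manual
fix-up itself, e.g.:

	- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
	++ kunmap_atomic((void *)bounce_addr);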
--
Cheers,
Stephen Rothwell <sfr@canb.auug.org.au>

diff --cc drivers/staging/hv/storvsc_drv.c
index 695ffc3,ddbdec8..0000000
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@@ -408,339 -398,113 +408,335 @@@ get_in_err

}

-static int storvsc_channel_init(struct hv_device *device)
+static void destroy_bounce_buffer(struct scatterlist *sgl,
+ unsigned int sg_count)
{
- struct storvsc_device *stor_device;
- struct hv_storvsc_request *request;
- struct vstor_packet *vstor_packet;
- int ret, t;
+ int i;
+ struct page *page_buf;

- stor_device = get_out_stor_device(device);
- if (!stor_device)
- return -ENODEV;
+ for (i = 0; i < sg_count; i++) {
+ page_buf = sg_page((&sgl[i]));
+ if (page_buf != NULL)
+ __free_page(page_buf);
+ }

- request = &stor_device->init_request;
- vstor_packet = &request->vstor_packet;
+ kfree(sgl);
+}

- /*
- * Now, initiate the vsc/vsp initialization protocol on the open
- * channel
- */
- memset(request, 0, sizeof(struct hv_storvsc_request));
- init_completion(&request->wait_event);
- vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
- vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
+{
+ int i;

- ret = vmbus_sendpacket(device->channel, vstor_packet,
- sizeof(struct vstor_packet),
- (unsigned long)request,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0)
- goto cleanup;
+ /* No need to check */
+ if (sg_count < 2)
+ return -1;

- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
+ /* We have at least 2 sg entries */
+ for (i = 0; i < sg_count; i++) {
+ if (i == 0) {
+ /* make sure 1st one does not have hole */
+ if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
+ return i;
+ } else if (i == sg_count - 1) {
+ /* make sure last one does not have hole */
+ if (sgl[i].offset != 0)
+ return i;
+ } else {
+ /* make sure no hole in the middle */
+ if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
+ return i;
+ }
}
+ return -1;
+}

- if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
- vstor_packet->status != 0)
- goto cleanup;
-
-
- /* reuse the packet for version range supported */
- memset(vstor_packet, 0, sizeof(struct vstor_packet));
- vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
- vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
+ unsigned int sg_count,
+ unsigned int len,
+ int write)
+{
+ int i;
+ int num_pages;
+ struct scatterlist *bounce_sgl;
+ struct page *page_buf;
+ unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);

- vstor_packet->version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
- FILL_VMSTOR_REVISION(vstor_packet->version.revision);
+ num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;

- ret = vmbus_sendpacket(device->channel, vstor_packet,
- sizeof(struct vstor_packet),
- (unsigned long)request,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0)
- goto cleanup;
+ bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
+ if (!bounce_sgl)
+ return NULL;

- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
+ for (i = 0; i < num_pages; i++) {
+ page_buf = alloc_page(GFP_ATOMIC);
+ if (!page_buf)
+ goto cleanup;
+ sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
}

- if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
- vstor_packet->status != 0)
- goto cleanup;
+ return bounce_sgl;

+cleanup:
+ destroy_bounce_buffer(bounce_sgl, num_pages);
+ return NULL;
+}

- memset(vstor_packet, 0, sizeof(struct vstor_packet));
- vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
- vstor_packet->flags = REQUEST_COMPLETION_FLAG;
- vstor_packet->storage_channel_properties.port_number =
- stor_device->port_number;
+/* Assume the original sgl has enough room */
+static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
+ struct scatterlist *bounce_sgl,
+ unsigned int orig_sgl_count,
+ unsigned int bounce_sgl_count)
+{
+ int i;
+ int j = 0;
+ unsigned long src, dest;
+ unsigned int srclen, destlen, copylen;
+ unsigned int total_copied = 0;
+ unsigned long bounce_addr = 0;
+ unsigned long dest_addr = 0;
+ unsigned long flags;

- ret = vmbus_sendpacket(device->channel, vstor_packet,
- sizeof(struct vstor_packet),
- (unsigned long)request,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ local_irq_save(flags);

- if (ret != 0)
- goto cleanup;
+ for (i = 0; i < orig_sgl_count; i++) {
- dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
- KM_IRQ0) + orig_sgl[i].offset;
++ dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])))
++ + orig_sgl[i].offset;
+ dest = dest_addr;
+ destlen = orig_sgl[i].length;

- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
+ if (bounce_addr == 0)
+ bounce_addr =
- (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
- KM_IRQ0);
++ (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])));

- if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
- vstor_packet->status != 0)
- goto cleanup;
+ while (destlen) {
+ src = bounce_addr + bounce_sgl[j].offset;
+ srclen = bounce_sgl[j].length - bounce_sgl[j].offset;

- stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
- stor_device->target_id
- = vstor_packet->storage_channel_properties.target_id;
+ copylen = min(srclen, destlen);
+ memcpy((void *)dest, (void *)src, copylen);

- memset(vstor_packet, 0, sizeof(struct vstor_packet));
- vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
- vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+ total_copied += copylen;
+ bounce_sgl[j].offset += copylen;
+ destlen -= copylen;
+ dest += copylen;

- ret = vmbus_sendpacket(device->channel, vstor_packet,
- sizeof(struct vstor_packet),
- (unsigned long)request,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (bounce_sgl[j].offset == bounce_sgl[j].length) {
+ /* full */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
++ kunmap_atomic((void *)bounce_addr);
+ j++;
+
+ /*
+ * It is possible that the number of elements
+ * in the bounce buffer may not be equal to
+ * the number of elements in the original
+ * scatter list. Handle this correctly.
+ */
+
+ if (j == bounce_sgl_count) {
+ /*
+ * We are done; cleanup and return.
+ */
+ kunmap_atomic((void *)(dest_addr -
- orig_sgl[i].offset),
- KM_IRQ0);
++ orig_sgl[i].offset));
+ local_irq_restore(flags);
+ return total_copied;
+ }
+
+ /* if we need to use another bounce buffer */
+ if (destlen || i != orig_sgl_count - 1)
+ bounce_addr =
+ (unsigned long)kmap_atomic(
- sg_page((&bounce_sgl[j])), KM_IRQ0);
++ sg_page((&bounce_sgl[j])));
+ } else if (destlen == 0 && i == orig_sgl_count - 1) {
+ /* unmap the last bounce that is < PAGE_SIZE */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
++ kunmap_atomic((void *)bounce_addr);
+ }
+ }
+
- kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
- KM_IRQ0);
++ kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset));
+ }
+
+ local_irq_restore(flags);
+
+ return total_copied;
+}
+
+/* Assume the bounce_sgl has enough room ie using the create_bounce_buffer() */
+static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
+ struct scatterlist *bounce_sgl,
+ unsigned int orig_sgl_count)
+{
+ int i;
+ int j = 0;
+ unsigned long src, dest;
+ unsigned int srclen, destlen, copylen;
+ unsigned int total_copied = 0;
+ unsigned long bounce_addr = 0;
+ unsigned long src_addr = 0;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ for (i = 0; i < orig_sgl_count; i++) {
- src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
- KM_IRQ0) + orig_sgl[i].offset;
++ src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])))
++ + orig_sgl[i].offset;
+ src = src_addr;
+ srclen = orig_sgl[i].length;
+
+ if (bounce_addr == 0)
+ bounce_addr =
- (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
- KM_IRQ0);
++ (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])));
+
+ while (srclen) {
+ /* assume bounce offset always == 0 */
+ dest = bounce_addr + bounce_sgl[j].length;
+ destlen = PAGE_SIZE - bounce_sgl[j].length;
+
+ copylen = min(srclen, destlen);
+ memcpy((void *)dest, (void *)src, copylen);
+
+ total_copied += copylen;
+ bounce_sgl[j].length += copylen;
+ srclen -= copylen;
+ src += copylen;
+
+ if (bounce_sgl[j].length == PAGE_SIZE) {
+ /* full..move to next entry */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
++ kunmap_atomic((void *)bounce_addr);
+ j++;
+
+ /* if we need to use another bounce buffer */
+ if (srclen || i != orig_sgl_count - 1)
+ bounce_addr =
+ (unsigned long)kmap_atomic(
- sg_page((&bounce_sgl[j])), KM_IRQ0);
++ sg_page((&bounce_sgl[j])));
+
+ } else if (srclen == 0 && i == orig_sgl_count - 1) {
+ /* unmap the last bounce that is < PAGE_SIZE */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
++ kunmap_atomic((void *)bounce_addr);
+ }
+ }
+
- kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
++ kunmap_atomic((void *)(src_addr - orig_sgl[i].offset));
+ }
+
+ local_irq_restore(flags);
+
+ return total_copied;
+}
+
+static int storvsc_channel_init(struct hv_device *device)
+{
+ struct storvsc_device *stor_device;
+ struct storvsc_cmd_request *request;
+ struct vstor_packet *vstor_packet;
+ int ret, t;
+
+ stor_device = get_out_stor_device(device);
+ if (!stor_device)
+ return -ENODEV;
+
+ request = &stor_device->init_request;
+ vstor_packet = &request->vstor_packet;
+
+ /*
+ * Now, initiate the vsc/vsp initialization protocol on the open
+ * channel
+ */
+ memset(request, 0, sizeof(struct storvsc_cmd_request));
+ init_completion(&request->wait_event);
+ vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ sizeof(struct vstor_packet),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0)
+ goto cleanup;
+
+
+ /* reuse the packet for version range supported */
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
+ vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+
+ vstor_packet->version.major_minor =
+ storvsc_get_version(VMSTOR_CURRENT_MAJOR, VMSTOR_CURRENT_MINOR);
+
+ /*
+ * The revision number is only used in Windows; set it to 0.
+ */
+ vstor_packet->version.revision = 0;
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ sizeof(struct vstor_packet),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0)
+ goto cleanup;
+
+
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
+ vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+ vstor_packet->storage_channel_properties.port_number =
+ stor_device->port_number;
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ sizeof(struct vstor_packet),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0)
+ goto cleanup;
+
+ stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
+ stor_device->target_id
+ = vstor_packet->storage_channel_properties.target_id;
+
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
+ vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ sizeof(struct vstor_packet),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

if (ret != 0)
goto cleanup;