Subject: Re: [Resend RFC PATCH V2 06/12] HV/Vmbus: Add SNP support for VMbus channel initiate message

On Wed, Apr 14, 2021 at 10:49:39AM -0400, Tianyu Lan wrote:
> From: Tianyu Lan <Tianyu.Lan@microsoft.com>
>
> The physical addresses of the monitor pages in the CHANNELMSG_INITIATE_CONTACT
> msg should be in the extra address space for SNP support, and these

What is this 'extra address space'? Is that just the normal virtual
address space of the Linux kernel?
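
If the 'extra address space' is meant to be the guest-physical alias
above ms_hyperv.shared_gpa_boundary (vTOM, if I read the series right),
please spell that out in the commit message. My reading, as a sketch --
gpa and shared_gpa here are illustrative names, not from the patch:

	/*
	 * In an SNP isolation VM the host can only access a page
	 * through its alias above the shared GPA boundary, so the
	 * GPA passed to the host is offset by that boundary:
	 */
	u64 shared_gpa = gpa + ms_hyperv.shared_gpa_boundary;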

> pages also should be accessed via the extra address space inside the
> Linux guest, so remap the extra address with the ioremap function.

OK, why do you need to use ioremap on them? Why not vmap, for
example? What is it that makes ioremap the right candidate?
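
Something along these lines is what I had in mind -- a completely
untested sketch, and it assumes the monitor pages come from the normal
page allocator and that pgprot_decrypted() produces a protection that
encodes the shared-GPA alias for an SNP guest:

	struct page *pages[2] = {
		virt_to_page(vmbus_connection.monitor_pages[0]),
		virt_to_page(vmbus_connection.monitor_pages[1]),
	};
	void *va;

	/* One contiguous, host-visible (decrypted) mapping of both pages. */
	va = vmap(pages, 2, VM_MAP, pgprot_decrypted(PAGE_KERNEL));
	if (!va)
		return -ENOMEM;

That would also keep the unmap path symmetric: a plain vunmap() on a
vmap()ed address, instead of calling vunmap() on something returned
from ioremap_cache() as this patch does.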

>
> Signed-off-by: Tianyu Lan <Tianyu.Lan@microsoft.com>
> ---
>  drivers/hv/connection.c   | 62 +++++++++++++++++++++++++++++++++++++++
>  drivers/hv/hyperv_vmbus.h |  1 +
>  2 files changed, 63 insertions(+)
>
> diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
> index 79bca653dce9..a0be9c11d737 100644
> --- a/drivers/hv/connection.c
> +++ b/drivers/hv/connection.c
> @@ -101,6 +101,12 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
>
>  	msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
>  	msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
> +
> +	if (hv_isolation_type_snp()) {
> +		msg->monitor_page1 += ms_hyperv.shared_gpa_boundary;
> +		msg->monitor_page2 += ms_hyperv.shared_gpa_boundary;
> +	}
> +
>  	msg->target_vcpu = hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
> 
>  	/*
> @@ -145,6 +151,29 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
>  		return -ECONNREFUSED;
>  	}
>
> +	if (hv_isolation_type_snp()) {
> +		vmbus_connection.monitor_pages_va[0]
> +			= vmbus_connection.monitor_pages[0];
> +		vmbus_connection.monitor_pages[0]
> +			= ioremap_cache(msg->monitor_page1, HV_HYP_PAGE_SIZE);
> +		if (!vmbus_connection.monitor_pages[0])
> +			return -ENOMEM;
> +
> +		vmbus_connection.monitor_pages_va[1]
> +			= vmbus_connection.monitor_pages[1];
> +		vmbus_connection.monitor_pages[1]
> +			= ioremap_cache(msg->monitor_page2, HV_HYP_PAGE_SIZE);
> +		if (!vmbus_connection.monitor_pages[1]) {
> +			vunmap(vmbus_connection.monitor_pages[0]);
> +			return -ENOMEM;
> +		}
> +
> +		memset(vmbus_connection.monitor_pages[0], 0x00,
> +		       HV_HYP_PAGE_SIZE);
> +		memset(vmbus_connection.monitor_pages[1], 0x00,
> +		       HV_HYP_PAGE_SIZE);
> +	}
> +
>  	return ret;
>  }
>
> @@ -156,6 +185,7 @@ int vmbus_connect(void)
>  	struct vmbus_channel_msginfo *msginfo = NULL;
>  	int i, ret = 0;
>  	__u32 version;
> +	u64 pfn[2];
>
>  	/* Initialize the vmbus connection */
>  	vmbus_connection.conn_state = CONNECTING;
> @@ -213,6 +243,16 @@ int vmbus_connect(void)
>  		goto cleanup;
>  	}
>
> +	if (hv_isolation_type_snp()) {
> +		pfn[0] = virt_to_hvpfn(vmbus_connection.monitor_pages[0]);
> +		pfn[1] = virt_to_hvpfn(vmbus_connection.monitor_pages[1]);
> +		if (hv_mark_gpa_visibility(2, pfn,
> +				VMBUS_PAGE_VISIBLE_READ_WRITE)) {
> +			ret = -EFAULT;
> +			goto cleanup;
> +		}
> +	}
> +
>  	msginfo = kzalloc(sizeof(*msginfo) +
>  			  sizeof(struct vmbus_channel_initiate_contact),
>  			  GFP_KERNEL);
> @@ -279,6 +319,8 @@ int vmbus_connect(void)
>
>  void vmbus_disconnect(void)
>  {
> +	u64 pfn[2];
> +
>  	/*
>  	 * First send the unload request to the host.
>  	 */
> @@ -298,6 +340,26 @@ void vmbus_disconnect(void)
>  		vmbus_connection.int_page = NULL;
>  	}
>
> +	if (hv_isolation_type_snp()) {
> +		if (vmbus_connection.monitor_pages_va[0]) {
> +			vunmap(vmbus_connection.monitor_pages[0]);
> +			vmbus_connection.monitor_pages[0]
> +				= vmbus_connection.monitor_pages_va[0];
> +			vmbus_connection.monitor_pages_va[0] = NULL;
> +		}
> +
> +		if (vmbus_connection.monitor_pages_va[1]) {
> +			vunmap(vmbus_connection.monitor_pages[1]);
> +			vmbus_connection.monitor_pages[1]
> +				= vmbus_connection.monitor_pages_va[1];
> +			vmbus_connection.monitor_pages_va[1] = NULL;
> +		}
> +
> +		pfn[0] = virt_to_hvpfn(vmbus_connection.monitor_pages[0]);
> +		pfn[1] = virt_to_hvpfn(vmbus_connection.monitor_pages[1]);
> +		hv_mark_gpa_visibility(2, pfn, VMBUS_PAGE_NOT_VISIBLE);
> +	}
> +
>  	hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[0]);
>  	hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[1]);
>  	vmbus_connection.monitor_pages[0] = NULL;
> diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
> index 9416e09ebd58..0778add21a9c 100644
> --- a/drivers/hv/hyperv_vmbus.h
> +++ b/drivers/hv/hyperv_vmbus.h
> @@ -240,6 +240,7 @@ struct vmbus_connection {
>  	 * is child->parent notification
>  	 */
>  	struct hv_monitor_page *monitor_pages[2];
> +	void *monitor_pages_va[2];
>  	struct list_head chn_msg_list;
>  	spinlock_t channelmsg_lock;
>
> --
> 2.25.1
>
