Mirror of https://github.com/NVIDIA/open-gpu-kernel-modules.git
Synced 2025-07-23 15:10:35 +02:00

Commit: 550.78
@@ -2,6 +2,8 @@
 
 ## Release 550 Entries
 
+### [550.78] 2024-04-25
+
 ### [550.76] 2024-04-17
 
 ### [550.67] 2024-03-19
@@ -1,7 +1,7 @@
 # NVIDIA Linux Open GPU Kernel Module Source
 
 This is the source release of the NVIDIA Linux open GPU kernel modules,
-version 550.76.
+version 550.78.
 
 
 ## How to Build
@@ -17,7 +17,7 @@ as root:
 
 Note that the kernel modules built here must be used with GSP
 firmware and user-space NVIDIA GPU driver components from a corresponding
-550.76 driver release. This can be achieved by installing
+550.78 driver release. This can be achieved by installing
 the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
 option. E.g.,
 
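(The example command itself falls outside this hunk; it typically takes the form `sh ./NVIDIA-Linux-x86_64-550.78.run --no-kernel-modules`, with the installer file name assumed here.)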
@@ -188,7 +188,7 @@ encountered specific to them.
 For details on feature support and limitations, see the NVIDIA GPU driver
 end user README here:
 
-https://us.download.nvidia.com/XFree86/Linux-x86_64/550.76/README/kernel_open.html
+https://us.download.nvidia.com/XFree86/Linux-x86_64/550.78/README/kernel_open.html
 
 For vGPU support, please refer to the README.vgpu packaged in the vGPU Host
 Package for more details.
@@ -72,7 +72,7 @@ EXTRA_CFLAGS += -I$(src)/common/inc
 EXTRA_CFLAGS += -I$(src)
 EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args
 EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
-EXTRA_CFLAGS += -DNV_VERSION_STRING=\"550.76\"
+EXTRA_CFLAGS += -DNV_VERSION_STRING=\"550.78\"
 
 ifneq ($(SYSSRCHOST1X),)
 EXTRA_CFLAGS += -I$(SYSSRCHOST1X)
@@ -43,18 +43,18 @@
 #endif
 
 #if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
-#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r550/r550_00-237"
-#define NV_BUILD_CHANGELIST_NUM (34145289)
+#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r550/r550_00-242"
+#define NV_BUILD_CHANGELIST_NUM (34157620)
 #define NV_BUILD_TYPE "Official"
-#define NV_BUILD_NAME "rel/gpu_drv/r550/r550_00-237"
-#define NV_LAST_OFFICIAL_CHANGELIST_NUM (34145289)
+#define NV_BUILD_NAME "rel/gpu_drv/r550/r550_00-242"
+#define NV_LAST_OFFICIAL_CHANGELIST_NUM (34157620)
 
 #else /* Windows builds */
-#define NV_BUILD_BRANCH_VERSION "r550_00-227"
-#define NV_BUILD_CHANGELIST_NUM (34145289)
+#define NV_BUILD_BRANCH_VERSION "r550_00-233"
+#define NV_BUILD_CHANGELIST_NUM (34158633)
 #define NV_BUILD_TYPE "Official"
-#define NV_BUILD_NAME "552.19"
-#define NV_LAST_OFFICIAL_CHANGELIST_NUM (34145289)
+#define NV_BUILD_NAME "552.25"
+#define NV_LAST_OFFICIAL_CHANGELIST_NUM (34158633)
 #define NV_BUILD_BRANCH_BASE_VERSION R550
 #endif
 // End buildmeister python edited section
@@ -4,7 +4,7 @@
 #if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \
     (defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1)
 
-#define NV_VERSION_STRING "550.76"
+#define NV_VERSION_STRING "550.78"
 
 #else
 
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -30,6 +30,8 @@
 // Source file: class/cl0050.finn
 //
 
+#include "nvcfg_sdk.h"
+
 #define NV_CE_UTILS (0x50U) /* finn: Evaluated from "NV0050_ALLOCATION_PARAMETERS_MESSAGE_ID" */
 
 #define NV0050_ALLOCATION_PARAMETERS_MESSAGE_ID (0x0050U)
@@ -37,6 +39,7 @@
 typedef struct NV0050_ALLOCATION_PARAMETERS {
     NvHandle hVaspace;
     NV_DECLARE_ALIGNED(NvU64 flags, 8);
+    NvU32    forceCeId;
 } NV0050_ALLOCATION_PARAMETERS;
 
 
@@ -54,3 +57,14 @@ typedef struct NV0050_ALLOCATION_PARAMETERS {
 #define NV0050_CEUTILS_FLAGS_FIFO_LITE          2:2
 #define NV0050_CEUTILS_FLAGS_FIFO_LITE_FALSE    (0x00000000)
 #define NV0050_CEUTILS_FLAGS_FIFO_LITE_TRUE     (0x00000001)
+
+// Force a specific CE engine to be used by setting forceCeId
+#define NV0050_CEUTILS_FLAGS_FORCE_CE_ID        4:4
+#define NV0050_CEUTILS_FLAGS_FORCE_CE_ID_FALSE  (0x00000000)
+#define NV0050_CEUTILS_FLAGS_FORCE_CE_ID_TRUE   (0x00000001)
+
+// Use a CC secure channel
+#define NV0050_CEUTILS_FLAGS_CC_SECURE          5:5
+#define NV0050_CEUTILS_FLAGS_CC_SECURE_FALSE    (0x00000000)
+#define NV0050_CEUTILS_FLAGS_CC_SECURE_TRUE     (0x00000001)
+
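Taken together, the new FORCE_CE_ID flag and the forceCeId member added above let a caller pin a CeUtils allocation to one specific copy engine. A minimal sketch of how they compose, modeled on the kceRunFipsSelfTest hunk at the end of this commit and using the tree's DRF helpers (the CE index is illustrative):

    NV0050_ALLOCATION_PARAMETERS ceUtilsParams = {0};

    // Pin the allocation to a specific copy engine and request a CC secure channel.
    ceUtilsParams.flags |= DRF_DEF(0050_CEUTILS, _FLAGS, _FORCE_CE_ID, _TRUE);
    ceUtilsParams.flags |= DRF_DEF(0050_CEUTILS, _FLAGS, _CC_SECURE, _TRUE);
    ceUtilsParams.forceCeId = 3;  // hypothetical LCE index
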
@@ -52,6 +52,10 @@
 #define NVC8B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL                  (0x00000001)
 #define NVC8B5_LAUNCH_DMA_DST_TYPE                           13:13
 #define NVC8B5_LAUNCH_DMA_DST_TYPE_PHYSICAL                  (0x00000001)
+#define NVC8B5_LAUNCH_DMA_COPY_TYPE                          21:20
+#define NVC8B5_LAUNCH_DMA_COPY_TYPE_PROT2PROT                (0x00000000)
+#define NVC8B5_LAUNCH_DMA_COPY_TYPE_DEFAULT                  (0x00000000)
+#define NVC8B5_LAUNCH_DMA_COPY_TYPE_SECURE                   (0x00000001)
 #define NVC8B5_LAUNCH_DMA_MEMORY_SCRUB_ENABLE                23:23
 #define NVC8B5_LAUNCH_DMA_MEMORY_SCRUB_ENABLE_TRUE           (0x00000001)
 #define NVC8B5_LAUNCH_DMA_DISABLE_PLC                        26:26
@@ -61,6 +65,22 @@
 #define NVC8B5_OFFSET_OUT_LOWER                              (0x0000040C)
 #define NVC8B5_OFFSET_OUT_LOWER_VALUE                        31:0
 #define NVC8B5_LINE_LENGTH_IN                                (0x00000418)
+#define NVC8B5_SET_SECURE_COPY_MODE                          (0x00000500)
+#define NVC8B5_SET_SECURE_COPY_MODE_MODE                     0:0
+#define NVC8B5_SET_SECURE_COPY_MODE_MODE_ENCRYPT             (0x00000000)
+#define NVC8B5_SET_SECURE_COPY_MODE_MODE_DECRYPT             (0x00000001)
+#define NVC8B5_SET_DECRYPT_AUTH_TAG_COMPARE_ADDR_UPPER       (0x00000514)
+#define NVC8B5_SET_DECRYPT_AUTH_TAG_COMPARE_ADDR_UPPER_UPPER 24:0
+#define NVC8B5_SET_DECRYPT_AUTH_TAG_COMPARE_ADDR_LOWER       (0x00000518)
+#define NVC8B5_SET_DECRYPT_AUTH_TAG_COMPARE_ADDR_LOWER_LOWER 31:0
+#define NVC8B5_SET_ENCRYPT_AUTH_TAG_ADDR_UPPER               (0x00000530)
+#define NVC8B5_SET_ENCRYPT_AUTH_TAG_ADDR_UPPER_UPPER         24:0
+#define NVC8B5_SET_ENCRYPT_AUTH_TAG_ADDR_LOWER               (0x00000534)
+#define NVC8B5_SET_ENCRYPT_AUTH_TAG_ADDR_LOWER_LOWER         31:0
+#define NVC8B5_SET_ENCRYPT_IV_ADDR_UPPER                     (0x00000538)
+#define NVC8B5_SET_ENCRYPT_IV_ADDR_UPPER_UPPER               24:0
+#define NVC8B5_SET_ENCRYPT_IV_ADDR_LOWER                     (0x0000053C)
+#define NVC8B5_SET_ENCRYPT_IV_ADDR_LOWER_LOWER               31:0
 #define NVC8B5_SET_MEMORY_SCRUB_PARAMETERS                   (0x000006FC)
 #define NVC8B5_SET_MEMORY_SCRUB_PARAMETERS_DISCARDABLE       0:0
 #define NVC8B5_SET_MEMORY_SCRUB_PARAMETERS_DISCARDABLE_FALSE (0x00000000)
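These new LaunchDMA fields follow the class-header DRF conventions used throughout the tree, so composing a method value for an encrypting copy might look like this sketch (push-buffer plumbing omitted; this call site is not part of the commit):

    NvU32 launchDma = 0;

    // Physical-to-physical copy with the new secure copy type selected.
    launchDma |= DRF_DEF(C8B5, _LAUNCH_DMA, _SRC_TYPE, _PHYSICAL);
    launchDma |= DRF_DEF(C8B5, _LAUNCH_DMA, _DST_TYPE, _PHYSICAL);
    launchDma |= DRF_DEF(C8B5, _LAUNCH_DMA, _COPY_TYPE, _SECURE);
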
@@ -3839,6 +3839,34 @@ typedef struct NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_GPU_STATE_PARAMS {
     NvBool bAcceptClientRequest;
 } NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_GPU_STATE_PARAMS;
 
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY
+ *
+ * This control call can be used to set the CC security policy on GSP.
+ * This is an internal command sent from Kernel RM to Physical RM.
+ *
+ * attackerAdvantage [IN]
+ *     The minimum and maximum values for attackerAdvantage.
+ *     The probability of an attacker successfully guessing the contents of
+ *     an encrypted packet goes up ("attacker advantage").
+ *
+ * Possible status values returned are:
+ *    NV_OK
+ *    NV_ERR_INVALID_OBJECT_HANDLE
+ *    NV_ERR_INVALID_STATE
+ *    NV_ERR_INVALID_ARGUMENT
+ *    NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY (0x20800ae8) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY_PARAMS_MESSAGE_ID (0xE8U)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 attackerAdvantage, 8);
+} NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY_PARAMS;
+
+
+
 /*
  * NV2080_CTRL_CMD_INTERNAL_MEMMGR_MEMORY_TRANSFER_WITH_GSP
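A sketch of how Kernel RM might issue this new internal control to Physical RM, following the RM_API pattern used by the neighboring internal conf-compute controls (this call site is not part of the diff; the handles and the value 60, the default defined in ctrlc56f.h below, are assumptions):

    NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY_PARAMS params = {0};
    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);

    params.attackerAdvantage = 60;  // assumed; matches SECURITY_POLICY_ATTACKER_ADVANTAGE_DEFAULT

    NV_STATUS status = pRmApi->Control(pRmApi,
                                       pGpu->hInternalClient,
                                       pGpu->hInternalSubdevice,
                                       NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY,
                                       &params, sizeof(params));
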
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -168,20 +168,20 @@ typedef union RM_GSP_SPDM_CMD *PRM_GSP_SPDM_CMD;
  * SPDM message structure.
  */
 typedef struct RM_GSP_SPDM_MSG {
     NvU8   msgType;
 
     // status returned from GSP message infrastructure.
     NvU32  status;
 
     NvU32  rsvd1;
 
     NvU32  rsvd2;
 
     NvU32  rsvd3;
 
     NvU32  rsvd4;
 
-    NvBool rsvd5;
+    NvU32  rsvd5;
 } RM_GSP_SPDM_MSG;
 typedef struct RM_GSP_SPDM_MSG *PRM_GSP_SPDM_MSG;
 
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2018-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -166,9 +166,9 @@ typedef struct NVC56F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS {
 * NVC56F_CTRL_CMD_GET_KMB_STAT_ADDR
 *
 * This struct defines the addresses to log encryption statistics
 * amountEncryptedAddr
 *     Amount of bytes encrypted
 * numberEncryptedAddr
 *     Number of times data was encrypted.
 */
 typedef struct NVC56F_CTRL_CMD_GET_KMB_STAT_ADDR {
@@ -180,7 +180,7 @@ typedef struct NVC56F_CTRL_CMD_GET_KMB_STAT_ADDR {
 * NVC56F_CTRL_CMD_GET_KMB
 *
 * This command returns the Key Material Bundle (KMB) for the current channel.
 *
 * kmb     [OUT] The KMB for the channel.
 * hMemory [IN]  Memory handle to the encryption statistics buffer for the channel.
 *
@@ -239,10 +239,11 @@ typedef struct NVC56F_CTRL_ROTATE_SECURE_CHANNEL_IV_PARAMS {
 *    NV_ERR_NOT_SUPPORTED
 */
 
 /*
 * The minimum and maximum values for attackerAdvantage.
 * The probability of an attacker successfully guessing the contents of an encrypted packet goes up ("attacker advantage").
 */
+#define SECURITY_POLICY_ATTACKER_ADVANTAGE_DEFAULT (60)
 #define SET_SECURITY_POLICY_ATTACKER_ADVANTAGE_MIN (50)
 #define SET_SECURITY_POLICY_ATTACKER_ADVANTAGE_MAX (75)
 
@@ -259,7 +260,7 @@ typedef struct NV_CONF_COMPUTE_CTRL_SET_SECURITY_POLICY_PARAMS {
 *
 * This command gets the CC security policy.
 *
 * attackerAdvantage [OUT]
 *
 * Possible status values returned are:
 *    NV_OK
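The new default of 60 sits inside the existing [50, 75] window, so a range check along these lines is implied (the helper name is hypothetical; the real validation site is not shown in this diff):

    // Hypothetical sketch of the range check implied by the MIN/MAX defines.
    static NV_STATUS ccCheckAttackerAdvantage(NvU64 attackerAdvantage)
    {
        if ((attackerAdvantage < SET_SECURITY_POLICY_ATTACKER_ADVANTAGE_MIN) ||
            (attackerAdvantage > SET_SECURITY_POLICY_ATTACKER_ADVANTAGE_MAX))
        {
            return NV_ERR_INVALID_ARGUMENT;
        }
        return NV_OK;
    }
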
[File diff suppressed because it is too large]
@@ -7,7 +7,7 @@ extern "C" {
 #endif
 
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -59,6 +59,12 @@ typedef struct
     NvU64  length;
     NvU64  flags;
     NvU64  submittedWorkId;  // Payload to poll for async completion
+
+    NvBool bSecureCopy;      // The copy encrypts/decrypts when copying to/from unprotected memory
+    NvBool bEncrypt;         // Toggle encrypt/decrypt
+    NvU64  authTagAddr;      // encryption authTag address. Same aperture as unencrypted operand assumed. 16 byte aligned
+    NvU64  encryptIvAddr;    // IV value that was used for encryption, requirements are the same as for authTagAddr. Required
+
 } CEUTILS_MEMCOPY_PARAMS;
 
 struct KernelChannel;
@@ -90,8 +96,6 @@ struct CeUtils {
     struct Object *__nvoc_pbase_Object;
     struct CeUtils *__nvoc_pbase_CeUtils;
     NvHandle hClient;
-    NvHandle hDevice;
-    NvHandle hSubdevice;
     OBJCHANNEL *pChannel;
     struct OBJGPU *pGpu;
     struct KernelCE *pKCe;
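A sketch of a caller filling in the new secure-copy fields of CEUTILS_MEMCOPY_PARAMS (the addresses are hypothetical placeholders; per the field comments they must sit in the same aperture as the unencrypted operand and be 16-byte aligned):

    CEUTILS_MEMCOPY_PARAMS params = {0};

    params.bSecureCopy   = NV_TRUE;         // copy crosses the protected/unprotected boundary
    params.bEncrypt      = NV_TRUE;         // NV_TRUE encrypts, NV_FALSE decrypts
    params.authTagAddr   = authTagGpuVa;    // hypothetical 16-byte-aligned address
    params.encryptIvAddr = encryptIvGpuVa;  // hypothetical; same requirements as authTagAddr
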
@@ -7,7 +7,7 @@ extern "C" {
 #endif
 
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,6 +39,7 @@ extern "C" {
 #include "gpu/spdm/spdm.h"
 #include "ctrl/ctrl2080/ctrl2080spdm.h"
 #include "ctrl/ctrl2080/ctrl2080internal.h"
+#include "ctrl/ctrlc56f.h"
 #include "cc_drv.h"
 #include "conf_compute/cc_keystore.h"
 #include "kernel/gpu/fifo/kernel_channel.h"
@@ -154,12 +155,16 @@ struct ConfidentialCompute {
     NvU32 keyRotationEnableMask;
     KEY_ROTATION_STATS_INFO lowerThreshold;
     KEY_ROTATION_STATS_INFO upperThreshold;
+    NvU64 attackerAdvantage;
     NvU8 PRIVATE_FIELD(m_exportMasterKey)[32];
     void *PRIVATE_FIELD(m_keySlot);
     KEY_ROTATION_STATUS PRIVATE_FIELD(keyRotationState)[62];
     KEY_ROTATION_STATS_INFO PRIVATE_FIELD(aggregateStats)[62];
     KEY_ROTATION_STATS_INFO PRIVATE_FIELD(freedChannelAggregateStats)[62];
     PTMR_EVENT PRIVATE_FIELD(ppKeyRotationTimer)[62];
+    NvU64 PRIVATE_FIELD(keyRotationLimitDelta);
+    NvU64 PRIVATE_FIELD(keyRotationUpperLimit);
+    NvU64 PRIVATE_FIELD(keyRotationLowerLimit);
 };
 
 #ifndef __NVOC_CLASS_ConfidentialCompute_TYPEDEF__
@@ -711,6 +716,39 @@ static inline NV_STATUS confComputeUpdateFreedChannelStats(struct OBJGPU *pGpu,
 #define confComputeUpdateFreedChannelStats(pGpu, pConfCompute, pKernelChannel) confComputeUpdateFreedChannelStats_IMPL(pGpu, pConfCompute, pKernelChannel)
 #endif //__nvoc_conf_compute_h_disabled
 
+NV_STATUS confComputeSetKeyRotationThreshold_IMPL(struct ConfidentialCompute *pConfCompute, NvU64 attackerAdvantage);
+
+#ifdef __nvoc_conf_compute_h_disabled
+static inline NV_STATUS confComputeSetKeyRotationThreshold(struct ConfidentialCompute *pConfCompute, NvU64 attackerAdvantage) {
+    NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_conf_compute_h_disabled
+#define confComputeSetKeyRotationThreshold(pConfCompute, attackerAdvantage) confComputeSetKeyRotationThreshold_IMPL(pConfCompute, attackerAdvantage)
+#endif //__nvoc_conf_compute_h_disabled
+
+NvBool confComputeIsUpperThresholdCrossed_IMPL(struct ConfidentialCompute *pConfCompute, const KEY_ROTATION_STATS_INFO *pStatsInfo);
+
+#ifdef __nvoc_conf_compute_h_disabled
+static inline NvBool confComputeIsUpperThresholdCrossed(struct ConfidentialCompute *pConfCompute, const KEY_ROTATION_STATS_INFO *pStatsInfo) {
+    NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_conf_compute_h_disabled
+#define confComputeIsUpperThresholdCrossed(pConfCompute, pStatsInfo) confComputeIsUpperThresholdCrossed_IMPL(pConfCompute, pStatsInfo)
+#endif //__nvoc_conf_compute_h_disabled
+
+NvBool confComputeIsLowerThresholdCrossed_IMPL(struct ConfidentialCompute *pConfCompute, const KEY_ROTATION_STATS_INFO *pStatsInfo);
+
+#ifdef __nvoc_conf_compute_h_disabled
+static inline NvBool confComputeIsLowerThresholdCrossed(struct ConfidentialCompute *pConfCompute, const KEY_ROTATION_STATS_INFO *pStatsInfo) {
+    NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_conf_compute_h_disabled
+#define confComputeIsLowerThresholdCrossed(pConfCompute, pStatsInfo) confComputeIsLowerThresholdCrossed_IMPL(pConfCompute, pStatsInfo)
+#endif //__nvoc_conf_compute_h_disabled
+
 #undef PRIVATE_FIELD
 
 #ifndef NVOC_CONF_COMPUTE_H_PRIVATE_ACCESS_ALLOWED
@@ -750,6 +788,16 @@ NV_STATUS NVOC_PRIVATE_FUNCTION(confComputeKeyStoreUpdateKey)(struct Confidentia
 #undef confComputeKeyStoreUpdateKey_HAL
 NV_STATUS NVOC_PRIVATE_FUNCTION(confComputeKeyStoreUpdateKey_HAL)(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId);
 
+#ifndef __nvoc_conf_compute_h_disabled
+#undef confComputeIsUpperThresholdCrossed
+NvBool NVOC_PRIVATE_FUNCTION(confComputeIsUpperThresholdCrossed)(struct ConfidentialCompute *pConfCompute, const KEY_ROTATION_STATS_INFO *pStatsInfo);
+#endif //__nvoc_conf_compute_h_disabled
+
+#ifndef __nvoc_conf_compute_h_disabled
+#undef confComputeIsLowerThresholdCrossed
+NvBool NVOC_PRIVATE_FUNCTION(confComputeIsLowerThresholdCrossed)(struct ConfidentialCompute *pConfCompute, const KEY_ROTATION_STATS_INFO *pStatsInfo);
+#endif //__nvoc_conf_compute_h_disabled
+
 #endif // NVOC_CONF_COMPUTE_H_PRIVATE_ACCESS_ALLOWED
 
 
@@ -303,6 +303,7 @@ struct OBJGPUMGR {
     GPU_HANDLE_ID gpuHandleIDList[32];
     NvU32 numGpuHandles;
     CONF_COMPUTE_CAPS ccCaps;
+    NvU64 ccAttackerAdvantage;
     pcieP2PCapsInfoList pcieP2PCapsInfoCache;
     void *pcieP2PCapsInfoLock;
 };
@@ -83,14 +83,22 @@ static NvBool __nvoc_thunk_KernelCE_engstateIsPresent(OBJGPU *pGpu, struct OBJEN
     return kceIsPresent(pGpu, (struct KernelCE *)(((unsigned char *)pKCe) - __nvoc_rtti_KernelCE_OBJENGSTATE.offset));
 }
 
-static NV_STATUS __nvoc_thunk_KernelCE_engstateStateLoad(OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 arg2) {
-    return kceStateLoad(arg0, (struct KernelCE *)(((unsigned char *)arg1) - __nvoc_rtti_KernelCE_OBJENGSTATE.offset), arg2);
+static NV_STATUS __nvoc_thunk_KernelCE_engstateStateInitLocked(OBJGPU *arg0, struct OBJENGSTATE *arg1) {
+    return kceStateInitLocked(arg0, (struct KernelCE *)(((unsigned char *)arg1) - __nvoc_rtti_KernelCE_OBJENGSTATE.offset));
 }
 
 static NV_STATUS __nvoc_thunk_KernelCE_engstateStateUnload(OBJGPU *pGpu, struct OBJENGSTATE *pKCe, NvU32 flags) {
     return kceStateUnload(pGpu, (struct KernelCE *)(((unsigned char *)pKCe) - __nvoc_rtti_KernelCE_OBJENGSTATE.offset), flags);
 }
 
+static NV_STATUS __nvoc_thunk_KernelCE_engstateStateLoad(OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 arg2) {
+    return kceStateLoad(arg0, (struct KernelCE *)(((unsigned char *)arg1) - __nvoc_rtti_KernelCE_OBJENGSTATE.offset), arg2);
+}
+
+static void __nvoc_thunk_KernelCE_engstateStateDestroy(OBJGPU *arg0, struct OBJENGSTATE *arg1) {
+    kceStateDestroy(arg0, (struct KernelCE *)(((unsigned char *)arg1) - __nvoc_rtti_KernelCE_OBJENGSTATE.offset));
+}
+
 static void __nvoc_thunk_KernelCE_intrservRegisterIntrService(OBJGPU *arg0, struct IntrService *arg1, IntrServiceRecord arg2[171]) {
     kceRegisterIntrService(arg0, (struct KernelCE *)(((unsigned char *)arg1) - __nvoc_rtti_KernelCE_IntrService.offset), arg2);
 }
@@ -99,10 +107,6 @@ static NV_STATUS __nvoc_thunk_KernelCE_intrservServiceNotificationInterrupt(OBJG
     return kceServiceNotificationInterrupt(arg0, (struct KernelCE *)(((unsigned char *)arg1) - __nvoc_rtti_KernelCE_IntrService.offset), arg2);
 }
 
-static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceStateInitLocked(POBJGPU pGpu, struct KernelCE *pEngstate) {
-    return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset));
-}
-
 static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceStatePreLoad(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) {
     return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset), arg0);
 }
@@ -111,10 +115,6 @@ static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceStatePostUnload(POBJGPU pGpu, struc
     return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset), arg0);
 }
 
-static void __nvoc_thunk_OBJENGSTATE_kceStateDestroy(POBJGPU pGpu, struct KernelCE *pEngstate) {
-    engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset));
-}
-
 static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceStatePreUnload(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) {
     return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset), arg0);
 }
@@ -172,6 +172,17 @@ void __nvoc_init_dataField_KernelCE(KernelCE *pThis, RmHalspecOwner *pRmhalspeco
     PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
     PORT_UNREFERENCED_VARIABLE(rmVariantHal);
     PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
+
+    // Hal field -- bCcFipsSelfTestRequired
+    if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
+    {
+        pThis->bCcFipsSelfTestRequired = ((NvBool)(0 == 0));
+    }
+    // default
+    else
+    {
+        pThis->bCcFipsSelfTestRequired = ((NvBool)(0 != 0));
+    }
 }
 
 NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* );
@@ -210,13 +221,17 @@ static void __nvoc_init_funcTable_KernelCE_1(KernelCE *pThis, RmHalspecOwner *pR
     // Hal function -- kceIsPresent
     pThis->__kceIsPresent__ = &kceIsPresent_IMPL;
 
-    // Hal function -- kceStateLoad
-    pThis->__kceStateLoad__ = &kceStateLoad_GP100;
+    pThis->__kceStateInitLocked__ = &kceStateInitLocked_IMPL;
 
     // Hal function -- kceStateUnload
     // default
     pThis->__kceStateUnload__ = &kceStateUnload_56cd7a;
+
+    // Hal function -- kceStateLoad
+    pThis->__kceStateLoad__ = &kceStateLoad_GP100;
+
+    pThis->__kceStateDestroy__ = &kceStateDestroy_IMPL;
 
     pThis->__kceRegisterIntrService__ = &kceRegisterIntrService_IMPL;
 
     pThis->__kceServiceNotificationInterrupt__ = &kceServiceNotificationInterrupt_IMPL;
@@ -436,22 +451,22 @@ static void __nvoc_init_funcTable_KernelCE_1(KernelCE *pThis, RmHalspecOwner *pR
 
     pThis->__nvoc_base_OBJENGSTATE.__engstateIsPresent__ = &__nvoc_thunk_KernelCE_engstateIsPresent;
 
-    pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_KernelCE_engstateStateLoad;
+    pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelCE_engstateStateInitLocked;
 
     pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_KernelCE_engstateStateUnload;
 
+    pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_KernelCE_engstateStateLoad;
+
+    pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_KernelCE_engstateStateDestroy;
+
     pThis->__nvoc_base_IntrService.__intrservRegisterIntrService__ = &__nvoc_thunk_KernelCE_intrservRegisterIntrService;
 
     pThis->__nvoc_base_IntrService.__intrservServiceNotificationInterrupt__ = &__nvoc_thunk_KernelCE_intrservServiceNotificationInterrupt;
 
-    pThis->__kceStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kceStateInitLocked;
-
     pThis->__kceStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kceStatePreLoad;
 
     pThis->__kceStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kceStatePostUnload;
 
-    pThis->__kceStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_kceStateDestroy;
-
     pThis->__kceStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kceStatePreUnload;
 
     pThis->__kceStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kceStateInitUnlocked;
@@ -115,8 +115,10 @@ struct KernelCE {
     struct KernelCE *__nvoc_pbase_KernelCE;
     NV_STATUS (*__kceConstructEngine__)(OBJGPU *, struct KernelCE *, ENGDESCRIPTOR);
     NvBool (*__kceIsPresent__)(OBJGPU *, struct KernelCE *);
-    NV_STATUS (*__kceStateLoad__)(OBJGPU *, struct KernelCE *, NvU32);
+    NV_STATUS (*__kceStateInitLocked__)(OBJGPU *, struct KernelCE *);
     NV_STATUS (*__kceStateUnload__)(OBJGPU *, struct KernelCE *, NvU32);
+    NV_STATUS (*__kceStateLoad__)(OBJGPU *, struct KernelCE *, NvU32);
+    void (*__kceStateDestroy__)(OBJGPU *, struct KernelCE *);
     void (*__kceRegisterIntrService__)(OBJGPU *, struct KernelCE *, IntrServiceRecord *);
     NV_STATUS (*__kceServiceNotificationInterrupt__)(OBJGPU *, struct KernelCE *, IntrServiceServiceNotificationInterruptArguments *);
     NV_STATUS (*__kceGetP2PCes__)(struct KernelCE *, OBJGPU *, NvU32, NvU32 *);
@@ -136,10 +138,8 @@ struct KernelCE {
     NvU32 (*__kceGetGrceSupportedLceMask__)(OBJGPU *, struct KernelCE *);
     NvBool (*__kceIsGenXorHigherSupported__)(OBJGPU *, struct KernelCE *, NvU32);
     void (*__kceApplyGen4orHigherMapping__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32, NvU32);
-    NV_STATUS (*__kceStateInitLocked__)(POBJGPU, struct KernelCE *);
     NV_STATUS (*__kceStatePreLoad__)(POBJGPU, struct KernelCE *, NvU32);
     NV_STATUS (*__kceStatePostUnload__)(POBJGPU, struct KernelCE *, NvU32);
-    void (*__kceStateDestroy__)(POBJGPU, struct KernelCE *);
     NV_STATUS (*__kceStatePreUnload__)(POBJGPU, struct KernelCE *, NvU32);
     NV_STATUS (*__kceStateInitUnlocked__)(POBJGPU, struct KernelCE *);
     void (*__kceInitMissing__)(POBJGPU, struct KernelCE *);
@@ -156,6 +156,7 @@ struct KernelCE {
     NvBool bIsAutoConfigEnabled;
     NvBool bUseGen4Mapping;
    struct IoAperture aperture;
+    NvBool bCcFipsSelfTestRequired;
 };
 
 #ifndef __NVOC_CLASS_KernelCE_TYPEDEF__
@@ -191,10 +192,12 @@ NV_STATUS __nvoc_objCreate_KernelCE(KernelCE**, Dynamic*, NvU32);
 #define kceConstructEngine(pGpu, pKCe, arg0) kceConstructEngine_DISPATCH(pGpu, pKCe, arg0)
 #define kceIsPresent(pGpu, pKCe) kceIsPresent_DISPATCH(pGpu, pKCe)
 #define kceIsPresent_HAL(pGpu, pKCe) kceIsPresent_DISPATCH(pGpu, pKCe)
-#define kceStateLoad(arg0, arg1, arg2) kceStateLoad_DISPATCH(arg0, arg1, arg2)
-#define kceStateLoad_HAL(arg0, arg1, arg2) kceStateLoad_DISPATCH(arg0, arg1, arg2)
+#define kceStateInitLocked(arg0, arg1) kceStateInitLocked_DISPATCH(arg0, arg1)
 #define kceStateUnload(pGpu, pKCe, flags) kceStateUnload_DISPATCH(pGpu, pKCe, flags)
 #define kceStateUnload_HAL(pGpu, pKCe, flags) kceStateUnload_DISPATCH(pGpu, pKCe, flags)
+#define kceStateLoad(arg0, arg1, arg2) kceStateLoad_DISPATCH(arg0, arg1, arg2)
+#define kceStateLoad_HAL(arg0, arg1, arg2) kceStateLoad_DISPATCH(arg0, arg1, arg2)
+#define kceStateDestroy(arg0, arg1) kceStateDestroy_DISPATCH(arg0, arg1)
 #define kceRegisterIntrService(arg0, arg1, arg2) kceRegisterIntrService_DISPATCH(arg0, arg1, arg2)
 #define kceServiceNotificationInterrupt(arg0, arg1, arg2) kceServiceNotificationInterrupt_DISPATCH(arg0, arg1, arg2)
 #define kceGetP2PCes(arg0, pGpu, gpuMask, nvlinkP2PCeMask) kceGetP2PCes_DISPATCH(arg0, pGpu, gpuMask, nvlinkP2PCeMask)
@@ -231,10 +234,8 @@ NV_STATUS __nvoc_objCreate_KernelCE(KernelCE**, Dynamic*, NvU32);
 #define kceIsGenXorHigherSupported_HAL(pGpu, pCe, checkGen) kceIsGenXorHigherSupported_DISPATCH(pGpu, pCe, checkGen)
 #define kceApplyGen4orHigherMapping(pGpu, pCe, arg0, arg1, arg2, arg3) kceApplyGen4orHigherMapping_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
 #define kceApplyGen4orHigherMapping_HAL(pGpu, pCe, arg0, arg1, arg2, arg3) kceApplyGen4orHigherMapping_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3)
-#define kceStateInitLocked(pGpu, pEngstate) kceStateInitLocked_DISPATCH(pGpu, pEngstate)
 #define kceStatePreLoad(pGpu, pEngstate, arg0) kceStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
 #define kceStatePostUnload(pGpu, pEngstate, arg0) kceStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
-#define kceStateDestroy(pGpu, pEngstate) kceStateDestroy_DISPATCH(pGpu, pEngstate)
 #define kceStatePreUnload(pGpu, pEngstate, arg0) kceStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
 #define kceStateInitUnlocked(pGpu, pEngstate) kceStateInitUnlocked_DISPATCH(pGpu, pEngstate)
 #define kceInitMissing(pGpu, pEngstate) kceInitMissing_DISPATCH(pGpu, pEngstate)
@@ -366,10 +367,10 @@ static inline NvBool kceIsPresent_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe)
     return pKCe->__kceIsPresent__(pGpu, pKCe);
 }
 
-NV_STATUS kceStateLoad_GP100(OBJGPU *arg0, struct KernelCE *arg1, NvU32 arg2);
+NV_STATUS kceStateInitLocked_IMPL(OBJGPU *arg0, struct KernelCE *arg1);
 
-static inline NV_STATUS kceStateLoad_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1, NvU32 arg2) {
-    return arg1->__kceStateLoad__(arg0, arg1, arg2);
+static inline NV_STATUS kceStateInitLocked_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1) {
+    return arg1->__kceStateInitLocked__(arg0, arg1);
 }
 
 static inline NV_STATUS kceStateUnload_56cd7a(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 flags) {
@@ -380,6 +381,18 @@ static inline NV_STATUS kceStateUnload_DISPATCH(OBJGPU *pGpu, struct KernelCE *p
     return pKCe->__kceStateUnload__(pGpu, pKCe, flags);
 }
 
+NV_STATUS kceStateLoad_GP100(OBJGPU *arg0, struct KernelCE *arg1, NvU32 arg2);
+
+static inline NV_STATUS kceStateLoad_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1, NvU32 arg2) {
+    return arg1->__kceStateLoad__(arg0, arg1, arg2);
+}
+
+void kceStateDestroy_IMPL(OBJGPU *arg0, struct KernelCE *arg1);
+
+static inline void kceStateDestroy_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1) {
+    arg1->__kceStateDestroy__(arg0, arg1);
+}
+
 void kceRegisterIntrService_IMPL(OBJGPU *arg0, struct KernelCE *arg1, IntrServiceRecord arg2[171]);
 
 static inline void kceRegisterIntrService_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1, IntrServiceRecord arg2[171]) {
@@ -574,10 +587,6 @@ static inline void kceApplyGen4orHigherMapping_DISPATCH(OBJGPU *pGpu, struct Ker
     pCe->__kceApplyGen4orHigherMapping__(pGpu, pCe, arg0, arg1, arg2, arg3);
 }
 
-static inline NV_STATUS kceStateInitLocked_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) {
-    return pEngstate->__kceStateInitLocked__(pGpu, pEngstate);
-}
-
 static inline NV_STATUS kceStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) {
     return pEngstate->__kceStatePreLoad__(pGpu, pEngstate, arg0);
 }
@@ -586,10 +595,6 @@ static inline NV_STATUS kceStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelC
     return pEngstate->__kceStatePostUnload__(pGpu, pEngstate, arg0);
 }
 
-static inline void kceStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) {
-    pEngstate->__kceStateDestroy__(pGpu, pEngstate);
-}
-
 static inline NV_STATUS kceStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) {
     return pEngstate->__kceStatePreUnload__(pGpu, pEngstate, arg0);
 }
@@ -7,7 +7,7 @@ extern "C" {
 #endif
 
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -337,6 +337,9 @@ typedef struct OBJCHANNEL
     // Used by Partition Scrubber
     KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance;
     NvHandle hPartitionRef;
+
+    NvBool bSecure;
+
 } OBJCHANNEL, *POBJCHANNEL;
 
 #define NV_METHOD(SubCh, Method, Num) \
[File diff suppressed because it is too large]
@@ -641,6 +641,7 @@ struct Subdevice {
     NV_STATUS (*__subdeviceCtrlCmdInternalConfComputeRotateKeys__)(struct Subdevice *, NV2080_CTRL_INTERNAL_CONF_COMPUTE_ROTATE_KEYS_PARAMS *);
     NV_STATUS (*__subdeviceCtrlCmdInternalConfComputeRCChannelsForKeyRotation__)(struct Subdevice *, NV2080_CTRL_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION_PARAMS *);
     NV_STATUS (*__subdeviceCtrlCmdInternalConfComputeSetGpuState__)(struct Subdevice *, NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_GPU_STATE_PARAMS *);
+    NV_STATUS (*__subdeviceCtrlCmdInternalConfComputeSetSecurityPolicy__)(struct Subdevice *, NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY_PARAMS *);
     NV_STATUS (*__subdeviceCtrlCmdInternalInitUserSharedData__)(struct Subdevice *, NV2080_CTRL_INTERNAL_INIT_USER_SHARED_DATA_PARAMS *);
     NV_STATUS (*__subdeviceCtrlCmdInternalUserSharedDataSetDataPoll__)(struct Subdevice *, NV2080_CTRL_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL_PARAMS *);
     NV_STATUS (*__subdeviceCtrlCmdInternalGspStartTrace__)(struct Subdevice *, NV2080_CTRL_CMD_INTERNAL_GSP_START_TRACE_INFO_PARAMS *);
@@ -1296,6 +1297,7 @@ NV_STATUS __nvoc_objCreate_Subdevice(Subdevice**, Dynamic*, NvU32, struct CALL_C
 #define subdeviceCtrlCmdInternalConfComputeRotateKeys(pSubdevice, pParams) subdeviceCtrlCmdInternalConfComputeRotateKeys_DISPATCH(pSubdevice, pParams)
 #define subdeviceCtrlCmdInternalConfComputeRCChannelsForKeyRotation(pSubdevice, pParams) subdeviceCtrlCmdInternalConfComputeRCChannelsForKeyRotation_DISPATCH(pSubdevice, pParams)
 #define subdeviceCtrlCmdInternalConfComputeSetGpuState(pSubdevice, pParams) subdeviceCtrlCmdInternalConfComputeSetGpuState_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdInternalConfComputeSetSecurityPolicy(pSubdevice, pParams) subdeviceCtrlCmdInternalConfComputeSetSecurityPolicy_DISPATCH(pSubdevice, pParams)
 #define subdeviceCtrlCmdInternalInitUserSharedData(pSubdevice, pParams) subdeviceCtrlCmdInternalInitUserSharedData_DISPATCH(pSubdevice, pParams)
 #define subdeviceCtrlCmdInternalUserSharedDataSetDataPoll(pSubdevice, pParams) subdeviceCtrlCmdInternalUserSharedDataSetDataPoll_DISPATCH(pSubdevice, pParams)
 #define subdeviceCtrlCmdInternalGspStartTrace(pSubdevice, pParams) subdeviceCtrlCmdInternalGspStartTrace_DISPATCH(pSubdevice, pParams)
@@ -4587,6 +4589,12 @@ static inline NV_STATUS subdeviceCtrlCmdInternalConfComputeSetGpuState_DISPATCH(
     return pSubdevice->__subdeviceCtrlCmdInternalConfComputeSetGpuState__(pSubdevice, pParams);
 }
 
+NV_STATUS subdeviceCtrlCmdInternalConfComputeSetSecurityPolicy_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY_PARAMS *pParams);
+
+static inline NV_STATUS subdeviceCtrlCmdInternalConfComputeSetSecurityPolicy_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY_PARAMS *pParams) {
+    return pSubdevice->__subdeviceCtrlCmdInternalConfComputeSetSecurityPolicy__(pSubdevice, pParams);
+}
+
 NV_STATUS subdeviceCtrlCmdInternalInitUserSharedData_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_INIT_USER_SHARED_DATA_PARAMS *pParams);
 
 static inline NV_STATUS subdeviceCtrlCmdInternalInitUserSharedData_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_INIT_USER_SHARED_DATA_PARAMS *pParams) {
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -33,7 +33,7 @@
 
 #define CE_MAX_BYTES_PER_LINE               0xffffffffULL
 #define CE_METHOD_SIZE_PER_BLOCK            0x64
-#define FAST_SCRUBBER_METHOD_SIZE_PER_BLOCK 0x78
+#define FAST_SCRUBBER_METHOD_SIZE_PER_BLOCK 0x94
 
 // number of bytes per sec2 method-stream (including host methods)
 #define SEC2_METHOD_SIZE_PER_BLOCK          0x94
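(The fast scrubber's per-block method size grows by 0x1C bytes, from 0x78 to 0x94, matching the SEC2 per-block size below; this is consistent with the extra secure-copy methods added in clc8b5.h above, though the diff itself does not state the reason.)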
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -130,9 +130,16 @@ typedef struct
     NV_ADDRESS_SPACE srcAddressSpace;
     NvU32 dstCpuCacheAttrib;
     NvU32 srcCpuCacheAttrib;
+
+    NvBool bSecureCopy;   // The copy encrypts/decrypts protected memory
+    NvBool bEncrypt;      // encrypt/decrypt
+    NvU64  authTagAddr;
+    NvU64  encryptIvAddr;
+
 } CHANNEL_PB_INFO;
 
 NV_STATUS channelSetupIDs(OBJCHANNEL *pChannel, OBJGPU *pGpu, NvBool bUseVasForCeCopy, NvBool bMIGInUse);
+NV_STATUS channelAllocSubdevice(OBJGPU *pGpu, OBJCHANNEL *pChannel);
 void channelSetupChannelBufferSizes(OBJCHANNEL *pChannel);
 NvU32 channelReadChannelMemdesc(OBJCHANNEL *pChannel, NvU32 offset);
 
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -25,6 +25,7 @@
 #include "gpu/ce/kernel_ce.h"
 #include "gpu/ce/kernel_ce_private.h"
 #include "gpu/eng_desc.h"
+#include "gpu/mem_mgr/ce_utils.h"
 #include "gpu_mgr/gpu_mgr.h"
 #include "kernel/gpu/intr/intr_service.h"
 #include "kernel/gpu/nvlink/kernel_nvlink.h"
@@ -32,6 +33,8 @@
 #include "vgpu/sdk-structures.h"
 #include "nvRmReg.h"
 
+#include "gpu/conf_compute/ccsl.h"
+
 NV_STATUS kceConstructEngine_IMPL(OBJGPU *pGpu, KernelCE *pKCe, ENGDESCRIPTOR engDesc)
 {
     NV_ASSERT_OR_RETURN(!RMCFG_FEATURE_PLATFORM_GSP, NV_ERR_NOT_SUPPORTED);
@@ -90,6 +93,195 @@ NvBool kceIsNewMissingEngineRemovalSequenceEnabled_IMPL(OBJGPU *pGpu, KernelCE *
     return NV_TRUE;
 }
 
+#define CE_FIPS_SELF_TEST_DATA_SIZE     16
+#define CE_FIPS_SELF_TEST_AUTH_TAG_SIZE 16
+#define CE_FIPS_SELF_TEST_IV_SIZE       12
+
+NV_STATUS
+kceRunFipsSelfTest
+(
+    OBJGPU *pGpu,
+    void   *pArg
+)
+{
+    KernelCE *pKCe = pArg;
+    MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
+    KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
+    MEMORY_DESCRIPTOR *pSrcMemDesc = NULL;
+    MEMORY_DESCRIPTOR *pDstMemDesc = NULL;
+    MEMORY_DESCRIPTOR *pAuthMemDesc = NULL;
+    MEMORY_DESCRIPTOR *pIvMemDesc = NULL;
+    CeUtils *pCeUtils = NULL;
+    pCcslContext pCcslCtx = NULL;
+    NV_STATUS status;
+    NV0050_ALLOCATION_PARAMETERS ceUtilsParams = {0};
+    CEUTILS_MEMCOPY_PARAMS params = {0};
+
+    NvU8 ceTestPlaintext[CE_FIPS_SELF_TEST_DATA_SIZE] = {
+        0x2d, 0x71, 0xbc, 0xfa, 0x91, 0x4e, 0x4a, 0xc0,
+        0x45, 0xb2, 0xaa, 0x60, 0x95, 0x5f, 0xad, 0x24
+    };
+    NvU8 decryptedData[CE_FIPS_SELF_TEST_DATA_SIZE] = { 0 };
+    NvU8 encryptedData[CE_FIPS_SELF_TEST_DATA_SIZE] = { 0 };
+    NvU8 dataAuth[CE_FIPS_SELF_TEST_AUTH_TAG_SIZE] = { 0 };
+
+    NV_ASSERT_OR_RETURN(gpuIsCCFeatureEnabled(pGpu), NV_ERR_NOT_SUPPORTED);
+
+    if (!gpuCheckEngineTable(pGpu, RM_ENGINE_TYPE_COPY(pKCe->publicID)) ||
+        ceIsCeGrce(pGpu, RM_ENGINE_TYPE_COPY(pKCe->publicID)))
+    {
+        // CE doesn't support encryption
+        return NV_OK;
+    }
+
+    if (kmigmgrIsMIGSupported(pGpu, pKernelMIGManager) &&
+        kmigmgrGetStaticInfo(pGpu, pKernelMIGManager) == NULL)
+    {
+        // Wait for KernelMigManager, as it might remap CEs
+        return NV_WARN_MORE_PROCESSING_REQUIRED;
+    }
+
+    NV_PRINTF(LEVEL_INFO, "Running FIPS test for CE%u\n", pKCe->publicID);
+
+    ceUtilsParams.flags |= DRF_DEF(0050_CEUTILS, _FLAGS, _FORCE_CE_ID, _TRUE);
+    ceUtilsParams.flags |= DRF_DEF(0050_CEUTILS, _FLAGS, _CC_SECURE, _TRUE);
+    ceUtilsParams.forceCeId = pKCe->publicID;
+
+    NV_ASSERT_OK_OR_GOTO(status,
+        objCreate(&pCeUtils, pMemoryManager, CeUtils, ENG_GET_GPU(pMemoryManager), NULL, &ceUtilsParams), failed);
+
+    NV_ASSERT_OK_OR_GOTO(status,
+        ccslContextInitViaChannel_HAL(&pCcslCtx, pCeUtils->pChannel->hClient,
+                                      pCeUtils->pChannel->channelId),
+        failed);
+
+    NV_ASSERT_OK_OR_GOTO(status, memdescCreate(&pSrcMemDesc, pGpu, sizeof ceTestPlaintext, 0, NV_TRUE, ADDR_FBMEM,
+                                               NV_MEMORY_UNCACHED, MEMDESC_ALLOC_FLAGS_PROTECTED), failed);
+    NV_ASSERT_OK_OR_GOTO(status, memdescAlloc(pSrcMemDesc), failed);
+
+    NV_ASSERT_OK_OR_GOTO(status, memdescCreate(&pDstMemDesc, pGpu, sizeof encryptedData, 0, NV_TRUE, ADDR_SYSMEM,
+                                               NV_MEMORY_UNCACHED, MEMDESC_FLAGS_ALLOC_IN_UNPROTECTED_MEMORY), failed);
+    NV_ASSERT_OK_OR_GOTO(status, memdescAlloc(pDstMemDesc), failed);
+
+    NV_ASSERT_OK_OR_GOTO(status, memdescCreate(&pAuthMemDesc, pGpu, sizeof dataAuth, 0, NV_TRUE, ADDR_SYSMEM,
+                                               NV_MEMORY_UNCACHED, MEMDESC_FLAGS_ALLOC_IN_UNPROTECTED_MEMORY), failed);
+    NV_ASSERT_OK_OR_GOTO(status, memdescAlloc(pAuthMemDesc), failed);
+
+    NV_ASSERT_OK_OR_GOTO(status, memdescCreate(&pIvMemDesc, pGpu, CE_FIPS_SELF_TEST_IV_SIZE, 0, NV_TRUE, ADDR_SYSMEM,
+                                               NV_MEMORY_UNCACHED, MEMDESC_FLAGS_ALLOC_IN_UNPROTECTED_MEMORY), failed);
+    NV_ASSERT_OK_OR_GOTO(status, memdescAlloc(pIvMemDesc), failed);
+
+    TRANSFER_SURFACE srcSurface  = { .pMemDesc = pSrcMemDesc,  .offset = 0 };
+    TRANSFER_SURFACE dstSurface  = { .pMemDesc = pDstMemDesc,  .offset = 0 };
+    TRANSFER_SURFACE authSurface = { .pMemDesc = pAuthMemDesc, .offset = 0 };
+
+    // Write data to allocations, encrypt using CE, and read back the results
+    NV_ASSERT_OK_OR_GOTO(status, memmgrMemDescMemSet(pMemoryManager, pDstMemDesc, 0, 0), failed);
+    NV_ASSERT_OK_OR_GOTO(status, memmgrMemDescMemSet(pMemoryManager, pAuthMemDesc, 0, 0), failed);
+    NV_ASSERT_OK_OR_GOTO(status,
+        memmgrMemWrite(pMemoryManager, &srcSurface, ceTestPlaintext, sizeof ceTestPlaintext, TRANSFER_FLAGS_NONE),
+        failed);
+
+    params.bSecureCopy = NV_TRUE;
+    params.authTagAddr = memdescGetPhysAddr(pAuthMemDesc, AT_GPU, 0);
+    params.encryptIvAddr = memdescGetPhysAddr(pIvMemDesc, AT_GPU, 0);
+    params.pDstMemDesc = pDstMemDesc;
+    params.dstOffset = 0;
+    params.pSrcMemDesc = pSrcMemDesc;
+    params.srcOffset = 0;
+    params.length = sizeof ceTestPlaintext;
+    params.bEncrypt = NV_TRUE;
+    NV_ASSERT_OK_OR_GOTO(status, ceutilsMemcopy(pCeUtils, &params), failed);
+
+    NV_ASSERT_OK_OR_GOTO(status,
+        memmgrMemRead(pMemoryManager, &dstSurface, encryptedData, sizeof encryptedData, TRANSFER_FLAGS_NONE), failed);
+    NV_ASSERT_OK_OR_GOTO(status,
+        memmgrMemRead(pMemoryManager, &authSurface, dataAuth, sizeof dataAuth, TRANSFER_FLAGS_NONE), failed);
+
+    // Decrypt using CPU and validate
+    NV_ASSERT_OK_OR_GOTO(status,
+        ccslDecrypt_HAL(pCcslCtx, sizeof decryptedData, encryptedData, NULL, NULL, 0, decryptedData, dataAuth),
+        failed);
+
+    NV_ASSERT_TRUE_OR_GOTO(status, portMemCmp(decryptedData, ceTestPlaintext, sizeof ceTestPlaintext) == 0,
+                           NV_ERR_INVALID_STATE, failed);
+
+    // Encrypt using CPU
+    NV_ASSERT_OK_OR_GOTO(status,
+        ccslEncrypt_HAL(pCcslCtx, sizeof ceTestPlaintext, ceTestPlaintext, NULL, 0, encryptedData, dataAuth), failed);
+
+    // Write data to allocations, decrypt using CE, read back, and validate
+    NV_ASSERT_OK_OR_GOTO(status,
+        memmgrMemWrite(pMemoryManager, &dstSurface, encryptedData, sizeof encryptedData, TRANSFER_FLAGS_NONE), failed);
+    NV_ASSERT_OK_OR_GOTO(status,
+        memmgrMemWrite(pMemoryManager, &authSurface, dataAuth, sizeof dataAuth, TRANSFER_FLAGS_NONE), failed);
+    NV_ASSERT_OK_OR_GOTO(status, memmgrMemDescMemSet(pMemoryManager, pSrcMemDesc, 0, 0), failed);
+
+    params.pDstMemDesc = pSrcMemDesc;
+    params.dstOffset = 0;
+    params.pSrcMemDesc = pDstMemDesc;
+    params.srcOffset = 0;
+    params.length = sizeof ceTestPlaintext;
+    params.bEncrypt = NV_FALSE;
+    NV_ASSERT_OK_OR_GOTO(status, ceutilsMemcopy(pCeUtils, &params), failed);
+
+    NV_ASSERT_OK_OR_GOTO(status,
+        memmgrMemRead(pMemoryManager, &srcSurface, decryptedData, sizeof decryptedData, TRANSFER_FLAGS_NONE), failed);
+
+    NV_ASSERT_TRUE_OR_GOTO(status, portMemCmp(decryptedData, ceTestPlaintext, sizeof ceTestPlaintext) == 0,
+                           NV_ERR_INVALID_STATE, failed);
+
+failed:
+    ccslContextClear(pCcslCtx);
+    objDelete(pCeUtils);
+    memdescFree(pSrcMemDesc);
+    memdescDestroy(pSrcMemDesc);
+    memdescFree(pDstMemDesc);
+    memdescDestroy(pDstMemDesc);
+    memdescFree(pAuthMemDesc);
+    memdescDestroy(pAuthMemDesc);
+    memdescFree(pIvMemDesc);
+    memdescDestroy(pIvMemDesc);
+
+    NV_PRINTF(LEVEL_INFO, "Test finished with status 0x%x\n", status);
+
+    return status;
+}
+
+NV_STATUS
+kceStateInitLocked_IMPL
+(
+    OBJGPU *pGpu,
+    KernelCE *pKCe
+)
+{
+    if (!gpuIsCCFeatureEnabled(pGpu) || !IS_SILICON(pGpu))
+    {
+        pKCe->bCcFipsSelfTestRequired = NV_FALSE;
+    }
+
+    if (pKCe->bCcFipsSelfTestRequired)
+    {
+        NV_ASSERT_OK_OR_RETURN(
+            kfifoAddSchedulingHandler(pGpu, GPU_GET_KERNEL_FIFO(pGpu), kceRunFipsSelfTest, pKCe, NULL, NULL));
+    }
+
+    return NV_OK;
+}
+
+void
+kceStateDestroy_IMPL
+(
+    OBJGPU *pGpu,
+    KernelCE *pKCe
+)
+{
+    if (pKCe->bCcFipsSelfTestRequired)
+    {
+        kfifoRemoveSchedulingHandler(pGpu, GPU_GET_KERNEL_FIFO(pGpu), kceRunFipsSelfTest, pKCe, NULL, NULL);
+    }
+}
+
 static void printCaps(OBJGPU *pGpu, KernelCE *pKCe, RM_ENGINE_TYPE rmEngineType, const NvU8 *capsTbl)
 {
     NV_PRINTF(LEVEL_INFO, "LCE%d caps (engineType = %d (%d))\n", pKCe->publicID,
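The self-test above registers itself through kfifoAddSchedulingHandler() and leans on the handler contract that the kernel_fifo.c hunk further down reworks: returning NV_WARN_MORE_PROCESSING_REQUIRED asks the dispatcher to call the handler again on a later pass (here, until KernelMIGManager static info exists and CE remapping has settled), NV_OK retires it, and any other status aborts the walk. A hedged sketch of a handler following that contract, with prerequisiteReady() and doRealWork() as hypothetical stand-ins:

    static NV_STATUS myPostSchedulingHandler(OBJGPU *pGpu, void *pArg)
    {
        // Not ready yet: ask the dispatcher to retry this handler on a later pass
        if (!prerequisiteReady(pGpu))
            return NV_WARN_MORE_PROCESSING_REQUIRED;

        // NV_OK marks the handler as handled; any other status aborts the walk
        return doRealWork(pGpu, pArg);
    }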
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,10 +36,10 @@ static void getKeyPairForKeySpace(NvU32 keySpace, NvBool bKernel, NvU32 *pGlobal
 static NV_STATUS triggerKeyRotationByKeyPair(OBJGPU *pGpu, ConfidentialCompute *pConfCompute, NvU32 h2dKey, NvU32 d2hKey);
 static NV_STATUS calculateEncryptionStatsByKeyPair(OBJGPU *pGpu, ConfidentialCompute *pConfCompute, NvU32 h2dKey, NvU32 d2hKey);
 static NV_STATUS notifyKeyRotationByKeyPair(OBJGPU *pGpu, ConfidentialCompute *pConfCompute, NvU32 h2dKey);
-static NvBool confComputeIsLowerThresholdCrossed(ConfidentialCompute *pConfCompute, KEY_ROTATION_STATS_INFO *pH2DInfo,
+static NvBool isLowerThresholdCrossed(ConfidentialCompute *pConfCompute, KEY_ROTATION_STATS_INFO *pH2DInfo,
                                       KEY_ROTATION_STATS_INFO *pD2HInfo);
-static NvBool confComputeIsUpperThresholdCrossed(ConfidentialCompute *pConfCompute, KEY_ROTATION_STATS_INFO *pH2DInfo,
+static NvBool isUpperThresholdCrossed(ConfidentialCompute *pConfCompute, KEY_ROTATION_STATS_INFO *pH2DInfo,
                                       KEY_ROTATION_STATS_INFO *pD2HInfo);
 static NV_STATUS keyRotationTimeoutCallback(OBJGPU *pGpu, OBJTMR *pTmr, TMR_EVENT *pTmrEvent);
 
 /*!
@@ -209,7 +209,7 @@ triggerKeyRotationByKeyPair
     // If key rotation is already scheduled because we crossed upper threshold or hit timeout
     // then we don't need to update encryption statistics as they will be zeroed out soon.
     //
     if ((state == KEY_ROTATION_STATUS_FAILED_THRESHOLD) ||
         (state == KEY_ROTATION_STATUS_FAILED_TIMEOUT))
     {
         return NV_OK;
@@ -227,15 +227,15 @@ triggerKeyRotationByKeyPair
     NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, h2dKey, &h2dIndex));
     NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, d2hKey, &d2hIndex));
 
-    if (confComputeIsUpperThresholdCrossed(pConfCompute, &pConfCompute->aggregateStats[h2dIndex],
+    if (isUpperThresholdCrossed(pConfCompute, &pConfCompute->aggregateStats[h2dIndex],
                                 &pConfCompute->aggregateStats[d2hIndex]))
     {
         NV_PRINTF(LEVEL_ERROR, "Crossed UPPER threshold for key = 0x%x\n", h2dKey);
         NV_ASSERT_OK_OR_RETURN(confComputeSetKeyRotationStatus(pConfCompute, h2dKey, KEY_ROTATION_STATUS_FAILED_THRESHOLD));
         NV_ASSERT_OK_OR_RETURN(confComputeScheduleKeyRotationWorkItem(pGpu, pConfCompute, h2dKey, d2hKey));
     }
-    else if (confComputeIsLowerThresholdCrossed(pConfCompute, &pConfCompute->aggregateStats[h2dIndex],
+    else if (isLowerThresholdCrossed(pConfCompute, &pConfCompute->aggregateStats[h2dIndex],
                                      &pConfCompute->aggregateStats[d2hIndex]))
     {
         NV_PRINTF(LEVEL_INFO, "Crossed LOWER threshold for key = 0x%x\n", h2dKey);
         if (state == KEY_ROTATION_STATUS_IDLE)
@@ -244,7 +244,7 @@ triggerKeyRotationByKeyPair
 
             //
             // Start the timeout timer once lower threshold is crossed.
             //
             // If timer is not already created then create it now. Else, just schedule a callback.
             // make sure callback is canceled if we schedule the KR task (after crossing lower or upper threshold)
             // make sure all these timer events are deleted as part of RM shutdown
@@ -266,7 +266,7 @@ triggerKeyRotationByKeyPair
 
         //
         // Notify clients of pending KR
        // We can't schedule a workitem for this since it may get scheduled too late and
        // we might have already crossed the upper threshold by then.
        //
        NV_ASSERT_OK_OR_RETURN(notifyKeyRotationByKeyPair(pGpu, pConfCompute, h2dKey));
@@ -324,7 +324,7 @@ calculateEncryptionStatsByKeyPair
     if (pEncStats == NULL)
     {
         NV_ASSERT(pEncStats != NULL);
         NV_PRINTF(LEVEL_ERROR, "Failed to get stats for chid = 0x%x RM engineId = 0x%x\n",
                   kchannelGetDebugTag(pKernelChannel), kchannelGetEngineType(pKernelChannel));
         return NV_ERR_INVALID_STATE;
     }
@@ -363,7 +363,7 @@ calculateEncryptionStatsByKeyPair
 }
 
 static NvBool
-confComputeIsUpperThresholdCrossed
+isUpperThresholdCrossed
 (
     ConfidentialCompute *pConfCompute,
     KEY_ROTATION_STATS_INFO *pH2DInfo,
@@ -376,22 +376,13 @@ confComputeIsUpperThresholdCrossed
     }
     else
     {
-        if ((pH2DInfo->totalBytesEncrypted > pConfCompute->upperThreshold.totalBytesEncrypted) ||
-            (pH2DInfo->totalEncryptOps > pConfCompute->upperThreshold.totalEncryptOps))
-        {
-            return NV_TRUE;
-        }
-        else if ((pD2HInfo->totalBytesEncrypted > pConfCompute->upperThreshold.totalBytesEncrypted) ||
-                 (pD2HInfo->totalEncryptOps > pConfCompute->upperThreshold.totalEncryptOps))
-        {
-            return NV_TRUE;
-        }
+        return (confComputeIsUpperThresholdCrossed(pConfCompute, pH2DInfo) ||
+                confComputeIsUpperThresholdCrossed(pConfCompute, pD2HInfo));
     }
-    return NV_FALSE;
 }
 
 static NvBool
-confComputeIsLowerThresholdCrossed
+isLowerThresholdCrossed
 (
     ConfidentialCompute *pConfCompute,
     KEY_ROTATION_STATS_INFO *pH2DInfo,
@@ -404,18 +395,9 @@ confComputeIsLowerThresholdCrossed
     }
     else
     {
-        if ((pH2DInfo->totalBytesEncrypted > pConfCompute->lowerThreshold.totalBytesEncrypted) ||
-            (pH2DInfo->totalEncryptOps > pConfCompute->lowerThreshold.totalEncryptOps))
-        {
-            return NV_TRUE;
-        }
-        else if ((pD2HInfo->totalBytesEncrypted > pConfCompute->lowerThreshold.totalBytesEncrypted) ||
-                 (pD2HInfo->totalEncryptOps > pConfCompute->lowerThreshold.totalEncryptOps))
-        {
-            return NV_TRUE;
-        }
+        return (confComputeIsLowerThresholdCrossed(pConfCompute, pH2DInfo) ||
+                confComputeIsLowerThresholdCrossed(pConfCompute, pD2HInfo));
     }
-    return NV_FALSE;
 }
 
 static void
@@ -495,12 +477,12 @@ notifyKeyRotationByKeyPair
 static void
 initKeyRotationRegistryOverrides
 (
     OBJGPU *pGpu,
     ConfidentialCompute *pConfCompute
 )
 {
     //
     // Temp CONFCOMP-984: This will be removed once all RM clients support
     // key rotation by default.
     //
     if (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED))
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -50,6 +50,8 @@
 static NV_STATUS _confComputeInitRegistryOverrides(OBJGPU *, ConfidentialCompute*);
 static NvU32 _confComputeGetKeyspaceSize(NvU16 keyspace);
 
+#define KEY_ROTATION_THRESHOLD_DELTA 20000000ull
+
 NV_STATUS
 confComputeConstructEngine_IMPL(OBJGPU *pGpu,
                                 ConfidentialCompute *pConfCompute,
@@ -154,6 +156,11 @@ confComputeConstructEngine_IMPL(OBJGPU *pGpu,
         }
     }
     // init key rotation state
+    pConfCompute->attackerAdvantage = SECURITY_POLICY_ATTACKER_ADVANTAGE_DEFAULT;
+    pConfCompute->keyRotationLimitDelta = KEY_ROTATION_THRESHOLD_DELTA;
+    NV_ASSERT_OK_OR_RETURN(confComputeSetKeyRotationThreshold(pConfCompute,
+                                                              pConfCompute->attackerAdvantage));
+
     for (NvU32 i = 0; i < CC_KEYSPACE_TOTAL_SIZE; i++)
     {
         pConfCompute->keyRotationState[i] = KEY_ROTATION_STATUS_IDLE;
@@ -165,6 +172,7 @@ confComputeConstructEngine_IMPL(OBJGPU *pGpu,
     pConfCompute->keyRotationChannelRefCount = 0;
     pConfCompute->keyRotationEnableMask = 0;
     NV_ASSERT_OK_OR_RETURN(confComputeEnableKeyRotationSupport_HAL(pGpu, pConfCompute));
+
     return NV_OK;
 }
 
@@ -427,8 +435,8 @@ confComputeStatePostLoad_IMPL
     NvU32 flags
 )
 {
     NV_STATUS status = NV_OK;
     RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
 
     NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi,
                                            pGpu->hInternalClient,
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -384,7 +384,14 @@ confComputeApiCtrlCmdSystemGetSecurityPolicy_IMPL
     NV_CONF_COMPUTE_CTRL_GET_SECURITY_POLICY_PARAMS *pParams
 )
 {
-    return NV_ERR_NOT_SUPPORTED;
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+
+    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());
+
+    pParams->attackerAdvantage = pGpuMgr->ccAttackerAdvantage;
+
+    return NV_OK;
 }
 
 NV_STATUS
@@ -394,6 +401,48 @@ confComputeApiCtrlCmdSystemSetSecurityPolicy_IMPL
     NV_CONF_COMPUTE_CTRL_SET_SECURITY_POLICY_PARAMS *pParams
 )
 {
-    return NV_ERR_NOT_SUPPORTED;
-}
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    OBJGPU *pGpu;
+    NvU32 gpuMask;
+    NvU32 gpuInstance = 0;
+    RM_API *pRmApi = NULL;
+    NV_STATUS status = NV_OK;
+    NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY_PARAMS params = {0};
+
+    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());
+
+    // CC security policy can only be set before GpuReadyState is set.
+    NV_ASSERT_OR_RETURN(pConfComputeApi->pCcCaps->bAcceptClientRequest == NV_FALSE, NV_ERR_INVALID_STATE);
+
+    if ((pParams->attackerAdvantage < SET_SECURITY_POLICY_ATTACKER_ADVANTAGE_MIN) ||
+        (pParams->attackerAdvantage > SET_SECURITY_POLICY_ATTACKER_ADVANTAGE_MAX))
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    params.attackerAdvantage = pParams->attackerAdvantage;
+    (void)gpumgrGetGpuAttachInfo(NULL, &gpuMask);
+
+    while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
+    {
+        pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+        ConfidentialCompute* pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
+
+        status = pRmApi->Control(pRmApi,
+                                 pGpu->hInternalClient,
+                                 pGpu->hInternalSubdevice,
+                                 NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_SECURITY_POLICY,
+                                 &params,
+                                 sizeof(params));
+        if (status != NV_OK)
+            return status;
+
+        NV_ASSERT_OK_OR_RETURN(confComputeSetKeyRotationThreshold(pConfCompute,
+                                                                  pParams->attackerAdvantage));
+    }
+
+    pGpuMgr->ccAttackerAdvantage = pParams->attackerAdvantage;
+
+    return status;
+}
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -207,7 +207,7 @@ performKeyRotationByKeyPair
     return NV_OK;
 }
 
 /*!
  * Checks if all channels corresponding to key pair
  * are disabled and schedules key rotation.
  *
@@ -253,7 +253,7 @@ confComputeCheckAndScheduleKeyRotation_IMPL
     return NV_OK;
 }
 
 /*!
  * schedules key rotation workitem
  *
  * @param[in] pGpu : OBJGPU pointer
@@ -304,7 +304,7 @@ confComputeScheduleKeyRotationWorkItem_IMPL
 }
 
 /*!
  * Sets KEY_ROTATION_STATUS for key pair corresponding to given key
  *
  * @param[in] pConfCompute : conf comp pointer
  * @param[in] globalKey : key for which to set the status
@@ -328,7 +328,7 @@ NV_STATUS confComputeSetKeyRotationStatus_IMPL
 }
 
 /*!
  * Gets KEY_ROTATION_STATUS for given key
  *
  * @param[in] pConfCompute : conf comp pointer
  * @param[in] globalKey : key for which to get the status
@@ -346,7 +346,7 @@ NV_STATUS confComputeGetKeyRotationStatus_IMPL
     NvU32 h2dIndex, d2hIndex;
     NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, h2dKey, &h2dIndex));
     NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, d2hKey, &d2hIndex));
     NV_ASSERT_OR_RETURN(pConfCompute->keyRotationState[h2dIndex] ==
                         pConfCompute->keyRotationState[d2hIndex], NV_ERR_INVALID_STATE);
     *pStatus = pConfCompute->keyRotationState[h2dIndex];
     return NV_OK;
@@ -406,7 +406,7 @@ NV_STATUS
 confComputeUpdateFreedChannelStats_IMPL
 (
     OBJGPU *pGpu,
     ConfidentialCompute *pConfCompute,
     KernelChannel *pKernelChannel
 )
 {
@@ -430,3 +430,75 @@ confComputeUpdateFreedChannelStats_IMPL
     pConfCompute->freedChannelAggregateStats[d2hIndex].totalEncryptOps += pEncStats->numEncryptionsD2H;
     return NV_OK;
 }
+
+NV_STATUS
+confComputeSetKeyRotationThreshold_IMPL(ConfidentialCompute *pConfCompute,
+                                        NvU64 attackerAdvantage)
+{
+    //
+    // Limit beyond which an encryption key cannot be used.
+    // The index is the attacker advantage as described in
+    // https://datatracker.ietf.org/doc/draft-irtf-cfrg-aead-limits/
+    // The limit is expressed in units of total amount of data encrypted
+    // (in units of 16 B) plus the number of encryption invocations.
+    //
+    const NvU32 offset = 50;
+
+    static const NvU64 keyRotationUpperThreshold[] = {
+        777472127993ull,
+        549755813887ull,
+        388736063996ull,
+        274877906943ull,
+        194368031997ull,
+        137438953471ull,
+        97184015998ull,
+        68719476735ull,
+        48592007999ull,
+        34359738367ull,
+        24296003999ull,
+        17179869183ull,
+        12148001999ull,
+        8589934591ull,
+        6074000999ull,
+        4294967295ull,
+        3037000499ull,
+        2147483647ull,
+        1518500249ull,
+        1073741823ull,
+        759250124ull,
+        536870911ull,
+        379625061ull,
+        268435455ull,
+        189812530ull,
+        134217727ull};
+
+    NV_ASSERT_OR_RETURN((attackerAdvantage >= offset) &&
+        (attackerAdvantage <= (offset + NV_ARRAY_ELEMENTS(keyRotationUpperThreshold) - 1)),
+        NV_ERR_INVALID_ARGUMENT);
+
+    pConfCompute->keyRotationUpperLimit = keyRotationUpperThreshold[attackerAdvantage - offset];
+    pConfCompute->keyRotationLowerLimit = pConfCompute->keyRotationUpperLimit -
+                                          pConfCompute->keyRotationLimitDelta;
+
+    NV_PRINTF(LEVEL_INFO, "Setting key rotation attacker advantage to %llu.\n", attackerAdvantage);
+    NV_PRINTF(LEVEL_INFO, "Key rotation lower limit is %llu and upper limit is %llu.\n",
+              pConfCompute->keyRotationLowerLimit, pConfCompute->keyRotationUpperLimit);
+
+    return NV_OK;
+}
+
+NvBool confComputeIsUpperThresholdCrossed_IMPL(ConfidentialCompute *pConfCompute,
+                                               const KEY_ROTATION_STATS_INFO *pStatsInfo)
+{
+    const NvU64 totalEncryptWork = (pStatsInfo->totalBytesEncrypted / 16) + pStatsInfo->totalEncryptOps;
+
+    return (totalEncryptWork > pConfCompute->keyRotationUpperLimit);
+}
+
+NvBool confComputeIsLowerThresholdCrossed_IMPL(ConfidentialCompute *pConfCompute,
+                                               const KEY_ROTATION_STATS_INFO *pStatsInfo)
+{
+    const NvU64 totalEncryptWork = (pStatsInfo->totalBytesEncrypted / 16) + pStatsInfo->totalEncryptOps;
+
+    return (totalEncryptWork > pConfCompute->keyRotationLowerLimit);
+}
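To make the table above concrete (an illustrative reading, reusing the array as defined in the new function): it is indexed by (attackerAdvantage - 50), so an attacker advantage of 63 selects entry 13, an upper limit of 8589934591 = 2^33 - 1 units, where one unit is 16 bytes encrypted or one encryption invocation -- the same bytes/16 + ops metric the two *_IMPL threshold checks above compute. Successive entries shrink by roughly a factor of sqrt(2), halving the budget every two steps of attacker advantage.

    NvU64 upperLimit = keyRotationUpperThreshold[63 - 50]; // 8589934591ull == (1ull << 33) - 1

    // A key that has encrypted 64 GiB across 1,000,000 invocations has used
    // (64ull << 30) / 16 + 1000000 = 4295967296 units -- just over half this budget.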
@@ -3021,57 +3021,64 @@ kfifoTriggerPostSchedulingEnableCallback_IMPL
 {
     NV_STATUS status = NV_OK;
     FifoSchedulingHandlerEntry *pEntry;
-    NvBool bRetry = NV_FALSE;
+    NvBool bFirstPass = NV_TRUE;
+    NvBool bRetry;
 
-    for (pEntry = listHead(&pKernelFifo->postSchedulingEnableHandlerList);
-         pEntry != NULL;
-         pEntry = listNext(&pKernelFifo->postSchedulingEnableHandlerList, pEntry))
+    do
     {
-        NV_ASSERT_OR_ELSE(pEntry->pCallback != NULL,
-            status = NV_ERR_INVALID_STATE; break;);
+        NvBool bMadeProgress = NV_FALSE;
 
-        pEntry->bHandled = NV_FALSE;
-        status = pEntry->pCallback(pGpu, pEntry->pCallbackParam);
+        bRetry = NV_FALSE;
 
-        // Retry mechanism: Some callbacks depend on other callbacks in this list.
-        bRetry = bRetry || (status == NV_WARN_MORE_PROCESSING_REQUIRED);
+        for (pEntry = listHead(&pKernelFifo->postSchedulingEnableHandlerList);
+             pEntry != NULL;
+             pEntry = listNext(&pKernelFifo->postSchedulingEnableHandlerList, pEntry))
+        {
+            NV_ASSERT_OR_ELSE(pEntry->pCallback != NULL,
+                status = NV_ERR_INVALID_STATE; break;);
 
-        if (status == NV_WARN_MORE_PROCESSING_REQUIRED)
-            // Quash retry status
-            status = NV_OK;
-        else if (status == NV_OK)
-            // Successfully handled, no need to retry
-            pEntry->bHandled = NV_TRUE;
-        else
-            // Actual error, abort
-            break;
-    }
+            if (bFirstPass)
+            {
+                // Reset bHandled set by a previous call (for example, during suspend-resume)
+                pEntry->bHandled = NV_FALSE;
+            }
+            else if (pEntry->bHandled)
+            {
+                continue;
+            }
 
-    // If we hit an actual error or completed everything successfully, return early.
-    if ((status != NV_OK) || !bRetry)
-        return status;
+            status = pEntry->pCallback(pGpu, pEntry->pCallbackParam);
 
-    // Second pass, retry anything that asked nicely to be deferred
-    for (pEntry = listHead(&pKernelFifo->postSchedulingEnableHandlerList);
-         pEntry != NULL;
-         pEntry = listNext(&pKernelFifo->postSchedulingEnableHandlerList, pEntry))
-    {
-        NV_ASSERT_OR_ELSE(pEntry->pCallback != NULL,
-            status = NV_ERR_INVALID_STATE; break;);
+            if (status == NV_WARN_MORE_PROCESSING_REQUIRED)
+            {
+                // Retry mechanism: Some callbacks depend on other callbacks in this list.
+                bRetry = NV_TRUE;
+                // Quash retry status
+                status = NV_OK;
+            }
+            else if (status == NV_OK)
+            {
+                // Successfully handled, no need to retry
+                pEntry->bHandled = NV_TRUE;
+                bMadeProgress = NV_TRUE;
+            }
+            else
+            {
+                // Actual error, abort
+                NV_ASSERT(0);
+                break;
+            }
+        }
 
-        // Skip anything that was completed successfully
-        if (pEntry->bHandled)
-            continue;
+        // We are stuck in a loop, and all remaining callbacks are returning NV_WARN_MORE_PROCESSING_REQUIRED
+        NV_ASSERT_OR_RETURN(bMadeProgress || status != NV_OK, NV_ERR_INVALID_STATE);
 
-        NV_CHECK_OK_OR_ELSE(status, LEVEL_ERROR,
-            pEntry->pCallback(pGpu, pEntry->pCallbackParam),
-            break; );
-    }
+        bFirstPass = NV_FALSE;
+    } while (bRetry && status == NV_OK);
 
     return status;
 }
 
 
 /*!
  * @brief Notify handlers that scheduling will soon be disabled.
  *
@@ -3089,53 +3096,60 @@ kfifoTriggerPreSchedulingDisableCallback_IMPL
 {
     NV_STATUS status = NV_OK;
     FifoSchedulingHandlerEntry *pEntry;
-    NvBool bRetry = NV_FALSE;
+    NvBool bFirstPass = NV_TRUE;
+    NvBool bRetry;
 
-    // First pass
-    for (pEntry = listHead(&pKernelFifo->preSchedulingDisableHandlerList);
-         pEntry != NULL;
-         pEntry = listNext(&pKernelFifo->preSchedulingDisableHandlerList, pEntry))
+    do
     {
-        NV_ASSERT_OR_ELSE(pEntry->pCallback != NULL,
-            status = NV_ERR_INVALID_STATE; break;);
+        NvBool bMadeProgress = NV_FALSE;
 
-        pEntry->bHandled = NV_FALSE;
-        status = pEntry->pCallback(pGpu, pEntry->pCallbackParam);
+        bRetry = NV_FALSE;
 
-        // Retry mechanism: Some callbacks depend on other callbacks in this list.
-        bRetry = bRetry || (status == NV_WARN_MORE_PROCESSING_REQUIRED);
+        for (pEntry = listHead(&pKernelFifo->preSchedulingDisableHandlerList);
+             pEntry != NULL;
+             pEntry = listNext(&pKernelFifo->preSchedulingDisableHandlerList, pEntry))
+        {
+            NV_ASSERT_OR_ELSE(pEntry->pCallback != NULL,
+                status = NV_ERR_INVALID_STATE; break;);
 
-        if (status == NV_WARN_MORE_PROCESSING_REQUIRED)
-            // Quash retry status
-            status = NV_OK;
-        else if (status == NV_OK)
-            // Successfully handled, no need to retry
-            pEntry->bHandled = NV_TRUE;
-        else
-            // Actual error, abort
-            break;
-    }
+            if (bFirstPass)
+            {
+                // Reset bHandled set by a previous call (for example, during suspend-resume)
+                pEntry->bHandled = NV_FALSE;
+            }
+            else if (pEntry->bHandled)
+            {
+                continue;
+            }
 
-    // If we hit an actual error or completed everything successfully, return early.
-    if ((status != NV_OK) || !bRetry)
-        return status;
+            status = pEntry->pCallback(pGpu, pEntry->pCallbackParam);
 
-    // Second pass, retry anything that asked nicely to be deferred
-    for (pEntry = listHead(&pKernelFifo->preSchedulingDisableHandlerList);
-         pEntry != NULL;
-         pEntry = listNext(&pKernelFifo->preSchedulingDisableHandlerList, pEntry))
-    {
-        NV_ASSERT_OR_ELSE(pEntry->pCallback != NULL,
-            status = NV_ERR_INVALID_STATE; break;);
+            if (status == NV_WARN_MORE_PROCESSING_REQUIRED)
+            {
+                // Retry mechanism: Some callbacks depend on other callbacks in this list.
+                bRetry = NV_TRUE;
+                // Quash retry status
+                status = NV_OK;
+            }
+            else if (status == NV_OK)
+            {
+                // Successfully handled, no need to retry
+                pEntry->bHandled = NV_TRUE;
+                bMadeProgress = NV_TRUE;
+            }
+            else
+            {
+                // Actual error, abort
+                NV_ASSERT(0);
+                break;
+            }
+        }
 
-        // Skip anything that was completed successfully
-        if (pEntry->bHandled)
-            continue;
+        // We are stuck in a loop, and all remaining callbacks are returning NV_WARN_MORE_PROCESSING_REQUIRED
+        NV_ASSERT_OR_RETURN(bMadeProgress || status != NV_OK, NV_ERR_INVALID_STATE);
 
-        NV_CHECK_OK_OR_ELSE(status, LEVEL_ERROR,
-            pEntry->pCallback(pGpu, pEntry->pCallbackParam),
-            break; );
-    }
+        bFirstPass = NV_FALSE;
+    } while (bRetry && status == NV_OK);
 
     return status;
 }
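Both dispatcher rewrites above share the same shape: the fixed two-pass walk becomes a do/while that keeps iterating while any handler defers, and a made-progress check converts "every remaining handler deferred again" into an error instead of an infinite loop. A self-contained sketch of that pattern in plain C (OK/RETRY/ERROR standing in for the NV_STATUS codes):

    #include <stdbool.h>
    #include <stddef.h>

    enum { OK = 0, RETRY = 1, ERROR = 2 };

    static int runHandlers(int (*handlers[])(void), bool handled[], size_t n)
    {
        bool firstPass = true;
        bool retry;

        do {
            bool madeProgress = false;
            retry = false;

            for (size_t i = 0; i < n; i++) {
                if (firstPass)
                    handled[i] = false;      // reset state left by a previous run
                else if (handled[i])
                    continue;                // retired on an earlier pass

                int status = handlers[i]();
                if (status == RETRY) {
                    retry = true;            // depends on another handler; revisit later
                } else if (status == OK) {
                    handled[i] = true;
                    madeProgress = true;
                } else {
                    return status;           // real error: abort the walk
                }
            }

            if (retry && !madeProgress)
                return ERROR;                // nothing retired this pass; avoid spinning forever

            firstPass = false;
        } while (retry);

        return OK;
    }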
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2012-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2012-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -47,9 +47,7 @@
 #include "class/cla06fsubch.h" // NVA06F_SUBCHANNEL_COPY_ENGINE
 #include "class/cl003e.h"      // NV01_MEMORY_SYSTEM
 #include "class/cl0040.h"      // NV01_MEMORY_LOCAL_USER
-#include "class/cl0080.h"      // NV01_DEVICE_0
 #include "class/cl50a0.h"      // NV50_MEMORY_VIRTUAL
-#include "class/clc637.h"      // AMPERE_SMC_PARTITION_REF
 #include "class/cl00c2.h"      // NV01_MEMORY_LOCAL_PHYSICAL
 #include "class/clb0b5.h"      // MAXWELL_DMA_COPY_A
 #include "class/clc8b5.h"      // HOPPER_DMA_COPY_A
@@ -395,17 +393,16 @@ memmgrMemUtilsChannelInitialize_GM107
 {
     NV_STATUS rmStatus;
     NV_STATUS lockStatus;
-    RsClient *pRsClient;
-    NvHandle hClient;
-    NvHandle hDevice;
-    NvHandle hPhysMem;
-    NvU64 size;
-    NvHandle hChannel;
-    NvHandle hErrNotifierVirt;
-    NvHandle hErrNotifierPhys;
-    NvHandle hPushBuffer;
+    RsClient *pRsClient = pChannel->pRsClient;
+    NvHandle hClient = pChannel->hClient;
+    NvHandle hDevice = pChannel->deviceId;
+    NvHandle hPhysMem = pChannel->physMemId;
+    NvU64 size = pChannel->channelSize;
+    NvHandle hChannel = pChannel->channelId;
+    NvHandle hErrNotifierVirt = pChannel->errNotifierIdVirt;
+    NvHandle hErrNotifierPhys = pChannel->errNotifierIdPhys;
+    NvHandle hPushBuffer = pChannel->pushBufferId;
     RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
-    Heap *pHeap = GPU_GET_HEAP(pGpu);
     NvBool bMIGInUse = IS_MIG_IN_USE(pGpu);
     NvU8 *pErrNotifierCpuVA = NULL;
     NV_ADDRESS_SPACE userdAddrSpace;
@@ -422,61 +419,15 @@ memmgrMemUtilsChannelInitialize_GM107
     //
     union
     {
-        NV0080_ALLOC_PARAMETERS nv0080;
-        NV2080_ALLOC_PARAMETERS nv2080;
-        NVC637_ALLOCATION_PARAMETERS nvC637;
         NV_VASPACE_ALLOCATION_PARAMETERS va;
         NV_MEMORY_ALLOCATION_PARAMS mem;
     } *pParams = NULL;
 
-    size = pChannel->channelSize;
-    hPhysMem = pChannel->physMemId;
-    hChannel = pChannel->channelId;
-    hErrNotifierVirt = pChannel->errNotifierIdVirt;
-    hErrNotifierPhys = pChannel->errNotifierIdPhys;
-    hPushBuffer = pChannel->pushBufferId;
-
     if (pCl->getProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IO_COHERENT))
     {
         cacheSnoopFlag = DRF_DEF(OS46, _FLAGS, _CACHE_SNOOP, _ENABLE);
     }
 
-    if (!pChannel->bClientAllocated)
-    {
-        NV_CHECK_OK_OR_RETURN(
-            LEVEL_ERROR,
-            pRmApi->AllocWithHandle(pRmApi, NV01_NULL_OBJECT, NV01_NULL_OBJECT,
-                                    NV01_NULL_OBJECT, NV01_ROOT,
-                                    &pChannel->hClient, sizeof(pChannel->hClient)));
-
-        NV_ASSERT_OK_OR_GOTO(
-            rmStatus,
-            serverGetClientUnderLock(&g_resServ, pChannel->hClient, &pRsClient),
-            exit_free_client);
-
-        pChannel->pRsClient = pRsClient;
-
-        if (IS_VIRTUAL(pGpu))
-        {
-            NV_ASSERT_OK_OR_GOTO(
-                rmStatus,
-                clientSetHandleGenerator(pRsClient, RS_UNIQUE_HANDLE_BASE,
-                                         RS_UNIQUE_HANDLE_RANGE/2 - VGPU_RESERVED_HANDLE_RANGE),
-                exit_free_client);
-        }
-        else
-        {
-            NV_ASSERT_OK_OR_GOTO(
-                rmStatus,
-                clientSetHandleGenerator(pRsClient, 1U, ~0U - 1U),
-                exit_free_client);
-        }
-    }
-    else
-        pRsClient = pChannel->pRsClient;
-
-    hClient = pRsClient->hClient;
-
     pParams = portMemAllocNonPaged(sizeof(*pParams));
     if (pParams == NULL)
     {
@@ -484,102 +435,11 @@ memmgrMemUtilsChannelInitialize_GM107
         goto exit_free_client;
     }
 
-    if (pChannel->deviceId == NV01_NULL_OBJECT)
-    {
-        NV_ASSERT_OK_OR_GOTO(
-            rmStatus,
-            clientGenResourceHandle(pRsClient, &pChannel->deviceId),
-            exit_free_client);
-
-        NV0080_ALLOC_PARAMETERS *pNv0080 = &pParams->nv0080;
-
-        portMemSet(pNv0080, 0, sizeof(*pNv0080));
-        // Which device are we?
-        pNv0080->deviceId = gpuGetDeviceInstance(pGpu);
-        pNv0080->hClientShare = hClient;
-
-        NV_CHECK_OK_OR_GOTO(
-            rmStatus,
-            LEVEL_ERROR,
-            pRmApi->AllocWithHandle(pRmApi, hClient, hClient, pChannel->deviceId,
-                                    NV01_DEVICE_0, pNv0080, sizeof(*pNv0080)),
-            exit_free_client);
-    }
-    hDevice = pChannel->deviceId;
-
-    // allocate a subdevice
-    if (pChannel->subdeviceId == NV01_NULL_OBJECT)
-    {
-        NV_ASSERT_OK_OR_GOTO(
-            rmStatus,
-            clientGenResourceHandle(pRsClient, &pChannel->subdeviceId),
-            exit_free_client);
-
-        NV2080_ALLOC_PARAMETERS *pNv2080 = &pParams->nv2080;
-        portMemSet(pNv2080, 0, sizeof(*pNv2080));
-        pNv2080->subDeviceId = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
-
-        NV_CHECK_OK_OR_GOTO(
-            rmStatus,
-            LEVEL_ERROR,
-            pRmApi->AllocWithHandle(pRmApi, hClient, hDevice, pChannel->subdeviceId,
-                                    NV20_SUBDEVICE_0,
-                                    pNv2080,
-                                    sizeof(*pNv2080)),
-            exit_free_client);
-    }
-
-    // MIG support is only added for PMA scrubber
-    if (bMIGInUse && (pChannel->pKernelMIGGpuInstance != NULL))
-    {
-        NVC637_ALLOCATION_PARAMETERS *pNvC637 = &pParams->nvC637;
-
-        NV_ASSERT_OK_OR_GOTO(
-            rmStatus,
-            clientGenResourceHandle(pRsClient, &pChannel->hPartitionRef),
-            exit_free_client);
-
-        portMemSet(pNvC637, 0, sizeof(*pNvC637));
-        pNvC637->swizzId = pChannel->pKernelMIGGpuInstance->swizzId;
-
-        NV_ASSERT_OK_OR_GOTO(
-            rmStatus,
-            pRmApi->AllocWithHandle(pRmApi, hClient,
-                                    pChannel->subdeviceId,
-                                    pChannel->hPartitionRef,
-                                    AMPERE_SMC_PARTITION_REF,
-                                    pNvC637,
-                                    sizeof(*pNvC637)),
-            exit_free_client);
-
-        pHeap = pChannel->pKernelMIGGpuInstance->pMemoryPartitionHeap;
-    }
-
     //
     // client allocated userd only supported on volta+
     // TODO: Use property to check if client allocated userd is supported
     //
-    pChannel->bClientUserd = NV_FALSE;
-    if (IsVOLTAorBetter(pGpu))
-    {
-        NvU32 pmaConfig = 0;
-        pmaConfig = PMA_QUERY_NUMA_ENABLED | PMA_QUERY_NUMA_ONLINED;
-        NV_ASSERT_OK_OR_GOTO(
-            rmStatus,
-            pmaQueryConfigs(&pHeap->pmaObject, &pmaConfig),
-            exit_free_client);
-        if (pmaConfig & PMA_QUERY_NUMA_ENABLED)
-        {
-            if (pmaConfig & PMA_QUERY_NUMA_ONLINED)
-                pChannel->bClientUserd = NV_TRUE;
-            else
-                pChannel->bClientUserd = NV_FALSE;
-        }
-        else
-        {
-            pChannel->bClientUserd = NV_TRUE;
-        }
-    }
+    pChannel->bClientUserd = IsVOLTAorBetter(pGpu);
 
     //
     // We need to allocate a VAS to use for CE copies, but also for
@@ -1044,56 +904,6 @@ memmgrMemUtilsCopyEngineInitialize_GM107
     return rmStatus;
 }
 
-static NV_STATUS _memUtilsGetCe_GM107
-(
-    OBJGPU *pGpu,
-    NvHandle hClient,
-    NvHandle hDevice,
-    KernelCE **ppKCe
-)
-{
-    KernelCE *pKCe = NULL;
-    KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
-    NV_STATUS status = NV_OK;
-    NvU32 ceInst = 0;
-
-    *ppKCe = NULL;
-
-    if (IS_MIG_IN_USE(pGpu))
-    {
-        RsClient *pClient;
-        Device *pDevice;
-
-        NV_ASSERT_OK_OR_RETURN(
-            serverGetClientUnderLock(&g_resServ, hClient, &pClient));
-
-        NV_ASSERT_OK_OR_RETURN(
-            deviceGetByHandle(pClient, hDevice, &pDevice));
-
-        status = kmigmgrGetGPUInstanceScrubberCe(pGpu, GPU_GET_KERNEL_MIG_MANAGER(pGpu), pDevice, &ceInst);
-    }
-    else
-    {
-        NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, gpuUpdateEngineTable(pGpu));
-
-        KCE_ITER_ALL_BEGIN(pGpu, pKCe, 0)
-            if (kbusCheckEngine_HAL(pGpu, pKernelBus, ENG_CE(pKCe->publicID)) &&
-                !ceIsCeGrce(pGpu, RM_ENGINE_TYPE_COPY(pKCe->publicID)) &&
-                gpuCheckEngineTable(pGpu, RM_ENGINE_TYPE_COPY(pKCe->publicID)))
-            {
-                ceInst = kceInst;
-                break;
-            }
-        KCE_ITER_END_OR_RETURN_ERROR
-    }
-
-    NV_ASSERT_OK_OR_RETURN(status);
-
-    *ppKCe = GPU_GET_KCE(pGpu, ceInst);
-    return status;
-}
-
 static NV_STATUS _memUtilsAllocCe_GM107
 (
     OBJGPU *pGpu,
@@ -1106,16 +916,11 @@ static NV_STATUS _memUtilsAllocCe_GM107
 
 )
 {
-    KernelCE *pKCe = NULL;
-    NVC0B5_ALLOCATION_PARAMETERS createParams;
+    NVC0B5_ALLOCATION_PARAMETERS createParams = {0};
     RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
 
     createParams.version = NVC0B5_ALLOCATION_PARAMETERS_VERSION_1;
-
-    NV_ASSERT_OK_OR_RETURN(_memUtilsGetCe_GM107(pGpu, hClientId, hDeviceId, &pKCe));
-    NV_ASSERT_OR_RETURN((pKCe != NULL), NV_ERR_INVALID_STATE);
-
-    createParams.engineType = NV2080_ENGINE_TYPE_COPY(pKCe->publicID);
+    createParams.engineType = NV2080_ENGINE_TYPE_COPY(pChannel->ceId);
     memmgrMemUtilsGetCopyEngineClass_HAL(pGpu, pMemoryManager, &pChannel->hTdCopyClass);
     pChannel->engineType = gpuGetRmEngineType(createParams.engineType);
 
@@ -1135,7 +940,6 @@ static NV_STATUS _memUtilsAllocCe_GM107
                             &createParams,
                             sizeof(createParams)));
 
-    pChannel->ceId = pKCe->publicID;
     return NV_OK;
 }
 
@@ -1280,21 +1084,15 @@ _memUtilsAllocateChannel
     NvU32 hClass;
     RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
     NvBool bMIGInUse = IS_MIG_IN_USE(pGpu);
-    RM_ENGINE_TYPE engineType;
     NvU32 flags = DRF_DEF(OS04, _FLAGS, _CHANNEL_SKIP_SCRUBBER, _TRUE);
+    RM_ENGINE_TYPE engineType = (pChannel->type == SWL_SCRUBBER_CHANNEL) ?
+        RM_ENGINE_TYPE_SEC2 : RM_ENGINE_TYPE_COPY(pChannel->ceId);
 
-    if (pChannel->type == SWL_SCRUBBER_CHANNEL)
+    if (pChannel->bSecure)
     {
-        engineType = RM_ENGINE_TYPE_SEC2;
         flags |= DRF_DEF(OS04, _FLAGS, _CC_SECURE, _TRUE);
     }
-    else
-    {
-        KernelCE *pKCe = NULL;
-        NV_ASSERT_OK_OR_RETURN(_memUtilsGetCe_GM107(pGpu, hClientId, hDeviceId, &pKCe));
-        NV_ASSERT_OR_RETURN((pKCe != NULL), NV_ERR_INVALID_STATE);
-        engineType = RM_ENGINE_TYPE_COPY(pKCe->publicID);
-    }
 
     portMemSet(&channelGPFIFOAllocParams, 0, sizeof(NV_CHANNEL_ALLOC_PARAMS));
     channelGPFIFOAllocParams.hObjectError = hObjectError;
     channelGPFIFOAllocParams.hObjectBuffer = hObjectBuffer;
@@ -1363,7 +1161,6 @@ _memUtilsAllocateChannel
         SLI_LOOP_END
     }
 
-
     NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(
         rmStatus,
         pRmApi->AllocWithHandle(pRmApi,
@@ -1534,6 +1331,8 @@ memmgrMemUtilsAllocateEccScrubber_GM107
     OBJCHANNEL *pChannel
 )
 {
+    NV_ASSERT_OK_OR_RETURN(channelAllocSubdevice(pGpu, pChannel));
+
     memmgrMemUtilsChannelInitialize_HAL(pGpu, pMemoryManager, pChannel);
     memmgrMemUtilsCopyEngineInitialize_HAL(pGpu, pMemoryManager, pChannel);
@@ -1565,6 +1364,8 @@ memmgrMemUtilsAllocateEccAllocScrubber_GM107
     NV_STATUS lockStatus;
     RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
 
+    NV_ASSERT_OK_OR_RETURN(channelAllocSubdevice(pGpu, pChannel));
+
     memmgrMemUtilsChannelInitialize_HAL(pGpu, pMemoryManager, pEccSyncChannel);
     memmgrMemUtilsCopyEngineInitialize_HAL(pGpu, pMemoryManager, pEccSyncChannel);
 
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,6 +36,7 @@
 #include "gpu/mem_mgr/ce_utils.h"
 #include "kernel/gpu/mem_mgr/ce_utils_sizes.h"
 #include "vgpu/rpc_headers.h"
+#include "gpu/device/device.h"
 
 #include "class/clb0b5.h" // MAXWELL_DMA_COPY_A
 #include "class/clc0b5.h" // PASCAL_DMA_COPY_A
@@ -47,6 +48,50 @@
 
 #include "class/cl0080.h"
 
+static NV_STATUS _memUtilsGetCe
+(
+    OBJGPU   *pGpu,
+    NvHandle  hClient,
+    NvHandle  hDevice,
+    NvU32    *pCeInstance
+)
+{
+    if (IS_MIG_IN_USE(pGpu))
+    {
+        RsClient *pClient;
+        Device   *pDevice;
+
+        NV_ASSERT_OK_OR_RETURN(
+            serverGetClientUnderLock(&g_resServ, hClient, &pClient));
+
+        NV_ASSERT_OK_OR_RETURN(
+            deviceGetByHandle(pClient, hDevice, &pDevice));
+
+        NV_ASSERT_OK_OR_RETURN(kmigmgrGetGPUInstanceScrubberCe(pGpu, GPU_GET_KERNEL_MIG_MANAGER(pGpu), pDevice, pCeInstance));
+        return NV_OK;
+    }
+    else
+    {
+        KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
+
+        NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, gpuUpdateEngineTable(pGpu));
+
+        KernelCE *pKCe = NULL;
+
+        KCE_ITER_ALL_BEGIN(pGpu, pKCe, 0)
+            if (kbusCheckEngine_HAL(pGpu, pKernelBus, ENG_CE(pKCe->publicID)) &&
+                !ceIsCeGrce(pGpu, RM_ENGINE_TYPE_COPY(pKCe->publicID)) &&
+                gpuCheckEngineTable(pGpu, RM_ENGINE_TYPE_COPY(pKCe->publicID)))
+            {
+                *pCeInstance = pKCe->publicID;
+                return NV_OK;
+            }
+        KCE_ITER_END
+    }
+
+    return NV_ERR_INSUFFICIENT_RESOURCES;
+}
+
 NV_STATUS
 ceutilsConstruct_IMPL
 (
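 
The _memUtilsGetCe helper added above picks the copy engine for the utility channel: under MIG it asks the MIG manager for the GPU instance's dedicated scrubber CE; otherwise it walks the CE list and returns the first engine that is present, is not a GRCE (a CE coupled to graphics), and appears in the engine table. A minimal, self-contained sketch of that non-MIG selection policy, using toy stand-in types rather than RM's:

    /* Toy model of the selection loop: first present, non-GRCE CE wins. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { unsigned publicID; bool present; bool isGrce; } ToyCe;

    static int pickScrubberCe(const ToyCe *ces, unsigned n, unsigned *pCeInstance)
    {
        for (unsigned i = 0; i < n; i++)
        {
            if (ces[i].present && !ces[i].isGrce)
            {
                *pCeInstance = ces[i].publicID;
                return 0;   /* NV_OK analogue */
            }
        }
        return -1;          /* NV_ERR_INSUFFICIENT_RESOURCES analogue */
    }

    int main(void)
    {
        /* CE0 is a GRCE and CE1 is absent, so CE2 is selected. */
        const ToyCe ces[] = { {0, true, true}, {1, false, false}, {2, true, false} };
        unsigned ce;
        if (pickScrubberCe(ces, 3, &ce) == 0)
            printf("selected CE%u\n", ce);
        return 0;
    }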
@@ -58,6 +103,7 @@ ceutilsConstruct_IMPL
 {
     NV_STATUS status = NV_OK;
     NvU64 allocFlags = pAllocParams->flags;
+    NvBool bForceCeId = FLD_TEST_DRF(0050_CEUTILS, _FLAGS, _FORCE_CE_ID, _TRUE, allocFlags);
     NV_ASSERT_OR_RETURN(pGpu, NV_ERR_INVALID_STATE);
 
     NvBool bMIGInUse = IS_MIG_IN_USE(pGpu);
@@ -116,16 +162,14 @@ ceutilsConstruct_IMPL
 
     pChannel->bClientAllocated = NV_TRUE;
     pChannel->pGpu = pGpu;
 
-    pChannel->deviceId = pCeUtils->hDevice;
-    pChannel->subdeviceId = pCeUtils->hSubdevice;
-
     pChannel->pKernelMIGGpuInstance = pKernelMIGGPUInstance;
 
     // We'll allocate new VAS for now. Sharing client VAS will be added later
     pChannel->hVASpaceId = NV01_NULL_OBJECT;
     pChannel->bUseVasForCeCopy = FLD_TEST_DRF(0050_CEUTILS, _FLAGS, _VIRTUAL_MODE, _TRUE, allocFlags);
-
+    pChannel->bSecure = FLD_TEST_DRF(0050_CEUTILS, _FLAGS, _CC_SECURE, _TRUE, allocFlags);
+
     // Detect if we can enable fast scrub on this channel
     status = memmgrMemUtilsGetCopyEngineClass_HAL(pGpu, pMemoryManager, &pCeUtils->hTdCopyClass);
     NV_ASSERT_OR_GOTO(status == NV_OK, free_channel);
@@ -158,6 +202,19 @@ ceutilsConstruct_IMPL
 
     channelSetupChannelBufferSizes(pChannel);
 
+    NV_ASSERT_OK_OR_GOTO(status, channelAllocSubdevice(pGpu, pChannel), free_client);
+
+    if (bForceCeId)
+    {
+        pChannel->ceId = pAllocParams->forceCeId;
+    }
+    else
+    {
+        NV_ASSERT_OK_OR_GOTO(status,
+            _memUtilsGetCe(pGpu, pChannel->hClient, pChannel->deviceId, &pChannel->ceId),
+            free_client);
+    }
+
     status = memmgrMemUtilsChannelInitialize_HAL(pGpu, pMemoryManager, pChannel);
     NV_ASSERT_OR_GOTO(status == NV_OK, free_channel);
 
@@ -331,7 +388,7 @@ _ceutilsSubmitPushBuffer
     // Use BAR1 if CPU access is allowed, otherwise allocate and init shadow
     // buffer for DMA access
     //
     NvU32 transferFlags = (TRANSFER_FLAGS_USE_BAR1     |
                            TRANSFER_FLAGS_SHADOW_ALLOC |
                            TRANSFER_FLAGS_SHADOW_INIT_MEM);
     NV_PRINTF(LEVEL_INFO, "Actual size of copying to be pushed: %x\n", pChannelPbInfo->size);
@@ -563,6 +620,11 @@ ceutilsMemcopy_IMPL
     channelPbInfo.srcCpuCacheAttrib = pSrcMemDesc->_cpuCacheAttrib;
     channelPbInfo.dstCpuCacheAttrib = pDstMemDesc->_cpuCacheAttrib;
 
+    channelPbInfo.bSecureCopy = pParams->bSecureCopy;
+    channelPbInfo.bEncrypt = pParams->bEncrypt;
+    channelPbInfo.authTagAddr = pParams->authTagAddr;
+    channelPbInfo.encryptIvAddr = pParams->encryptIvAddr;
+
     srcPageGranularity = pSrcMemDesc->pageArrayGranularity;
     dstPageGranularity = pDstMemDesc->pageArrayGranularity;
     bSrcContig = memdescGetContiguity(pSrcMemDesc, AT_GPU);

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,10 +28,14 @@
 #include "utils/nvassert.h"
 #include "core/locks.h"
 #include "gpu/mem_mgr/mem_mgr.h"
+#include "vgpu/rpc.h"
 
 #include "kernel/gpu/mem_mgr/ce_utils_sizes.h"
 #include "kernel/gpu/mem_mgr/channel_utils.h"
 
 #include "class/clcba2.h"
+#include "class/cl0080.h"  // NV01_DEVICE_0
+#include "class/clc637.h"  // AMPERE_SMC_PARTITION_REF
+
 #define SEC2_WL_METHOD_ARRAY_SIZE 16
 #define SHA_256_HASH_SIZE_BYTE 32
@@ -40,7 +44,7 @@
 static NvU32 channelPushMemoryProperties(OBJCHANNEL *pChannel, CHANNEL_PB_INFO *pChannelPbInfo, NvU32 **ppPtr);
 static void channelPushMethod(OBJCHANNEL *pChannel, CHANNEL_PB_INFO *pChannelPbInfo,
                               NvBool bPipelined, NvBool bInsertFinishPayload,
-                              NvU32 launchType, NvU32 semaValue, NvU32 **ppPtr);
+                              NvU32 launchType, NvU32 semaValue, NvU32 copyType, NvU32 **ppPtr);
 
 /* Public APIs */
 NV_STATUS
@@ -91,6 +95,125 @@ channelSetupIDs
     return NV_OK;
 }
 
+NV_STATUS
+channelAllocSubdevice
+(
+    OBJGPU     *pGpu,
+    OBJCHANNEL *pChannel
+)
+{
+    RM_API   *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
+    NV_STATUS rmStatus = NV_OK;
+    RsClient *pRsClient;
+
+    if (!pChannel->bClientAllocated)
+    {
+        NV_CHECK_OK_OR_RETURN(
+            LEVEL_ERROR,
+            pRmApi->AllocWithHandle(pRmApi, NV01_NULL_OBJECT, NV01_NULL_OBJECT,
+                                    NV01_NULL_OBJECT, NV01_ROOT,
+                                    &pChannel->hClient, sizeof(pChannel->hClient)));
+
+        NV_ASSERT_OK_OR_GOTO(
+            rmStatus,
+            serverGetClientUnderLock(&g_resServ, pChannel->hClient, &pRsClient),
+            exit_free_client);
+
+        pChannel->pRsClient = pRsClient;
+
+        if (IS_VIRTUAL(pGpu))
+        {
+            NV_ASSERT_OK_OR_GOTO(
+                rmStatus,
+                clientSetHandleGenerator(pRsClient, RS_UNIQUE_HANDLE_BASE,
+                                         RS_UNIQUE_HANDLE_RANGE/2 - VGPU_RESERVED_HANDLE_RANGE),
+                exit_free_client);
+        }
+        else
+        {
+            NV_ASSERT_OK_OR_GOTO(
+                rmStatus,
+                clientSetHandleGenerator(pRsClient, 1U, ~0U - 1U),
+                exit_free_client);
+        }
+    }
+    else
+        pRsClient = pChannel->pRsClient;
+
+    if (pChannel->deviceId == NV01_NULL_OBJECT)
+    {
+        NV_ASSERT_OK_OR_GOTO(
+            rmStatus,
+            clientGenResourceHandle(pRsClient, &pChannel->deviceId),
+            exit_free_client);
+
+        NV0080_ALLOC_PARAMETERS params = {0};
+
+        // Which device are we?
+        params.deviceId = gpuGetDeviceInstance(pGpu);
+        params.hClientShare = pChannel->hClient;
+
+        NV_CHECK_OK_OR_GOTO(
+            rmStatus,
+            LEVEL_ERROR,
+            pRmApi->AllocWithHandle(pRmApi, pChannel->hClient, pChannel->hClient, pChannel->deviceId,
+                                    NV01_DEVICE_0, &params, sizeof(params)),
+            exit_free_client);
+    }
+
+    // allocate a subdevice
+    if (pChannel->subdeviceId == NV01_NULL_OBJECT)
+    {
+        NV_ASSERT_OK_OR_GOTO(
+            rmStatus,
+            clientGenResourceHandle(pRsClient, &pChannel->subdeviceId),
+            exit_free_client);
+
+        NV2080_ALLOC_PARAMETERS params = {0};
+        params.subDeviceId = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
+
+        NV_CHECK_OK_OR_GOTO(
+            rmStatus,
+            LEVEL_ERROR,
+            pRmApi->AllocWithHandle(pRmApi, pChannel->hClient, pChannel->deviceId, pChannel->subdeviceId,
+                                    NV20_SUBDEVICE_0,
+                                    &params,
+                                    sizeof(params)),
+            exit_free_client);
+    }
+
+    // MIG support is only added for PMA scrubber
+    if (IS_MIG_IN_USE(pGpu) && (pChannel->pKernelMIGGpuInstance != NULL))
+    {
+        NV_ASSERT_OK_OR_GOTO(
+            rmStatus,
+            clientGenResourceHandle(pRsClient, &pChannel->hPartitionRef),
+            exit_free_client);
+
+        NVC637_ALLOCATION_PARAMETERS params = {0};
+        params.swizzId = pChannel->pKernelMIGGpuInstance->swizzId;
+
+        NV_ASSERT_OK_OR_GOTO(
+            rmStatus,
+            pRmApi->AllocWithHandle(pRmApi, pChannel->hClient,
+                                    pChannel->subdeviceId,
+                                    pChannel->hPartitionRef,
+                                    AMPERE_SMC_PARTITION_REF,
+                                    &params,
+                                    sizeof(params)),
+            exit_free_client);
+    }
+
+exit_free_client:
+    if(rmStatus != NV_OK && !pChannel->bClientAllocated)
+    {
+        pRmApi->Free(pRmApi, pChannel->hClient, pChannel->hClient);
+    }
+
+    return rmStatus;
+}
+
+
 void
 channelSetupChannelBufferSizes
 (
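 
channelAllocSubdevice above is deliberately idempotent over its handles: it creates an internal root client only when the channel has no client yet, generates device, subdevice, and (under MIG) partition-reference handles only while they are still NV01_NULL_OBJECT, and on failure frees the root client only if it allocated it. A small sketch of the "allocate only while the handle is still null" pattern, with made-up handle values:

    /* Caller-provided handles are respected; only null ones get allocated. */
    #include <stdio.h>

    #define TOY_NULL_HANDLE 0u

    static unsigned ensureHandle(unsigned *pHandle, unsigned *pNext, const char *what)
    {
        if (*pHandle == TOY_NULL_HANDLE)
        {
            *pHandle = (*pNext)++;
            printf("allocated %s handle 0x%x\n", what, *pHandle);
        }
        return *pHandle;
    }

    int main(void)
    {
        unsigned next = 0xcaf00001u;
        unsigned deviceId = TOY_NULL_HANDLE, subdeviceId = TOY_NULL_HANDLE;

        ensureHandle(&deviceId, &next, "device");       /* allocated */
        ensureHandle(&subdeviceId, &next, "subdevice"); /* allocated */
        ensureHandle(&deviceId, &next, "device");       /* second call is a no-op */
        return 0;
    }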
@@ -335,15 +458,15 @@ channelFillGpFifo
     NvU32       methodsLength
 )
 {
+    OBJGPU *pGpu = pChannel->pGpu;
+    KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
+    KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
+    MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
+    NvBool bReleaseMapping = NV_FALSE;
     NvU32 *pGpEntry;
     NvU32  GpEntry0;
     NvU32  GpEntry1;
     NvU64  pbPutOffset;
-    OBJGPU *pGpu;
-    KernelBus *pKernelBus;
-    MemoryManager *pMemoryManager;
-    NvBool bReleaseMapping = NV_FALSE;
 
     //
     // Use BAR1 if CPU access is allowed, otherwise allocate and init shadow
     // buffer for DMA access
@@ -353,13 +476,6 @@ channelFillGpFifo
                            TRANSFER_FLAGS_SHADOW_INIT_MEM);
 
     NV_ASSERT_OR_RETURN(putIndex < pChannel->channelNumGpFifioEntries, NV_ERR_INVALID_STATE);
-    NV_ASSERT_OR_RETURN(pChannel != NULL, NV_ERR_INVALID_STATE);
-
-    pGpu = pChannel->pGpu;
-    NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_STATE);
-
-    pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
-    pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
 
     if (pChannel->pbCpuVA == NULL)
     {
@@ -424,8 +540,27 @@ channelFillGpFifo
         return NV_ERR_GENERIC;
     }
 
-    // Update doorbell with work submission token
-    if (pChannel->bUseDoorbellRegister)
+    if (RMCFG_FEATURE_PLATFORM_GSP ||
+        kfifoIsLiteModeEnabled_HAL(pGpu, pKernelFifo))
+    {
+        KernelChannel *pKernelChannel;
+        NvU32 workSubmitToken;
+
+        {
+            RsClient *pClient;
+            NV_ASSERT_OK(serverGetClientUnderLock(&g_resServ, pChannel->hClient, &pClient));
+            NV_ASSERT_OK(CliGetKernelChannel(pClient, pChannel->channelId, &pKernelChannel));
+        }
+
+        NV_ASSERT_OK_OR_RETURN(
+            kfifoGenerateWorkSubmitToken_HAL(pGpu,
+                                             pKernelFifo,
+                                             pKernelChannel,
+                                             &workSubmitToken, NV_TRUE));
+
+        kfifoUpdateUsermodeDoorbell_HAL(pGpu, pKernelFifo, workSubmitToken, kchannelGetRunlistId(pKernelChannel));
+    }
+    else if (pChannel->bUseDoorbellRegister)
     {
         if (pChannel->pTokenFromNotifier == NULL)
         {
@@ -592,6 +727,53 @@ channelAddHostSema
     *ppPtr = pPtr;
 }
 
+static NvU32
+channelPushSecureCopyProperties
+(
+    OBJCHANNEL      *pChannel,
+    CHANNEL_PB_INFO *pChannelPbInfo,
+    NvU32           *pCopyType,
+    NvU32          **ppPtr
+)
+{
+    NvU32 *pPtr = *ppPtr;
+
+    if (!pChannelPbInfo->bSecureCopy)
+    {
+        *pCopyType = FLD_SET_DRF(C8B5, _LAUNCH_DMA, _COPY_TYPE, _DEFAULT, *pCopyType);
+        return NV_OK;
+    }
+
+    NV_ASSERT_OR_RETURN(gpuIsCCFeatureEnabled(pChannel->pGpu), NV_ERR_NOT_SUPPORTED);
+    NV_ASSERT_OR_RETURN(pChannel->bSecure, NV_ERR_NOT_SUPPORTED);
+    NV_ASSERT_OR_RETURN(pChannel->hTdCopyClass >= HOPPER_DMA_COPY_A, NV_ERR_NOT_SUPPORTED);
+
+    if (pChannelPbInfo->bEncrypt)
+    {
+        NV_PUSH_INC_1U(RM_SUBCHANNEL,
+            NVC8B5_SET_SECURE_COPY_MODE, DRF_DEF(C8B5, _SET_SECURE_COPY_MODE, _MODE, _ENCRYPT));
+
+        NV_PUSH_INC_4U(RM_SUBCHANNEL,
+            NVC8B5_SET_ENCRYPT_AUTH_TAG_ADDR_UPPER, NvU64_HI32(pChannelPbInfo->authTagAddr),
+            NVC8B5_SET_ENCRYPT_AUTH_TAG_ADDR_LOWER, NvU64_LO32(pChannelPbInfo->authTagAddr),
+            NVC8B5_SET_ENCRYPT_IV_ADDR_UPPER, NvU64_HI32(pChannelPbInfo->encryptIvAddr),
+            NVC8B5_SET_ENCRYPT_IV_ADDR_LOWER, NvU64_LO32(pChannelPbInfo->encryptIvAddr));
+    }
+    else
+    {
+        NV_PUSH_INC_1U(RM_SUBCHANNEL,
+            NVC8B5_SET_SECURE_COPY_MODE, DRF_DEF(C8B5, _SET_SECURE_COPY_MODE, _MODE, _DECRYPT));
+
+        NV_PUSH_INC_2U(RM_SUBCHANNEL,
+            NVC8B5_SET_DECRYPT_AUTH_TAG_COMPARE_ADDR_UPPER, NvU64_HI32(pChannelPbInfo->authTagAddr),
+            NVC8B5_SET_DECRYPT_AUTH_TAG_COMPARE_ADDR_LOWER, NvU64_LO32(pChannelPbInfo->authTagAddr));
+    }
+
+    *ppPtr = pPtr;
+    *pCopyType = FLD_SET_DRF(C8B5, _LAUNCH_DMA, _COPY_TYPE, _SECURE, *pCopyType);
+    return NV_OK;
+}
+
 /** single helper function to fill the push buffer with the methods needed for
  * memsetting using CE. This function is much more efficient in the sense it
  * decouples the mem(set/copy) operation from managing channel resources.
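 
The secure-copy path above programs the copy class before LAUNCH_DMA is issued: it first selects the secure-copy mode, then supplies the auth-tag address (plus the IV address when encrypting; decryption instead takes an auth-tag compare address). A toy model of that method ordering follows; the method IDs are invented and the push helper is simplified (the real header word also packs subchannel and count):

    /* Toy push-buffer append mirroring the encrypt-path method order. */
    #include <stdint.h>
    #include <stdio.h>

    enum { SET_SECURE_COPY_MODE = 0x720, ENCRYPT_AUTH_TAG_HI = 0x730,
           ENCRYPT_AUTH_TAG_LO = 0x734, ENCRYPT_IV_HI = 0x738, ENCRYPT_IV_LO = 0x73c };

    static uint32_t *pushMethod(uint32_t *p, uint32_t method, uint32_t data)
    {
        *p++ = method;
        *p++ = data;
        return p;
    }

    int main(void)
    {
        uint32_t pb[16], *p = pb;
        uint64_t authTagAddr = 0x12345678abcdULL, ivAddr = 0x2345678abcdeULL;

        /* mode first, then the auth-tag and IV addresses, high word before low */
        p = pushMethod(p, SET_SECURE_COPY_MODE, 1 /* _ENCRYPT */);
        p = pushMethod(p, ENCRYPT_AUTH_TAG_HI, (uint32_t)(authTagAddr >> 32));
        p = pushMethod(p, ENCRYPT_AUTH_TAG_LO, (uint32_t)authTagAddr);
        p = pushMethod(p, ENCRYPT_IV_HI, (uint32_t)(ivAddr >> 32));
        p = pushMethod(p, ENCRYPT_IV_LO, (uint32_t)ivAddr);

        printf("pushed %td words\n", p - pb);
        return 0;
    }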
@@ -607,6 +789,7 @@ channelFillCePb
     CHANNEL_PB_INFO *pChannelPbInfo
 )
 {
+    NvU32  copyType   = 0;
     NvU32  launchType = 0;
     NvU32 *pPtr       = (NvU32 *)((NvU8 *)pChannel->pbCpuVA + (putIndex * pChannel->methodSizePerBlock));
     NvU32 *pStartPtr  = pPtr;
@@ -616,6 +799,9 @@ channelFillCePb
 
     NV_PUSH_INC_1U(RM_SUBCHANNEL, NV906F_SET_OBJECT, pChannel->classEngineID);
 
+    if (channelPushSecureCopyProperties(pChannel, pChannelPbInfo, &copyType, &pPtr) != NV_OK)
+        return 0;
+
     // Side effect - pushed target addresses, aperture and REMAP method for memset
     launchType = channelPushMemoryProperties(pChannel, pChannelPbInfo, &pPtr);
 
@@ -635,7 +821,10 @@ channelFillCePb
     }
 
     // Side effect - pushed LAUNCH_DMA methods
-    channelPushMethod(pChannel, pChannelPbInfo, bPipelined, bInsertFinishPayload, launchType, semaValue, &pPtr);
+    channelPushMethod(pChannel, pChannelPbInfo, bPipelined, bInsertFinishPayload,
+                      launchType, semaValue,
+                      copyType,
+                      &pPtr);
 
     channelAddHostSema(pChannel, putIndex, &pPtr);
 
@@ -899,6 +1088,7 @@ channelPushMethod
     NvBool bInsertFinishPayload,
     NvU32 launchType,
     NvU32 semaValue,
+    NvU32 copyType,
     NvU32 **ppPtr
 )
 {
@@ -952,6 +1142,7 @@ channelPushMethod
                    launchType |
                    pipelinedValue |
                    flushValue |
-                   semaValue);
+                   semaValue |
+                   copyType);
     *ppPtr = pPtr;
 }
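 
The LAUNCH_DMA method word is assembled by OR-ing independently encoded bitfields, which is why threading copyType through channelPushMethod only adds one more term to the OR above; channelPushSecureCopyProperties encodes the COPY_TYPE field as _SECURE or _DEFAULT via FLD_SET_DRF. A toy illustration of that composition, with an invented field position (the real layout comes from the HOPPER_DMA_COPY_A class header):

    /* FLD_SET_DRF analogue: clear the field, then OR in the new value. */
    #include <stdio.h>

    #define TOY_COPY_TYPE_SHIFT   4
    #define TOY_COPY_TYPE_MASK    (0x3u << TOY_COPY_TYPE_SHIFT)
    #define TOY_COPY_TYPE_DEFAULT (0x0u << TOY_COPY_TYPE_SHIFT)
    #define TOY_COPY_TYPE_SECURE  (0x2u << TOY_COPY_TYPE_SHIFT)

    static unsigned setCopyType(unsigned word, unsigned fieldValue)
    {
        return (word & ~TOY_COPY_TYPE_MASK) | fieldValue;
    }

    int main(void)
    {
        unsigned launchType = 0x1u;    /* stand-ins for the other fields */
        unsigned semaValue  = 0x100u;
        unsigned copyType   = setCopyType(0, TOY_COPY_TYPE_SECURE);

        unsigned launchDma = launchType | semaValue | copyType;
        printf("LAUNCH_DMA word = 0x%x\n", launchDma);   /* prints 0x121 */
        return 0;
    }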

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -221,6 +221,8 @@ sec2utilsConstruct_IMPL
     pChannel->type = SWL_SCRUBBER_CHANNEL;
     pChannel->engineType = RM_ENGINE_TYPE_SEC2;
 
+    pChannel->bSecure = NV_TRUE;
+
     // Detect if we can enable fast scrub on this channel
     NV_ASSERT_OK_OR_GOTO(status, _sec2GetClass(pGpu, &pSec2Utils->sec2Class), free_client);
     pChannel->sec2Class = pSec2Utils->sec2Class;
@@ -240,6 +242,8 @@ sec2utilsConstruct_IMPL
 
     pChannel->engineType = NV2080_ENGINE_TYPE_SEC2;
 
+    NV_ASSERT_OK_OR_GOTO(status, channelAllocSubdevice(pGpu, pChannel), free_client);
+
     pMemoryManager->bScrubChannelSetupInProgress = NV_TRUE;
     NV_ASSERT_OK_OR_GOTO(status, memmgrMemUtilsChannelInitialize_HAL(pGpu, pMemoryManager, pChannel), free_channel);
     pMemoryManager->bScrubChannelSetupInProgress = NV_FALSE;

@@ -765,7 +765,7 @@ _kmigmgrHandlePostSchedulingEnableCallback
     if ((pKernelMIGManager == NULL) || !kmigmgrIsMIGSupported(pGpu, pKernelMIGManager))
     {
         NV_PRINTF(LEVEL_INFO, "MIG not supported on this GPU.\n");
-        return NV_ERR_NOT_SUPPORTED;
+        return NV_OK;
     }
 
     if (!IS_MIG_ENABLED(pGpu) && !IS_VIRTUAL(pGpu) &&

@@ -49,6 +49,7 @@
 #include "gpu/conf_compute/conf_compute.h"
 #include "gpu/gpu_fabric_probe.h"
 #include "gpu/mig_mgr/gpu_instance_subscription.h"
+#include "ctrl/ctrlc56f.h"
 
 // local static funcs
 static void gpumgrSetAttachInfo(OBJGPU *, GPUATTACHARG *);
@@ -255,6 +256,8 @@ gpumgrConstruct_IMPL(OBJGPUMGR *pGpuMgr)
 
     portMemSet(pGpuMgr->cachedMIGInfo, 0, sizeof(pGpuMgr->cachedMIGInfo));
 
+    pGpuMgr->ccAttackerAdvantage = SECURITY_POLICY_ATTACKER_ADVANTAGE_DEFAULT;
+
     return NV_OK;
 }
 

@@ -1,4 +1,4 @@
-NVIDIA_VERSION = 550.76
+NVIDIA_VERSION = 550.78
 
 # This file.
 VERSION_MK_FILE := $(lastword $(MAKEFILE_LIST))