[COMMIT osv master] ena: convert ena_com/ena_com.cc to C++ and eliminate unneeded code

Commit Bot

Jan 11, 2024, 12:43:11 PM
to osv...@googlegroups.com, Waldemar Kozaczuk
From: Waldemar Kozaczuk <jwkoz...@gmail.com>
Committer: WALDEMAR KOZACZUK <jwkoz...@gmail.com>
Branch: master

ena: convert ena_com/ena_com.cc to C++ and eliminate unneeded code

ena_com.cc is the second of the two source files that make up
the low-level ena_com API. The intermediate layer uses this part
mainly to implement admin functionality, for example creating
I/O queues. See https://github.com/amzn/amzn-drivers/tree/master/kernel/fbsd/ena#ena-source-code-directory-structure
for more insight.
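
As a rough illustration of that admin functionality (not part of this patch),
the driver's intermediate layer typically creates an I/O queue pair through the
ena_com API along the following lines; the helper name and the qid, queue_size
and msix_vector parameters are placeholders:

    /* Hypothetical sketch: create one TX I/O queue pair via the ena_com
     * admin API and fetch its SQ/CQ handles. */
    static int create_tx_io_queue_sketch(struct ena_com_dev *ena_dev, u16 qid,
                                         u16 queue_size, u32 msix_vector,
                                         struct ena_com_io_sq **io_sq,
                                         struct ena_com_io_cq **io_cq)
    {
        struct ena_com_create_io_ctx ctx = {};

        ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
        ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
        ctx.qid = qid;
        ctx.queue_size = queue_size;
        ctx.msix_vector = msix_vector; /* interrupt vector of the I/O queue */
        ctx.numa_node = 0;

        /* Issues an admin command; with this patch it completes via polling. */
        int rc = ena_com_create_io_queue(ena_dev, &ctx);
        if (rc)
            return rc;

        return ena_com_get_io_handlers(ena_dev, qid, io_sq, io_cq);
    }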

This patch:

- uses C++ constructs (static_cast) to apply type conversions where necessary

- eliminates the MSI-X interrupt-based logic for handling completions
of admin commands (see the removed ena_com_wait_and_process_admin_cq_interrupts())
and leaves the polling-mode logic as the default

- eliminates the RSS (Receive-Side Scaling) related code for now

- implements busy_sleep(), used by the ENA_USLEEP and ENA_UDELAY macros
(a minimal sketch of that mapping follows below)
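
A minimal sketch of that mapping, assuming the OSv ena_plat.h port simply
scales microseconds to the nanoseconds busy_sleep() expects (the actual
definitions may differ):

    /* Assumed mapping -- busy_sleep() (added by this patch) takes
     * nanoseconds, while the ENA delay macros take microseconds. */
    #define ENA_USLEEP(us) busy_sleep((us) * 1000ULL)
    #define ENA_UDELAY(us) busy_sleep((us) * 1000ULL)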

Signed-off-by: Waldemar Kozaczuk <jwkoz...@gmail.com>

---
diff --git a/bsd/sys/contrib/ena_com/ena_com.cc b/bsd/sys/contrib/ena_com/ena_com.cc
--- a/bsd/sys/contrib/ena_com/ena_com.cc
+++ b/bsd/sys/contrib/ena_com/ena_com.cc
@@ -32,6 +32,8 @@
*/

#include "ena_com.h"
+#include <sys/mutex.h>
+#include <sys/cddl/compat/opensolaris/sys/kcondvar.h>

/*****************************************************************************/
/*****************************************************************************/
@@ -74,6 +76,22 @@

#define ENA_MAX_ADMIN_POLL_US 5000

+#include <osv/mmu.hh>
+#include <osv/mempool.hh>
+
+static inline void busy_sleep(u64 nanoseconds)
+{
+ auto end = osv::clock::uptime::now().time_since_epoch().count() + nanoseconds;
+ while (osv::clock::uptime::now().time_since_epoch().count() < end) {
+#ifdef __x86_64__
+ __asm __volatile("pause");
+#endif
+#ifdef __aarch64__
+ __asm __volatile("isb sy");
+#endif
+ }
+}
+
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
@@ -86,7 +104,6 @@ enum ena_cmd_status {
};

struct ena_comp_ctx {
- ena_wait_event_t wait_event;
struct ena_admin_acq_entry *user_cqe;
u32 comp_size;
enum ena_cmd_status status;
@@ -118,11 +135,10 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,

static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
{
- struct ena_com_dev *ena_dev = admin_queue->ena_dev;
struct ena_com_admin_sq *sq = &admin_queue->sq;
u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);

- ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, sq->entries, sq->dma_addr,
+ ENA_MEM_ALLOC_COHERENT(static_cast<device_t>(admin_queue->q_dmadev), size, sq->entries, sq->dma_addr,
sq->mem_handle);

if (!sq->entries) {
@@ -141,11 +157,10 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)

static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
{
- struct ena_com_dev *ena_dev = admin_queue->ena_dev;
struct ena_com_admin_cq *cq = &admin_queue->cq;
u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);

- ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, cq->entries, cq->dma_addr,
+ ENA_MEM_ALLOC_COHERENT(static_cast<device_t>(admin_queue->q_dmadev), size, cq->entries, cq->dma_addr,
cq->mem_handle);

if (!cq->entries) {
@@ -168,7 +183,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,

ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, size,
+ ENA_MEM_ALLOC_COHERENT(static_cast<device_t>(ena_dev->dmadev), size,
aenq->entries,
aenq->dma_addr,
aenq->mem_handle);
@@ -261,7 +276,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
if (cnt >= admin_queue->q_depth) {
ena_trc_dbg(admin_queue->ena_dev, "Admin queue is full.\n");
admin_queue->stats.out_of_space++;
- return ERR_PTR(ENA_COM_NO_SPACE);
+ return static_cast<ena_comp_ctx *>(ERR_PTR(ENA_COM_NO_SPACE));
}

cmd_id = admin_queue->curr_cmd_id;
@@ -274,15 +289,13 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu

comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
if (unlikely(!comp_ctx))
- return ERR_PTR(ENA_COM_INVAL);
+ return static_cast<ena_comp_ctx *>(ERR_PTR(ENA_COM_INVAL));

comp_ctx->status = ENA_CMD_SUBMITTED;
comp_ctx->comp_size = (u32)comp_size_in_bytes;
comp_ctx->user_cqe = comp;
comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

- ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);
-
memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
@@ -303,23 +316,14 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu

static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
{
- struct ena_com_dev *ena_dev = admin_queue->ena_dev;
size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
- struct ena_comp_ctx *comp_ctx;
- u16 i;

- admin_queue->comp_ctx = ENA_MEM_ALLOC(admin_queue->q_dmadev, size);
+ admin_queue->comp_ctx = static_cast<ena_comp_ctx *>(ENA_MEM_ALLOC(admin_queue->q_dmadev, size));
if (unlikely(!admin_queue->comp_ctx)) {
ena_trc_err(ena_dev, "Memory allocation failed\n");
return ENA_COM_NO_MEM;
}

- for (i = 0; i < admin_queue->q_depth; i++) {
- comp_ctx = get_comp_ctxt(admin_queue, i, false);
- if (comp_ctx)
- ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
- }
-
return 0;
}

@@ -335,7 +339,7 @@ static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
if (unlikely(!admin_queue->running_state)) {
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
- return ERR_PTR(ENA_COM_NO_DEVICE);
+ return static_cast<ena_comp_ctx *>(ERR_PTR(ENA_COM_NO_DEVICE));
}
comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
cmd_size_in_bytes,
@@ -367,15 +371,15 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
io_sq->bus = ena_dev->bus;

if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
- ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
+ ENA_MEM_ALLOC_COHERENT_NODE(static_cast<device_t>(ena_dev->dmadev),
size,
io_sq->desc_addr.virt_addr,
io_sq->desc_addr.phys_addr,
io_sq->desc_addr.mem_handle,
ctx->numa_node,
dev_node);
if (!io_sq->desc_addr.virt_addr) {
- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ ENA_MEM_ALLOC_COHERENT(static_cast<device_t>(ena_dev->dmadev),
size,
io_sq->desc_addr.virt_addr,
io_sq->desc_addr.phys_addr,
@@ -405,7 +409,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
ctx->numa_node,
dev_node);
if (!io_sq->bounce_buf_ctrl.base_buffer)
- io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
+ io_sq->bounce_buf_ctrl.base_buffer = static_cast<uint8_t*>(ENA_MEM_ALLOC(ena_dev->dmadev, size));

if (!io_sq->bounce_buf_ctrl.base_buffer) {
ena_trc_err(ena_dev, "Bounce buffer memory allocation failed\n");
@@ -455,7 +459,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
io_cq->bus = ena_dev->bus;

- ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(ena_dev->dmadev,
+ ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(static_cast<device_t>(ena_dev->dmadev),
size,
io_cq->cdesc_addr.virt_addr,
io_cq->cdesc_addr.phys_addr,
@@ -464,7 +468,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
prev_node,
ENA_CDESC_RING_SIZE_ALIGNMENT);
if (!io_cq->cdesc_addr.virt_addr) {
- ENA_MEM_ALLOC_COHERENT_ALIGNED(ena_dev->dmadev,
+ ENA_MEM_ALLOC_COHERENT_ALIGNED(static_cast<device_t>(ena_dev->dmadev),
size,
io_cq->cdesc_addr.virt_addr,
io_cq->cdesc_addr.phys_addr,
@@ -505,9 +509,6 @@ static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *a

if (comp_ctx->user_cqe)
memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
-
- if (!admin_queue->polling)
- ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}

static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
@@ -803,55 +804,6 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
return rc;
}

-static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
- struct ena_com_admin_queue *admin_queue)
-{
- unsigned long flags = 0;
- int ret;
-
- ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
- admin_queue->completion_timeout);
-
- /* In case the command wasn't completed find out the root cause.
- * There might be 2 kinds of errors
- * 1) No completion (timeout reached)
- * 2) There is completion but the device didn't get any msi-x interrupt.
- */
- if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
- ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
- ena_com_handle_admin_completion(admin_queue);
- admin_queue->stats.no_completion++;
- ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
-
- if (comp_ctx->status == ENA_CMD_COMPLETED) {
- ena_trc_err(admin_queue->ena_dev,
- "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
- comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
- /* Check if fallback to polling is enabled */
- if (admin_queue->auto_polling)
- admin_queue->polling = true;
- } else {
- ena_trc_err(admin_queue->ena_dev,
- "The ena device didn't send a completion for the admin cmd %d status %d\n",
- comp_ctx->cmd_opcode, comp_ctx->status);
- }
- /* Check if shifted to polling mode.
- * This will happen if there is a completion without an interrupt
- * and autopolling mode is enabled. Continuing normal execution in such case
- */
- if (!admin_queue->polling) {
- admin_queue->running_state = false;
- ret = ENA_COM_TIMER_EXPIRED;
- goto err;
- }
- }
-
- ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
-err:
- comp_ctxt_release(admin_queue, comp_ctx);
- return ret;
-}
-
/* This method read the hardware device register through posting writes
* and waiting for response
* On timeout the function will return ENA_MMIO_READ_TIMEOUT
@@ -925,12 +877,8 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
struct ena_com_admin_queue *admin_queue)
{
- if (admin_queue->polling)
- return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
- admin_queue);
-
- return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
- admin_queue);
+ return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
+ admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
@@ -1124,7 +1072,8 @@ static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
struct ena_admin_feature_rss_flow_hash_control *hash_key =
(ena_dev->rss).hash_key;

- ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key));
+ //We should not need RSS support for now at least
+ //ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key));
/* The key buffer is stored in the device in an array of
* uint32 elements.
*/
@@ -1138,7 +1087,7 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
return ENA_COM_UNSUPPORTED;

- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ ENA_MEM_ALLOC_COHERENT(static_cast<device_t>(ena_dev->dmadev),
sizeof(*rss->hash_key),
rss->hash_key,
rss->hash_key_dma_addr,
@@ -1167,7 +1116,7 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;

- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ ENA_MEM_ALLOC_COHERENT(static_cast<device_t>(ena_dev->dmadev),
sizeof(*rss->hash_ctrl),
rss->hash_ctrl,
rss->hash_ctrl_dma_addr,
@@ -1217,7 +1166,7 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
tbl_size = (1ULL << log_size) *
sizeof(struct ena_admin_rss_ind_table_entry);

- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ ENA_MEM_ALLOC_COHERENT(static_cast<device_t>(ena_dev->dmadev),
tbl_size,
rss->rss_ind_tbl,
rss->rss_ind_tbl_dma_addr,
@@ -1227,7 +1176,7 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,

tbl_size = (1ULL << log_size) * sizeof(u16);
rss->host_rss_ind_tbl =
- ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
+ static_cast<uint16_t *>(ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size));
if (unlikely(!rss->host_rss_ind_tbl))
goto mem_err2;

@@ -1523,8 +1472,6 @@ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
break;

comp_ctx->status = ENA_CMD_ABORTED;
-
- ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}
}

@@ -1716,7 +1663,6 @@ ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
if (!admin_queue->comp_ctx)
return;

- ENA_WAIT_EVENTS_DESTROY(admin_queue);
ENA_MEM_FREE(ena_dev->dmadev,
admin_queue->comp_ctx,
(admin_queue->q_depth * sizeof(struct ena_comp_ctx)));
@@ -1782,7 +1728,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

ENA_SPINLOCK_INIT(mmio_read->lock);
- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ ENA_MEM_ALLOC_COHERENT(static_cast<device_t>(ena_dev->dmadev),
sizeof(*mmio_read->read_resp),
mmio_read->read_resp,
mmio_read->read_resp_dma_addr,
@@ -2330,7 +2276,7 @@ int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
dma_addr_t phys_addr;
int ret;

- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
+ ENA_MEM_ALLOC_COHERENT(static_cast<device_t>(ena_dev->dmadev), len,
virt_addr, phys_addr, mem_handle);
if (!virt_addr) {
ret = ENA_COM_NO_MEM;
@@ -2535,33 +2481,6 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return rc;
}

-int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
- enum ena_admin_hash_functions *func)
-{
- struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_get_feat_resp get_resp;
- int rc;
-
- if (unlikely(!func))
- return ENA_COM_INVAL;
-
- rc = ena_com_get_feature_ex(ena_dev, &get_resp,
- ENA_ADMIN_RSS_HASH_FUNCTION,
- rss->hash_key_dma_addr,
- sizeof(*rss->hash_key), 0);
- if (unlikely(rc))
- return rc;
-
- /* ENA_FFS() returns 1 in case the lsb is set */
- rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func);
- if (rss->hash_func)
- rss->hash_func--;
-
- *func = rss->hash_func;
-
- return 0;
-}
-
int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
{
struct ena_admin_feature_rss_flow_hash_control *hash_key =
@@ -2641,104 +2560,6 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
return ret;
}

-int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
-{
- struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_feature_rss_hash_control *hash_ctrl =
- rss->hash_ctrl;
- u16 available_fields = 0;
- int rc, i;
-
- /* Get the supported hash input */
- rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
- if (unlikely(rc))
- return rc;
-
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
- ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
- ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
-
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
- ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
- ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
-
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
- ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
- ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
-
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
- ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
- ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
-
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
- ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
-
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
- ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
-
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
- ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
-
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
- ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
-
- for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
- available_fields = hash_ctrl->selected_fields[i].fields &
- hash_ctrl->supported_fields[i].fields;
- if (available_fields != hash_ctrl->selected_fields[i].fields) {
- ena_trc_err(ena_dev, "Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
- i, hash_ctrl->supported_fields[i].fields,
- hash_ctrl->selected_fields[i].fields);
- return ENA_COM_UNSUPPORTED;
- }
- }
-
- rc = ena_com_set_hash_ctrl(ena_dev);
-
- /* In case of failure, restore the old hash ctrl */
- if (unlikely(rc))
- ena_com_get_hash_ctrl(ena_dev, 0, NULL);
-
- return rc;
-}
-
-int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
- enum ena_admin_flow_hash_proto proto,
- u16 hash_fields)
-{
- struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
- u16 supported_fields;
- int rc;
-
- if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
- ena_trc_err(ena_dev, "Invalid proto num (%u)\n", proto);
- return ENA_COM_INVAL;
- }
-
- /* Get the ctrl table */
- rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
- if (unlikely(rc))
- return rc;
-
- /* Make sure all the fields are supported */
- supported_fields = hash_ctrl->supported_fields[proto].fields;
- if ((hash_fields & supported_fields) != hash_fields) {
- ena_trc_err(ena_dev, "Proto %d doesn't support the required fields %x. supports only: %x\n",
- proto, hash_fields, supported_fields);
- }
-
- hash_ctrl->selected_fields[proto].fields = hash_fields;
-
- rc = ena_com_set_hash_ctrl(ena_dev);
-
- /* In case of failure, restore the old hash ctrl */
- if (unlikely(rc))
- ena_com_get_hash_ctrl(ena_dev, 0, NULL);
-
- return 0;
-}
-
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
u16 entry_idx, u16 entry_value)
{
@@ -2882,7 +2703,7 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;

- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ ENA_MEM_ALLOC_COHERENT(static_cast<device_t>(ena_dev->dmadev),
SZ_4K,
host_attr->host_info,
host_attr->host_info_dma_addr,
@@ -2902,7 +2723,7 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;

- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ ENA_MEM_ALLOC_COHERENT(static_cast<device_t>(ena_dev->dmadev),
debug_area_size,
host_attr->debug_area_virt_addr,
host_attr->debug_area_dma_addr,