DPDK 21.11.0
rte_lpm.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 * Copyright(c) 2020 Arm Limited
 */

#ifndef _RTE_LPM_H_
#define _RTE_LPM_H_

/**
 * @file
 * RTE Longest Prefix Match (LPM)
 */

#include <errno.h>
#include <sys/queue.h>
#include <stdint.h>
#include <stdlib.h>

#include <rte_byteorder.h>
#include <rte_config.h>
#include <rte_memory.h>
#include <rte_common.h>
#include <rte_vect.h>
#include <rte_rcu_qsbr.h>

#ifdef __cplusplus
extern "C" {
#endif

/** Max number of characters in LPM name. */
#define RTE_LPM_NAMESIZE 32

/** Maximum depth value possible for IPv4 LPM. */
#define RTE_LPM_MAX_DEPTH 32

/** @internal Total number of tbl24 entries. */
#define RTE_LPM_TBL24_NUM_ENTRIES (1 << 24)

/** @internal Number of entries in a tbl8 group. */
#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES 256

/** @internal Max number of tbl8 groups in the tbl8. */
#define RTE_LPM_MAX_TBL8_NUM_GROUPS (1 << 24)

/** @internal Total number of tbl8 groups in the tbl8. */
#define RTE_LPM_TBL8_NUM_GROUPS 256

/** @internal Total number of tbl8 entries. */
#define RTE_LPM_TBL8_NUM_ENTRIES (RTE_LPM_TBL8_NUM_GROUPS * \
		RTE_LPM_TBL8_GROUP_NUM_ENTRIES)

/** @internal Macro to enable/disable run-time argument checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
	if (cond) return (retval); \
} while (0)
#else
#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
#endif

/** @internal Bitmask with the valid and valid_group fields set. */
#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000

/** Bitmask used to indicate a successful lookup. */
#define RTE_LPM_LOOKUP_SUCCESS 0x01000000

/** @internal Default number of RCU defer queue entries to reclaim in one go. */
#define RTE_LPM_RCU_DQ_RECLAIM_MAX 16

/** RCU reclamation modes. */
enum rte_lpm_qsbr_mode {
	/** Create defer queue for reclaim. */
	RTE_LPM_QSBR_MODE_DQ = 0,
	/** Use blocking mode reclaim. No defer queue created. */
	RTE_LPM_QSBR_MODE_SYNC
};

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/** @internal Tbl24/tbl8 entry structure. */
__extension__
struct rte_lpm_tbl_entry {
	/**
	 * Stores the next hop, or (in a tbl24 entry with valid_group set)
	 * the index of the tbl8 group holding the more specific entries.
	 */
	uint32_t next_hop    :24;
	/* Using single uint8_t to store 3 values. */
	uint32_t valid       :1;   /**< Validation flag. */
	/**
	 * For tbl24: set when the entry points to a tbl8 group rather than
	 * storing a next hop. For tbl8: set when the tbl8 group is in use.
	 */
	uint32_t valid_group :1;
	uint32_t depth       :6;   /**< Rule depth. */
};

#else

__extension__
struct rte_lpm_tbl_entry {
	uint32_t depth       :6;
	uint32_t valid_group :1;
	uint32_t valid       :1;
	uint32_t next_hop    :24;
};

#endif


/** LPM configuration structure. */
struct rte_lpm_config {
	uint32_t max_rules;      /**< Max number of rules. */
	uint32_t number_tbl8s;   /**< Number of tbl8s to allocate. */
	int flags;               /**< This field is currently unused. */
};

/** @internal LPM structure. */
struct rte_lpm {
	/* LPM Tables. */
	struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
			__rte_cache_aligned; /**< LPM tbl24 table. */
	struct rte_lpm_tbl_entry *tbl8; /**< LPM tbl8 table. */
};

/** LPM RCU QSBR configuration structure. */
struct rte_lpm_rcu_config {
	struct rte_rcu_qsbr *v;	/* RCU QSBR variable. */
	/* Mode of RCU QSBR. RTE_LPM_QSBR_MODE_xxx
	 * '0' for default: create defer queue for reclaim.
	 */
	enum rte_lpm_qsbr_mode mode;
	uint32_t dq_size;	/* RCU defer queue size.
				 * default: lpm->number_tbl8s.
				 */
	uint32_t reclaim_thd;	/* Threshold to trigger auto reclaim. */
	uint32_t reclaim_max;	/* Max entries to reclaim in one go.
				 * default: RTE_LPM_RCU_DQ_RECLAIM_MAX.
				 */
};

/** Create an LPM object. Returns NULL on failure, with rte_errno set. */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id,
		const struct rte_lpm_config *config);
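
A minimal usage sketch (not part of the upstream header): it shows a table being created from a struct rte_lpm_config. The helper name and the sizing values are invented for illustration; SOCKET_ID_ANY is the EAL constant for "any NUMA socket", provided by the headers included above.

/* Illustrative only: helper name and sizing values are made up. */
static int
example_lpm_setup(struct rte_lpm **out)
{
	struct rte_lpm_config config = {
		.max_rules = 1024,	/* max routes kept in the table */
		.number_tbl8s = 256,	/* tbl8 groups for prefixes longer than /24 */
		.flags = 0,		/* currently unused */
	};

	*out = rte_lpm_create("example_lpm", SOCKET_ID_ANY, &config);
	if (*out == NULL)
		return -1;	/* rte_errno holds the failure reason */
	return 0;
}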

/** Find an existing LPM object by name and return a pointer to it. */
struct rte_lpm *
rte_lpm_find_existing(const char *name);

/** Free an LPM object. */
void
rte_lpm_free(struct rte_lpm *lpm);

/** Associate an RCU QSBR variable with an LPM object. */
__rte_experimental
int rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg);
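
A hedged sketch of attaching RCU to the table so that freed tbl8 groups are reclaimed only after readers have quiesced. The helper name is invented, and the QSBR variable is assumed to have been allocated and initialized elsewhere with rte_rcu_qsbr_init(); zero-valued fields are intended to fall back to the defaults documented in struct rte_lpm_rcu_config above.

/* Illustrative only: 'v' must already be initialized with rte_rcu_qsbr_init(). */
static int
example_lpm_attach_rcu(struct rte_lpm *lpm, struct rte_rcu_qsbr *v)
{
	struct rte_lpm_rcu_config rcu_cfg = {
		.v = v,
		.mode = RTE_LPM_QSBR_MODE_DQ,	/* reclaim through a defer queue */
		.dq_size = 0,			/* 0: default (lpm->number_tbl8s) */
		.reclaim_thd = 0,
		.reclaim_max = RTE_LPM_RCU_DQ_RECLAIM_MAX,
	};

	return rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg);
}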

/** Add a rule to the LPM table. Returns 0 on success, negative on failure. */
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);

/**
 * Check if a rule is present in the LPM table and provide its next hop if so.
 * Returns 1 if the rule exists, 0 if it does not, negative on failure.
 */
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t *next_hop);

/** Delete a rule from the LPM table. Returns 0 on success, negative on failure. */
int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);

/** Delete all rules from the LPM table. */
void
rte_lpm_delete_all(struct rte_lpm *lpm);

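A small sketch of rule management using the declarations above; the 10.0.0.0/8 route, the next-hop id, and the helper name are invented for illustration. Addresses are passed in host byte order.

/* Illustrative only: the route, next-hop id and helper name are made up. */
static int
example_lpm_rules(struct rte_lpm *lpm)
{
	uint32_t ip = 10U << 24;	/* 10.0.0.0 in host byte order */
	uint8_t depth = 8;		/* /8 prefix */
	uint32_t next_hop = 5, found_hop;
	int ret;

	ret = rte_lpm_add(lpm, ip, depth, next_hop);
	if (ret < 0)
		return ret;

	/* rte_lpm_is_rule_present() returns 1 and fills found_hop on a match. */
	if (rte_lpm_is_rule_present(lpm, ip, depth, &found_hop) == 1)
		ret = rte_lpm_delete(lpm, ip, depth);

	return ret;
}
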
/**
 * Lookup an IP in the LPM table. Returns 0 on a hit (and fills *next_hop
 * with the value supplied at rte_lpm_add() time), -EINVAL for invalid
 * arguments, -ENOENT on a lookup miss.
 */
static inline int
rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
{
	unsigned tbl24_index = (ip >> 8);
	uint32_t tbl_entry;
	const uint32_t *ptbl;

	/* DEBUG: Check user input arguments. */
	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);

	/* Copy tbl24 entry */
	ptbl = (const uint32_t *)(&lpm->tbl24[tbl24_index]);
	tbl_entry = *ptbl;

	/* Memory ordering is not required in lookup. Because dataflow
	 * dependency exists, compiler or HW won't be able to re-order
	 * the operations.
	 */
	/* Copy tbl8 entry (only if needed) */
	if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

		unsigned tbl8_index = (uint8_t)ip +
				(((uint32_t)tbl_entry & 0x00FFFFFF) *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

		ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
		tbl_entry = *ptbl;
	}

	*next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
	return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
}
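
A short sketch of the lookup path just defined: on a hit the call returns 0 and writes the 24-bit next-hop id, on a miss it returns -ENOENT. The address is assumed to already be in host byte order (for example after rte_be_to_cpu_32() on a packet's destination address); the helper and the forwarding step are placeholders.

/* Illustrative only: forwarding is application specific. */
static void
example_lpm_forward(struct rte_lpm *lpm, uint32_t dst_ip_host_order)
{
	uint32_t next_hop;

	if (rte_lpm_lookup(lpm, dst_ip_host_order, &next_hop) == 0) {
		/* Hit: next_hop holds the value stored by rte_lpm_add(). */
		(void)next_hop;	/* e.g. pick the egress port from next_hop */
	} else {
		/* Miss (-ENOENT): drop or use a default route. */
	}
}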

/**
 * Lookup multiple IP addresses in an LPM table. On return, each
 * next_hops[i] holds the raw table entry: the RTE_LPM_LOOKUP_SUCCESS bit
 * indicates a hit and the lower 24 bits hold the next hop. This may be
 * implemented as a macro, so its address should not be taken.
 */
#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
		rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)

static inline int
rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
		uint32_t *next_hops, const unsigned n)
{
	unsigned i;
	unsigned tbl24_indexes[n];
	const uint32_t *ptbl;

	/* DEBUG: Check user input arguments. */
	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
			(next_hops == NULL)), -EINVAL);

	for (i = 0; i < n; i++) {
		tbl24_indexes[i] = ips[i] >> 8;
	}

	for (i = 0; i < n; i++) {
		/* Simply copy tbl24 entry to output */
		ptbl = (const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
		next_hops[i] = *ptbl;

		/* Overwrite output with tbl8 entry if needed */
		if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
				RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

			unsigned tbl8_index = (uint8_t)ips[i] +
					(((uint32_t)next_hops[i] & 0x00FFFFFF) *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

			ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
			next_hops[i] = *ptbl;
		}
	}
	return 0;
}
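
A hedged sketch of consuming the bulk lookup results: because next_hops[] carries raw table entries, the caller tests the RTE_LPM_LOOKUP_SUCCESS bit and masks out the low 24 bits. The helper name and burst handling are invented for illustration.

/* Illustrative only: 'ips' and 'next_hops' are caller-provided arrays of size n. */
static void
example_lpm_lookup_burst(const struct rte_lpm *lpm, const uint32_t *ips,
		uint32_t *next_hops, unsigned int n)
{
	unsigned int i;

	rte_lpm_lookup_bulk(lpm, ips, next_hops, n);

	for (i = 0; i < n; i++) {
		if (next_hops[i] & RTE_LPM_LOOKUP_SUCCESS) {
			uint32_t hop = next_hops[i] & 0x00FFFFFF;
			(void)hop;	/* forward using 'hop' -- application specific */
		} else {
			/* Lookup miss for ips[i]: drop or use a default route. */
		}
	}
}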

/* Mask four results. */
#define RTE_LPM_MASKX4_RES UINT64_C(0x00ffffff00ffffff)

/**
 * Lookup four IP addresses in an LPM table. On a miss for a given address,
 * the corresponding hop[] element is set to defv.
 */
static inline void
rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
		uint32_t defv);

#if defined(RTE_ARCH_ARM)
#ifdef RTE_HAS_SVE_ACLE
#include "rte_lpm_sve.h"
#else
#include "rte_lpm_neon.h"
#endif
#elif defined(RTE_ARCH_PPC_64)
#include "rte_lpm_altivec.h"
#else
#include "rte_lpm_sse.h"
#endif

#ifdef __cplusplus
}
#endif

#endif /* _RTE_LPM_H_ */