DPDK 17.11.4
rte_ring.h
1 /*-
2  * BSD LICENSE
3  *
4  * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * * Redistributions of source code must retain the above copyright
12  * notice, this list of conditions and the following disclaimer.
13  * * Redistributions in binary form must reproduce the above copyright
14  * notice, this list of conditions and the following disclaimer in
15  * the documentation and/or other materials provided with the
16  * distribution.
17  * * Neither the name of Intel Corporation nor the names of its
18  * contributors may be used to endorse or promote products derived
19  * from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * Derived from FreeBSD's bufring.h
36  *
37  **************************************************************************
38  *
39  * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
40  * All rights reserved.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions are met:
44  *
45  * 1. Redistributions of source code must retain the above copyright notice,
46  * this list of conditions and the following disclaimer.
47  *
48  * 2. The name of Kip Macy nor the names of other
49  * contributors may be used to endorse or promote products derived from
50  * this software without specific prior written permission.
51  *
52  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
53  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
56  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
57  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
58  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
59  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
60  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
61  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
62  * POSSIBILITY OF SUCH DAMAGE.
63  *
64  ***************************************************************************/
65 
66 #ifndef _RTE_RING_H_
67 #define _RTE_RING_H_
68 
90 #ifdef __cplusplus
91 extern "C" {
92 #endif
93 
94 #include <stdio.h>
95 #include <stdint.h>
96 #include <sys/queue.h>
97 #include <errno.h>
98 #include <rte_common.h>
99 #include <rte_config.h>
100 #include <rte_memory.h>
101 #include <rte_lcore.h>
102 #include <rte_atomic.h>
103 #include <rte_branch_prediction.h>
104 #include <rte_memzone.h>
105 #include <rte_pause.h>
106 
107 #define RTE_TAILQ_RING_NAME "RTE_RING"
108 
109 enum rte_ring_queue_behavior {
110  RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
111  RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items as possible from ring */
112 };
113 
114 #define RTE_RING_MZ_PREFIX "RG_"
115 
116 #define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
117  sizeof(RTE_RING_MZ_PREFIX) + 1)
118 
119 struct rte_memzone; /* forward declaration, so as not to require memzone.h */
120 
121 #if RTE_CACHE_LINE_SIZE < 128
122 #define PROD_ALIGN (RTE_CACHE_LINE_SIZE * 2)
123 #define CONS_ALIGN (RTE_CACHE_LINE_SIZE * 2)
124 #else
125 #define PROD_ALIGN RTE_CACHE_LINE_SIZE
126 #define CONS_ALIGN RTE_CACHE_LINE_SIZE
127 #endif
128 
129 /* structure to hold a pair of head/tail values and other metadata */
130 struct rte_ring_headtail {
131  volatile uint32_t head;
132  volatile uint32_t tail;
133  uint32_t single;
134 };
135 
146 struct rte_ring {
147  /*
148  * Note: this field keeps the RTE_MEMZONE_NAMESIZE size due to ABI
149  * compatibility requirements; it could be changed to RTE_RING_NAMESIZE
150  * the next time the ABI changes.
151  */
152  char name[RTE_MEMZONE_NAMESIZE] __rte_cache_aligned;
153  int flags;
154  const struct rte_memzone *memzone;
156  uint32_t size;
157  uint32_t mask;
158  uint32_t capacity;
161  struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);
162 
164  struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);
165 };
166 
167 #define RING_F_SP_ENQ 0x0001
168 #define RING_F_SC_DEQ 0x0002
177 #define RING_F_EXACT_SZ 0x0004
178 #define RTE_RING_SZ_MASK (0x7fffffffU)
180 /* @internal defines for passing to the enqueue/dequeue worker functions */
181 #define __IS_SP 1
182 #define __IS_MP 0
183 #define __IS_SC 1
184 #define __IS_MC 0
185 
200 ssize_t rte_ring_get_memsize(unsigned count);
201 
236 int rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
237  unsigned flags);
238 
278 struct rte_ring *rte_ring_create(const char *name, unsigned count,
279  int socket_id, unsigned flags);
286 void rte_ring_free(struct rte_ring *r);
287 
296 void rte_ring_dump(FILE *f, const struct rte_ring *r);
297 
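As a brief aside (not part of the original header), a minimal usage sketch of the creation and lookup API declared above; the ring name, element count, and helper function are illustrative, and the EAL is assumed to be initialized already:

#include <rte_ring.h>

/* Hypothetical helper: create an SP/SC ring and verify it can be found by name. */
static struct rte_ring *example_ring_setup(void)
{
	/* 1024 slots, single producer / single consumer; without
	 * RING_F_EXACT_SZ the usable capacity is size - 1 = 1023. */
	struct rte_ring *r = rte_ring_create("example_ring", 1024,
					     SOCKET_ID_ANY,
					     RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (r == NULL)
		return NULL;

	/* the ring can later be retrieved by name with rte_ring_lookup() */
	if (rte_ring_lookup("example_ring") != r) {
		rte_ring_free(r);
		return NULL;
	}
	return r;
}
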
298 /* The actual enqueue of pointers on the ring.
299  * Placed here since identical code is needed in both
300  * single- and multi-producer enqueue functions. */
301 #define ENQUEUE_PTRS(r, ring_start, prod_head, obj_table, n, obj_type) do { \
302  unsigned int i; \
303  const uint32_t size = (r)->size; \
304  uint32_t idx = prod_head & (r)->mask; \
305  obj_type *ring = (obj_type *)ring_start; \
306  if (likely(idx + n < size)) { \
307  for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
308  ring[idx] = obj_table[i]; \
309  ring[idx+1] = obj_table[i+1]; \
310  ring[idx+2] = obj_table[i+2]; \
311  ring[idx+3] = obj_table[i+3]; \
312  } \
313  switch (n & 0x3) { \
314  case 3: \
315  ring[idx++] = obj_table[i++]; /* fallthrough */ \
316  case 2: \
317  ring[idx++] = obj_table[i++]; /* fallthrough */ \
318  case 1: \
319  ring[idx++] = obj_table[i++]; \
320  } \
321  } else { \
322  for (i = 0; idx < size; i++, idx++)\
323  ring[idx] = obj_table[i]; \
324  for (idx = 0; i < n; i++, idx++) \
325  ring[idx] = obj_table[i]; \
326  } \
327 } while (0)
328 
329 /* The actual copy of pointers from the ring to obj_table.
330  * Placed here since identical code is needed in both
331  * single- and multi-consumer dequeue functions. */
332 #define DEQUEUE_PTRS(r, ring_start, cons_head, obj_table, n, obj_type) do { \
333  unsigned int i; \
334  uint32_t idx = cons_head & (r)->mask; \
335  const uint32_t size = (r)->size; \
336  obj_type *ring = (obj_type *)ring_start; \
337  if (likely(idx + n < size)) { \
338  for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
339  obj_table[i] = ring[idx]; \
340  obj_table[i+1] = ring[idx+1]; \
341  obj_table[i+2] = ring[idx+2]; \
342  obj_table[i+3] = ring[idx+3]; \
343  } \
344  switch (n & 0x3) { \
345  case 3: \
346  obj_table[i++] = ring[idx++]; /* fallthrough */ \
347  case 2: \
348  obj_table[i++] = ring[idx++]; /* fallthrough */ \
349  case 1: \
350  obj_table[i++] = ring[idx++]; \
351  } \
352  } else { \
353  for (i = 0; idx < size; i++, idx++) \
354  obj_table[i] = ring[idx]; \
355  for (idx = 0; i < n; i++, idx++) \
356  obj_table[i] = ring[idx]; \
357  } \
358 } while (0)
359 
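To make the wrap-around branch of these copy macros concrete, a small worked example (values chosen for illustration): with size = 8, mask = 7, prod_head = 6 and n = 4, idx starts at 6 and idx + n = 10 is not < size, so the else branch runs; the first loop copies obj_table[0..1] into ring[6..7], idx then restarts at 0, and the second loop copies obj_table[2..3] into ring[0..1].
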
360 static __rte_always_inline void
361 update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
362  uint32_t single)
363 {
364  /*
365  * If there are other enqueues/dequeues in progress that preceded us,
366  * we need to wait for them to complete
367  */
368  if (!single)
369  while (unlikely(ht->tail != old_val))
370  rte_pause();
371 
372  ht->tail = new_val;
373 }
374 
398 static __rte_always_inline unsigned int
399 __rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
400  unsigned int n, enum rte_ring_queue_behavior behavior,
401  uint32_t *old_head, uint32_t *new_head,
402  uint32_t *free_entries)
403 {
404  const uint32_t capacity = r->capacity;
405  unsigned int max = n;
406  int success;
407 
408  do {
409  /* Reset n to the initial burst count */
410  n = max;
411 
412  *old_head = r->prod.head;
413 
414  /* add an rmb barrier to avoid load/load reordering on weak
415  * memory models. It is a no-op on x86.
416  */
417  rte_smp_rmb();
418 
419  const uint32_t cons_tail = r->cons.tail;
420  /*
421  * The subtraction is done between two unsigned 32-bit values
422  * (the result is always modulo 32 bits, even if we have
423  * *old_head > cons_tail). So 'free_entries' is always between 0
424  * and capacity (which is < size).
425  */
426  *free_entries = (capacity + cons_tail - *old_head);
427 
428  /* check that we have enough room in ring */
429  if (unlikely(n > *free_entries))
430  n = (behavior == RTE_RING_QUEUE_FIXED) ?
431  0 : *free_entries;
432 
433  if (n == 0)
434  return 0;
435 
436  *new_head = *old_head + n;
437  if (is_sp)
438  r->prod.head = *new_head, success = 1;
439  else
440  success = rte_atomic32_cmpset(&r->prod.head,
441  *old_head, *new_head);
442  } while (unlikely(success == 0));
443  return n;
444 }
445 
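A concrete check of the modulo arithmetic above (values chosen for illustration): with capacity = 16 and 32-bit counters that have wrapped so that cons_tail = 0xFFFFFFFC and *old_head = 3, there are (3 - 0xFFFFFFFC) mod 2^32 = 7 entries still in flight, and free_entries = (16 + 0xFFFFFFFC - 3) mod 2^32 = 9, i.e. exactly capacity minus the 7 in-flight entries.
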
466 static __rte_always_inline unsigned int
467 __rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
468  unsigned int n, enum rte_ring_queue_behavior behavior,
469  int is_sp, unsigned int *free_space)
470 {
471  uint32_t prod_head, prod_next;
472  uint32_t free_entries;
473 
474  n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
475  &prod_head, &prod_next, &free_entries);
476  if (n == 0)
477  goto end;
478 
479  ENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n, void *);
480  rte_smp_wmb();
481 
482  update_tail(&r->prod, prod_head, prod_next, is_sp);
483 end:
484  if (free_space != NULL)
485  *free_space = free_entries - n;
486  return n;
487 }
488 
512 static __rte_always_inline unsigned int
513 __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
514  unsigned int n, enum rte_ring_queue_behavior behavior,
515  uint32_t *old_head, uint32_t *new_head,
516  uint32_t *entries)
517 {
518  unsigned int max = n;
519  int success;
520 
521  /* move cons.head atomically */
522  do {
523  /* Restore n as it may change every loop */
524  n = max;
525 
526  *old_head = r->cons.head;
527 
528  /* add an rmb barrier to avoid load/load reordering on weak
529  * memory models. It is a no-op on x86.
530  */
531  rte_smp_rmb();
532 
533  const uint32_t prod_tail = r->prod.tail;
534  /* The subtraction is done between two unsigned 32-bit values
535  * (the result is always modulo 32 bits, even if we have
536  * cons_head > prod_tail). So 'entries' is always between 0
537  * and size(ring)-1. */
538  *entries = (prod_tail - *old_head);
539 
540  /* Set the actual entries for dequeue */
541  if (n > *entries)
542  n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;
543 
544  if (unlikely(n == 0))
545  return 0;
546 
547  *new_head = *old_head + n;
548  if (is_sc)
549  r->cons.head = *new_head, success = 1;
550  else
551  success = rte_atomic32_cmpset(&r->cons.head, *old_head,
552  *new_head);
553  } while (unlikely(success == 0));
554  return n;
555 }
556 
577 static __rte_always_inline unsigned int
578 __rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
579  unsigned int n, enum rte_ring_queue_behavior behavior,
580  int is_sc, unsigned int *available)
581 {
582  uint32_t cons_head, cons_next;
583  uint32_t entries;
584 
585  n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
586  &cons_head, &cons_next, &entries);
587  if (n == 0)
588  goto end;
589 
590  DEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n, void *);
591  rte_smp_rmb();
592 
593  update_tail(&r->cons, cons_head, cons_next, is_sc);
594 
595 end:
596  if (available != NULL)
597  *available = entries - n;
598  return n;
599 }
600 
619 static __rte_always_inline unsigned int
620 rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
621  unsigned int n, unsigned int *free_space)
622 {
623  return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
624  __IS_MP, free_space);
625 }
626 
642 static __rte_always_inline unsigned int
643 rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
644  unsigned int n, unsigned int *free_space)
645 {
646  return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
647  __IS_SP, free_space);
648 }
649 
669 static __rte_always_inline unsigned int
670 rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
671  unsigned int n, unsigned int *free_space)
672 {
673  return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
674  r->prod.single, free_space);
675 }
676 
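A hedged usage sketch of the bulk enqueue call above (the helper name and batch size are illustrative; the ring is assumed to have been created as in the earlier sketch). With RTE_RING_QUEUE_FIXED semantics, a bulk call either enqueues all n objects or none of them:

#include <rte_ring.h>

static int enqueue_batch(struct rte_ring *r, void **objs, unsigned int n)
{
	unsigned int free_space;

	/* returns n on success, 0 if there was not room for all n objects */
	if (rte_ring_enqueue_bulk(r, objs, n, &free_space) == 0)
		return -1;	/* nothing was enqueued */

	/* free_space now holds the number of slots still free in the ring */
	return (int)free_space;
}
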
691 static __rte_always_inline int
692 rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
693 {
694  return rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
695 }
696 
708 static __rte_always_inline int
709 rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
710 {
711  return rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
712 }
713 
729 static __rte_always_inline int
730 rte_ring_enqueue(struct rte_ring *r, void *obj)
731 {
732  return rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
733 }
734 
753 static __rte_always_inline unsigned int
754 rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
755  unsigned int n, unsigned int *available)
756 {
757  return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
758  __IS_MC, available);
759 }
760 
777 static __rte_always_inline unsigned int
778 rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
779  unsigned int n, unsigned int *available)
780 {
781  return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
782  __IS_SC, available);
783 }
784 
804 static __rte_always_inline unsigned int
805 rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
806  unsigned int *available)
807 {
808  return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
809  r->cons.single, available);
810 }
811 
827 static __rte_always_inline int
828 rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
829 {
830  return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
831 }
832 
845 static __rte_always_inline int
846 rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
847 {
848  return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
849 }
850 
867 static __rte_always_inline int
868 rte_ring_dequeue(struct rte_ring *r, void **obj_p)
869 {
870  return rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
871 }
872 
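The single-object variants above simply map the bulk result onto an error code, as the return expressions show; a minimal sketch, assuming an already created ring (the helper name is illustrative):

#include <errno.h>
#include <rte_ring.h>

static int pass_one_object(struct rte_ring *r, void *obj)
{
	void *out = NULL;

	if (rte_ring_enqueue(r, obj) != 0)
		return -ENOBUFS;	/* ring was full, nothing was queued */

	if (rte_ring_dequeue(r, &out) != 0)
		return -ENOENT;		/* ring was empty (e.g. another consumer got there first) */

	/* out now holds a pointer that was previously enqueued on the ring */
	return 0;
}
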
881 static inline unsigned
882 rte_ring_count(const struct rte_ring *r)
883 {
884  uint32_t prod_tail = r->prod.tail;
885  uint32_t cons_tail = r->cons.tail;
886  uint32_t count = (prod_tail - cons_tail) & r->mask;
887  return (count > r->capacity) ? r->capacity : count;
888 }
889 
898 static inline unsigned
899 rte_ring_free_count(const struct rte_ring *r)
900 {
901  return r->capacity - rte_ring_count(r);
902 }
903 
913 static inline int
914 rte_ring_full(const struct rte_ring *r)
915 {
916  return rte_ring_free_count(r) == 0;
917 }
918 
928 static inline int
929 rte_ring_empty(const struct rte_ring *r)
930 {
931  return rte_ring_count(r) == 0;
932 }
933 
944 static inline unsigned int
945 rte_ring_get_size(const struct rte_ring *r)
946 {
947  return r->size;
948 }
949 
958 static inline unsigned int
959 rte_ring_get_capacity(const struct rte_ring *r)
960 {
961  return r->capacity;
962 }
963 
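The difference between size and capacity above can be made concrete with a small sketch (ring names and counts are illustrative; the expected values assume the 17.11 creation rules): without RING_F_EXACT_SZ the requested count must be a power of two and one slot is reserved, while RING_F_EXACT_SZ gives exactly the requested capacity at the cost of a larger internal size:

#include <stdio.h>
#include <rte_ring.h>

static void show_size_vs_capacity(void)
{
	struct rte_ring *a = rte_ring_create("cap_demo_a", 1024, SOCKET_ID_ANY, 0);
	struct rte_ring *b = rte_ring_create("cap_demo_b", 1024, SOCKET_ID_ANY,
					     RING_F_EXACT_SZ);

	if (a == NULL || b == NULL) {
		rte_ring_free(a);
		rte_ring_free(b);
		return;
	}

	/* expected: size = 1024, capacity = 1023 */
	printf("a: size=%u capacity=%u\n",
	       rte_ring_get_size(a), rte_ring_get_capacity(a));
	/* expected: capacity = 1024, size rounded up to 2048 */
	printf("b: size=%u capacity=%u\n",
	       rte_ring_get_size(b), rte_ring_get_capacity(b));

	rte_ring_free(a);
	rte_ring_free(b);
}
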
970 void rte_ring_list_dump(FILE *f);
971 
982 struct rte_ring *rte_ring_lookup(const char *name);
983 
1002 static __rte_always_inline unsigned
1003 rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
1004  unsigned int n, unsigned int *free_space)
1005 {
1006  return __rte_ring_do_enqueue(r, obj_table, n,
1007  RTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);
1008 }
1009 
1025 static __rte_always_inline unsigned
1026 rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
1027  unsigned int n, unsigned int *free_space)
1028 {
1029  return __rte_ring_do_enqueue(r, obj_table, n,
1030  RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
1031 }
1032 
1052 static __rte_always_inline unsigned
1053 rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
1054  unsigned int n, unsigned int *free_space)
1055 {
1056  return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE,
1057  r->prod.single, free_space);
1058 }
1059 
1080 static __rte_always_inline unsigned
1081 rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
1082  unsigned int n, unsigned int *available)
1083 {
1084  return __rte_ring_do_dequeue(r, obj_table, n,
1085  RTE_RING_QUEUE_VARIABLE, __IS_MC, available);
1086 }
1087 
1105 static __rte_always_inline unsigned
1106 rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
1107  unsigned int n, unsigned int *available)
1108 {
1109  return __rte_ring_do_dequeue(r, obj_table, n,
1110  RTE_RING_QUEUE_VARIABLE, __IS_SC, available);
1111 }
1112 
1132 static __rte_always_inline unsigned
1133 rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
1134  unsigned int n, unsigned int *available)
1135 {
1136  return __rte_ring_do_dequeue(r, obj_table, n,
1137  RTE_RING_QUEUE_VARIABLE,
1138  r->cons.single, available);
1139 }
1140 
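Finally, a short sketch contrasting the burst calls above with the bulk ones: a burst dequeue moves as many objects as are currently available and returns that count, so partial progress is expected rather than treated as failure (the helper name, burst size, and callback are illustrative):

#include <rte_ring.h>

#define BURST_SZ 32

static unsigned int drain_ring(struct rte_ring *r, void (*handle)(void *))
{
	void *objs[BURST_SZ];
	unsigned int i, n, total = 0;

	for (;;) {
		/* dequeue up to BURST_SZ objects; may return fewer, or 0 when empty */
		n = rte_ring_dequeue_burst(r, objs, BURST_SZ, NULL);
		if (n == 0)
			break;
		for (i = 0; i < n; i++)
			handle(objs[i]);
		total += n;
	}
	return total;
}
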
1141 #ifdef __cplusplus
1142 }
1143 #endif
1144 
1145 #endif /* _RTE_RING_H_ */