/* SPDX-License-Identifier:    GPL-2.0
 *
 * Copyright (C) 2018 Marvell International Ltd.
 */

/**
 * Atomically adds a signed value to a 64-bit (aligned) memory location,
 * and returns the previous value.
 *
 * This version does not perform 'sync' operations to enforce memory
 * ordering.  It should only be used when there are no memory operation
 * ordering constraints.  (It should NOT be used for reference counting -
 * use the standard version instead.)
 *
 * @param ptr    address in memory to add incr to
 * @param incr   amount to increment memory location by (signed)
 *
 * @return Value of memory location before increment
 */
static inline s64 atomic_fetch_and_add64_nosync(s64 *ptr, s64 incr)
{
	s64 result;
	/* Atomic add with no ordering */
	asm volatile("ldadd %x[i], %x[r], [%[b]]"
		     : [r] "=r" (result), "+m" (*ptr)
		     : [i] "r" (incr), [b] "r" (ptr)
		     : "memory");
	return result;
}

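/*
 * Illustrative use of the helper above (a sketch, not code taken from
 * this driver): bump a 64-bit statistics counter when ordering against
 * other memory accesses does not matter.
 *
 *	static s64 tx_pkts;
 *	s64 prev;
 *
 *	prev = atomic_fetch_and_add64_nosync(&tx_pkts, 1);
 */

/**
 * Cancel any in-flight LMT transfer for this LF by writing to the
 * LMTCANCEL register in its LMT region.
 *
 * @param nix    NIX LF whose pending LMT operation should be cancelled
 */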
static inline void lmt_cancel(const struct nix *nix)
{
	writeq(0, nix->lmt_base + LMT_LF_LMTCANCEL());
}

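/**
 * Return a pointer to the first LMT line (LMTLINEX(0)) in the LF's LMT
 * region.  Callers stage the command words of an LMT store here before
 * issuing it with lmt_submit().
 *
 * @param nix    NIX LF whose LMT line is requested
 *
 * @return Pointer to the start of LMT line 0
 */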
static inline u64 *lmt_store_ptr(struct nix *nix)
{
	return (u64 *)((u8 *)(nix->lmt_base) + LMT_LF_LMTLINEX(0));
}

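/**
 * Issue the LMT store staged in the LMT line.  The LDEOR performs an
 * atomic exclusive-OR of XZR (zero, so the data is left unchanged) at
 * io_address and returns the value read back from the device, which the
 * caller can use as a completion/status indication.
 *
 * @param io_address I/O address of the device register that accepts the
 *                   LMT store (for example a NIX LF operation address)
 *
 * @return Value read back from io_address
 */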
static inline s64 lmt_submit(u64 io_address)
{
	s64 result = 0;

	asm volatile("ldeor xzr, %x[rf], [%[rs]]"
			: [rf] "=r"(result) : [rs] "r"(io_address));
	return result;
}

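/*
 * Typical submit sequence, shown only as an illustrative sketch: the
 * descriptor layout, the submit address and the retry policy belong to
 * the caller (desc, desc_size and nix_lf_op_addr below are placeholders).
 * A write barrier keeps the staged words ordered before the trigger, and
 * a zero status is commonly treated as "store did not complete, retry".
 *
 *	s64 status;
 *
 *	do {
 *		memcpy(lmt_store_ptr(nix), desc, desc_size);
 *		__iowmb();
 *		status = lmt_submit(nix_lf_op_addr);
 *	} while (status == 0);
 */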