/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013-2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_FENCE_H__
#define __MSM_FENCE_H__

#include "msm_drv.h"

/**
 * struct msm_fence_context - fence context for gpu
 *
 * Each ringbuffer has a single fence context, with the GPU writing an
 * incrementing fence seqno at the end of each submit.
 */
struct msm_fence_context {
	/** dev: the drm device this fence timeline belongs to */
	struct drm_device *dev;
	/** name: human readable name for fence timeline */
	char name[32];
	/** context: see dma_fence_context_alloc() */
	unsigned context;

	/**
	 * last_fence:
	 *
	 * Last assigned fence seqno, incremented each time a fence is
	 * created on this fence context.  If last_fence == completed_fence,
	 * there is no remaining pending work.
	 */
	uint32_t last_fence;

	/**
	 * completed_fence:
	 *
	 * The last completed fence seqno, updated by the CPU after an
	 * interrupt from the GPU.
	 */
	uint32_t completed_fence;

	/**
	 * fenceptr:
	 *
	 * The address that the GPU directly writes the completed fence
	 * seqno to.  This can be ahead of completed_fence, so we can peek
	 * at it to see whether a fence has already signaled but the CPU
	 * has not yet handled the irq and updated completed_fence.
	 */
	volatile uint32_t *fenceptr;

	/** spinlock: protects updates to completed_fence */
	spinlock_t spinlock;
};

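/*
 * Illustrative sketch only (hypothetical helper, not part of the driver's
 * API): one way the fields above can be combined to check whether a given
 * seqno has signaled.  Check completed_fence first, then peek at fenceptr
 * for fences the GPU has already written but the CPU has not yet observed
 * in the irq handler.  The signed subtraction keeps the comparison valid
 * across 32-bit seqno wraparound.
 */
static inline bool msm_fence_completed_sketch(struct msm_fence_context *fctx,
		uint32_t fence)
{
	/* signaled if either the CPU-side or GPU-written seqno has passed it: */
	return (int32_t)(fctx->completed_fence - fence) >= 0 ||
		(int32_t)(*fctx->fenceptr - fence) >= 0;
}
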
struct msm_fence_context *msm_fence_context_alloc(struct drm_device *dev,
		volatile uint32_t *fenceptr, const char *name);
void msm_fence_context_free(struct msm_fence_context *fctx);

void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);

struct dma_fence *msm_fence_alloc(struct msm_fence_context *fctx);
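
/*
 * Usage sketch, illustration only: a hypothetical init path showing the
 * calls above together.  In practice a fence context is created once per
 * ring and reused for every submit, and msm_update_fence() is called from
 * the CPU side once the GPU's completion interrupt has fired.  Assumes,
 * as a sketch, that both allocators return an ERR_PTR() on failure.
 */
static inline struct dma_fence *example_ring_first_fence(struct drm_device *dev,
		volatile uint32_t *gpu_seqno_ptr)
{
	struct msm_fence_context *fctx;
	struct dma_fence *fence;

	/* One fence timeline per ring, with a human readable name: */
	fctx = msm_fence_context_alloc(dev, gpu_seqno_ptr, "example-ring");
	if (IS_ERR(fctx))
		return ERR_CAST(fctx);

	/* One fence per submit on that timeline: */
	fence = msm_fence_alloc(fctx);
	if (IS_ERR(fence))
		msm_fence_context_free(fctx);

	return fence;
}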

#endif /* __MSM_FENCE_H__ */