/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/option.h>
#include <subdev/top.h>

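/* Poke the chipset-specific "unk260" hook, if the bound implementation
 * provides one.  The hook's exact purpose is not documented here; the name
 * presumably refers to the MC register it touches.
 */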
void
nvkm_mc_unk260(struct nvkm_device *device, u32 data)
{
	struct nvkm_mc *mc = device->mc;
	if (likely(mc) && mc->func->unk260)
		mc->func->unk260(mc, data);
}

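/* Enable (en=true) or disable delivery of the interrupt(s) belonging to a
 * single subdev.  The mask is taken from the TOP tables when available,
 * otherwise from the MC implementation's static intr map.
 */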
void
nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, bool en)
{
	struct nvkm_mc *mc = device->mc;
	const struct nvkm_mc_map *map;
	if (likely(mc) && mc->func->intr_mask) {
		u32 mask = nvkm_top_intr_mask(device, type, inst);
		for (map = mc->func->intr; !mask && map->stat; map++) {
			if (map->type == type && map->inst == inst)
				mask = map->stat;
		}
		mc->func->intr_mask(mc, mask, en ? mask : 0);
	}
}

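/* Disable delivery of all MC interrupts to the host, e.g. while pending
 * interrupts are being processed or across suspend.
 */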
void
nvkm_mc_intr_unarm(struct nvkm_device *device)
{
	struct nvkm_mc *mc = device->mc;
	if (likely(mc))
		mc->func->intr_unarm(mc);
}

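/* Re-enable delivery of MC interrupts to the host. */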
void
nvkm_mc_intr_rearm(struct nvkm_device *device)
{
	struct nvkm_mc *mc = device->mc;
	if (likely(mc))
		mc->func->intr_rearm(mc);
}

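/* Read the pending interrupt status.  An all-ones read is assumed to mean
 * the device has fallen off the bus and is treated as "nothing pending".
 */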
static u32
nvkm_mc_intr_stat(struct nvkm_mc *mc)
{
	u32 intr = mc->func->intr_stat(mc);
	if (WARN_ON_ONCE(intr == 0xffffffff))
		intr = 0; /* likely fallen off the bus */
	return intr;
}

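/* Dispatch pending interrupts to their subdevs: first via the TOP device
 * list (if present), then via the MC implementation's static intr map.
 * Anything left unclaimed is logged, and *handled reports whether any
 * interrupt was pending at all.
 */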
void
nvkm_mc_intr(struct nvkm_device *device, bool *handled)
{
	struct nvkm_mc *mc = device->mc;
	struct nvkm_top *top = device->top;
	struct nvkm_top_device *tdev;
	struct nvkm_subdev *subdev;
	const struct nvkm_mc_map *map;
	u32 stat, intr;

	if (unlikely(!mc))
		return;

	stat = intr = nvkm_mc_intr_stat(mc);

	if (top) {
		list_for_each_entry(tdev, &top->device, head) {
			if (tdev->intr >= 0 && (stat & BIT(tdev->intr))) {
				subdev = nvkm_device_subdev(device, tdev->type, tdev->inst);
				if (subdev) {
					nvkm_subdev_intr(subdev);
					stat &= ~BIT(tdev->intr);
					if (!stat)
						break;
				}
			}
		}
	}

	for (map = mc->func->intr; map->stat; map++) {
		if (intr & map->stat) {
			subdev = nvkm_device_subdev(device, map->type, map->inst);
			if (subdev)
				nvkm_subdev_intr(subdev);
			stat &= ~map->stat;
		}
	}

	if (stat)
		nvkm_error(&mc->subdev, "intr %08x\n", stat);
	*handled = intr != 0;
}

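/* Look up the PMC_ENABLE bit(s) controlling a subdev, preferring the TOP
 * tables and falling back to the MC implementation's static reset map.
 * Entries marked noauto are skipped for automatic (isauto) resets.
 */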
static u32
nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto, enum nvkm_subdev_type type, int inst)
{
	struct nvkm_mc *mc = device->mc;
	const struct nvkm_mc_map *map;
	u64 pmc_enable = 0;
	if (likely(mc)) {
		if (!(pmc_enable = nvkm_top_reset(device, type, inst))) {
			for (map = mc->func->reset; map && map->stat; map++) {
				if (!isauto || !map->noauto) {
					if (map->type == type && map->inst == inst) {
						pmc_enable = map->stat;
						break;
					}
				}
			}
		}
	}
	return pmc_enable;
}

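/* Reset a subdev by pulsing its bit(s) in PMC_ENABLE (0x000200) low and
 * then high again, followed by a read-back to post the writes.
 */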
void
nvkm_mc_reset(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
	u64 pmc_enable = nvkm_mc_reset_mask(device, true, type, inst);
	if (pmc_enable) {
		nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
		nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
		nvkm_rd32(device, 0x000200);
	}
}

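/* Clear a subdev's bit(s) in PMC_ENABLE, holding it disabled/in reset. */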
void
nvkm_mc_disable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
	u64 pmc_enable = nvkm_mc_reset_mask(device, false, type, inst);
	if (pmc_enable)
		nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
}

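/* Set a subdev's bit(s) in PMC_ENABLE and read back to post the write. */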
void
nvkm_mc_enable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
	u64 pmc_enable = nvkm_mc_reset_mask(device, false, type, inst);
	if (pmc_enable) {
		nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
		nvkm_rd32(device, 0x000200);
	}
}

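/* Report whether a subdev has PMC_ENABLE bit(s) and all of them are set. */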
bool
nvkm_mc_enabled(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
	u64 pmc_enable = nvkm_mc_reset_mask(device, false, type, inst);

	return (pmc_enable != 0) &&
	       ((nvkm_rd32(device, 0x000200) & pmc_enable) == pmc_enable);
}

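/* Subdev fini: disarm MC interrupt delivery. */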
static int
nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend)
{
	nvkm_mc_intr_unarm(subdev->device);
	return 0;
}

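/* Subdev init: run any chipset-specific setup, then re-arm interrupts. */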
static int
nvkm_mc_init(struct nvkm_subdev *subdev)
{
	struct nvkm_mc *mc = nvkm_mc(subdev);
	if (mc->func->init)
		mc->func->init(mc);
	nvkm_mc_intr_rearm(subdev->device);
	return 0;
}

static void *
nvkm_mc_dtor(struct nvkm_subdev *subdev)
{
	return nvkm_mc(subdev);
}

static const struct nvkm_subdev_func
nvkm_mc = {
	.dtor = nvkm_mc_dtor,
	.init = nvkm_mc_init,
	.fini = nvkm_mc_fini,
};

void
nvkm_mc_ctor(const struct nvkm_mc_func *func, struct nvkm_device *device,
	     enum nvkm_subdev_type type, int inst, struct nvkm_mc *mc)
{
	nvkm_subdev_ctor(&nvkm_mc, device, type, inst, &mc->subdev);
	mc->func = func;
}

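/* Allocate a generic nvkm_mc and construct it with the given chipset
 * implementation.
 */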
int
nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
	     enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
	struct nvkm_mc *mc;
	if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_mc_ctor(func, device, type, inst, *pmc);
	return 0;
}