/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "radeon.h"
#include "cikd.h"
#include "kv_dpm.h"

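/*
 * Post a message id to the SMU message register and poll the response
 * register until it becomes non-zero or rdev->usec_timeout expires.
 * Responses of 0xFF and 0xFE are treated as errors.
 */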
int kv_notify_message_to_smu(struct radeon_device *rdev, u32 id)
{
	u32 i;
	u32 tmp = 0;

	WREG32(SMC_MESSAGE_0, id & SMC_MSG_MASK);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if ((RREG32(SMC_RESP_0) & SMC_RESP_MASK) != 0)
			break;
		udelay(1);
	}
	tmp = RREG32(SMC_RESP_0) & SMC_RESP_MASK;

	if (tmp != 1) {
		if (tmp == 0xFF)
			return -EINVAL;
		else if (tmp == 0xFE)
			return -EINVAL;
	}

	return 0;
}

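/*
 * Ask the SMU for the mask of enabled SCLK DPM levels and read the
 * result back from the message argument register.
 */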
int kv_dpm_get_enable_mask(struct radeon_device *rdev, u32 *enable_mask)
{
	int ret;

	ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_SCLKDPM_GetEnabledMask);

	if (ret == 0)
		*enable_mask = RREG32_SMC(SMC_SYSCON_MSG_ARG_0);

	return ret;
}

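/*
 * Write a parameter into the message argument register, then send the
 * message itself.
 */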
int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
				      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_MSG_ARG_0, parameter);

	return kv_notify_message_to_smu(rdev, msg);
}

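/*
 * Set up the indirect index register for an SMC SRAM access. The address
 * must be dword aligned and within the limit; auto-increment is disabled.
 */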
static int kv_set_smc_sram_address(struct radeon_device *rdev,
				   u32 smc_address, u32 limit)
{
	if (smc_address & 3)
		return -EINVAL;
	if ((smc_address + 3) > limit)
		return -EINVAL;

	WREG32(SMC_IND_INDEX_0, smc_address);
	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);

	return 0;
}

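/*
 * Read a single dword from SMC SRAM at a dword aligned address.
 */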
int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
			   u32 *value, u32 limit)
{
	int ret;

	ret = kv_set_smc_sram_address(rdev, smc_address, limit);
	if (ret)
		return ret;

	*value = RREG32(SMC_IND_DATA_0);
	return 0;
}

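/*
 * Ask the SMU to enable or disable DPM.
 */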
int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Enable);
	else
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable);
}

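/*
 * Ask the SMU to enable or disable BAPM.
 */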
int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
	else
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
}

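/*
 * Copy a byte buffer into SMC SRAM. The SMC address space is big endian
 * and only dword accessible, so any unaligned leading and trailing bytes
 * are merged with the existing contents via read-modify-write cycles and
 * the aligned middle portion is written one dword at a time.
 */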
int kv_copy_bytes_to_smc(struct radeon_device *rdev,
			 u32 smc_start_address,
			 const u8 *src, u32 byte_count, u32 limit)
{
	int ret;
	u32 data, original_data, addr, extra_shift, t_byte, count, mask;

	if ((smc_start_address + byte_count) > limit)
		return -EINVAL;

	addr = smc_start_address;
	t_byte = addr & 3;

	/* RMW for the initial bytes */
	if (t_byte != 0) {
		addr -= t_byte;

		ret = kv_set_smc_sram_address(rdev, addr, limit);
		if (ret)
			return ret;

		original_data = RREG32(SMC_IND_DATA_0);

		data = 0;
		mask = 0;
		count = 4;
		while (count > 0) {
			if (t_byte > 0) {
				mask = (mask << 8) | 0xff;
				t_byte--;
			} else if (byte_count > 0) {
				data = (data << 8) + *src++;
				byte_count--;
				mask <<= 8;
			} else {
				data <<= 8;
				mask = (mask << 8) | 0xff;
			}
			count--;
		}

		data |= original_data & mask;

		ret = kv_set_smc_sram_address(rdev, addr, limit);
		if (ret)
			return ret;

		WREG32(SMC_IND_DATA_0, data);

		addr += 4;
	}

	while (byte_count >= 4) {
		/* SMC address space is BE */
		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];

		ret = kv_set_smc_sram_address(rdev, addr, limit);
		if (ret)
			return ret;

		WREG32(SMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	/* RMW for the final bytes */
	if (byte_count > 0) {
		data = 0;

		ret = kv_set_smc_sram_address(rdev, addr, limit);
		if (ret)
			return ret;

		original_data = RREG32(SMC_IND_DATA_0);

		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* SMC address space is BE */
			data = (data << 8) + *src++;
			byte_count--;
		}

		data <<= extra_shift;

		data |= (original_data & ~((~0UL) << extra_shift));

		ret = kv_set_smc_sram_address(rdev, addr, limit);
		if (ret)
			return ret;

		WREG32(SMC_IND_DATA_0, data);
	}
	return 0;
}