/*
 * vpmu.c: PMU virtualization for HVM domain.
 *
 * Copyright (c) 2010, Advanced Micro Devices, Inc.
 * Parts of this code are Copyright (c) 2007, Intel Corporation
 *
 * Author: Wei Wang <wei.wang2@amd.com>
 * Tested by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include <xen/xenoprof.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <asm/apic.h>
#include <asm/vpmu.h>
#include <asm/hvm/save.h>
#include <asm/hvm/vlapic.h>
#include <public/pmu.h>

#define MSR_F10H_EVNTSEL_GO_SHIFT 40
#define MSR_F10H_EVNTSEL_EN_SHIFT 22
#define MSR_F10H_COUNTER_LENGTH 48

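/*
 * EVNTSEL bit 40 is the AMD "guest only" (GO) bit and bit 22 the enable bit;
 * the performance counters are 48 bits wide.  A counter programmed with
 * bit 47 set is treated as having overflowed once that bit reads back clear.
 */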
#define is_guest_mode(msr) ((msr) & (1ULL << MSR_F10H_EVNTSEL_GO_SHIFT))
#define is_pmu_enabled(msr) ((msr) & (1ULL << MSR_F10H_EVNTSEL_EN_SHIFT))
#define set_guest_mode(msr) (msr |= (1ULL << MSR_F10H_EVNTSEL_GO_SHIFT))
#define is_overflowed(msr) (!((msr) & (1ULL << (MSR_F10H_COUNTER_LENGTH-1))))

static unsigned int __read_mostly num_counters;
static const u32 __read_mostly *counters;
static const u32 __read_mostly *ctrls;
static bool_t __read_mostly k7_counters_mirrored;

/* Total size of PMU registers block (copied to/from PV(H) guest) */
static unsigned int __read_mostly regs_sz;

#define F10H_NUM_COUNTERS 4
#define F15H_NUM_COUNTERS 6
#define MAX_NUM_COUNTERS F15H_NUM_COUNTERS

/* PMU Counter MSRs. */
static const u32 AMD_F10H_COUNTERS[] = {
    MSR_K7_PERFCTR0,
    MSR_K7_PERFCTR1,
    MSR_K7_PERFCTR2,
    MSR_K7_PERFCTR3
};

/* PMU Control MSRs. */
static const u32 AMD_F10H_CTRLS[] = {
    MSR_K7_EVNTSEL0,
    MSR_K7_EVNTSEL1,
    MSR_K7_EVNTSEL2,
    MSR_K7_EVNTSEL3
};

static const u32 AMD_F15H_COUNTERS[] = {
    MSR_AMD_FAM15H_PERFCTR0,
    MSR_AMD_FAM15H_PERFCTR1,
    MSR_AMD_FAM15H_PERFCTR2,
    MSR_AMD_FAM15H_PERFCTR3,
    MSR_AMD_FAM15H_PERFCTR4,
    MSR_AMD_FAM15H_PERFCTR5
};

static const u32 AMD_F15H_CTRLS[] = {
    MSR_AMD_FAM15H_EVNTSEL0,
    MSR_AMD_FAM15H_EVNTSEL1,
    MSR_AMD_FAM15H_EVNTSEL2,
    MSR_AMD_FAM15H_EVNTSEL3,
    MSR_AMD_FAM15H_EVNTSEL4,
    MSR_AMD_FAM15H_EVNTSEL5
};

/* Bits [63:42], [39:36], 21 and 19 are reserved */
#define CTRL_RSVD_MASK ((-1ULL & (~((1ULL << 42) - 1))) | \
                        (0xfULL << 36) | (1ULL << 21) | (1ULL << 19))
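/*
 * ctrl_rsvd[] is filled in at boot (common_init()) with the reserved bits
 * read back from the host's event-select MSRs; guest writes must leave
 * those bits exactly as found or they are rejected.
 */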
static uint64_t __read_mostly ctrl_rsvd[MAX_NUM_COUNTERS];

/* Use private context as a flag for MSR bitmap */
#define msr_bitmap_on(vpmu)    do {                                    \
                                   (vpmu)->priv_context = (void *)-1L; \
                               } while (0)
#define msr_bitmap_off(vpmu)   do {                                    \
                                   (vpmu)->priv_context = NULL;        \
                               } while (0)
#define is_msr_bitmap_on(vpmu) ((vpmu)->priv_context != NULL)

static inline int get_pmu_reg_type(u32 addr, unsigned int *idx)
{
    if ( (addr >= MSR_K7_EVNTSEL0) && (addr <= MSR_K7_EVNTSEL3) )
    {
        *idx = addr - MSR_K7_EVNTSEL0;
        return MSR_TYPE_CTRL;
    }

    if ( (addr >= MSR_K7_PERFCTR0) && (addr <= MSR_K7_PERFCTR3) )
    {
        *idx = addr - MSR_K7_PERFCTR0;
        return MSR_TYPE_COUNTER;
    }

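    /*
     * Family 15h event-select and counter MSRs are interleaved (EVNTSELn at
     * even offsets from EVNTSEL0, PERFCTRn at odd ones), so the pair index
     * is the offset divided by two and the low address bit tells the
     * register type apart.
     */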
    if ( (addr >= MSR_AMD_FAM15H_EVNTSEL0) &&
         (addr <= MSR_AMD_FAM15H_PERFCTR5 ) )
    {
        *idx = (addr - MSR_AMD_FAM15H_EVNTSEL0) >> 1;
        if (addr & 1)
            return MSR_TYPE_COUNTER;
        else
            return MSR_TYPE_CTRL;
    }

    /* unsupported registers */
    return -1;
}

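/*
 * Translate a legacy K7 performance MSR address to its Family 15h
 * equivalent; addresses outside the K7 range are returned unchanged.
 */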
static inline u32 get_fam15h_addr(u32 addr)
{
    switch ( addr )
    {
    case MSR_K7_PERFCTR0:
        return MSR_AMD_FAM15H_PERFCTR0;
    case MSR_K7_PERFCTR1:
        return MSR_AMD_FAM15H_PERFCTR1;
    case MSR_K7_PERFCTR2:
        return MSR_AMD_FAM15H_PERFCTR2;
    case MSR_K7_PERFCTR3:
        return MSR_AMD_FAM15H_PERFCTR3;
    case MSR_K7_EVNTSEL0:
        return MSR_AMD_FAM15H_EVNTSEL0;
    case MSR_K7_EVNTSEL1:
        return MSR_AMD_FAM15H_EVNTSEL1;
    case MSR_K7_EVNTSEL2:
        return MSR_AMD_FAM15H_EVNTSEL2;
    case MSR_K7_EVNTSEL3:
        return MSR_AMD_FAM15H_EVNTSEL3;
    default:
        break;
    }

    return addr;
}

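/*
 * Reset the saved register block: zero everything, then seed each control
 * register with the host's reserved-bit values from ctrl_rsvd[].
 */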
static void amd_vpmu_init_regs(struct xen_pmu_amd_ctxt *ctxt)
{
    unsigned i;
    uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);

    memset(&ctxt->regs[0], 0, regs_sz);
    for ( i = 0; i < num_counters; i++ )
        ctrl_regs[i] = ctrl_rsvd[i];
}

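/*
 * Give the guest direct access to the counter MSRs and read access to the
 * event-select MSRs; writes to the latter stay intercepted so reserved bits
 * can still be policed.  amd_vpmu_unset_msr_bitmap() below restores full
 * interception.
 */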
static void amd_vpmu_set_msr_bitmap(struct vcpu *v)
{
    unsigned int i;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    for ( i = 0; i < num_counters; i++ )
    {
        svm_intercept_msr(v, counters[i], MSR_INTERCEPT_NONE);
        svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_WRITE);
    }

    msr_bitmap_on(vpmu);
}

static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
{
    unsigned int i;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    for ( i = 0; i < num_counters; i++ )
    {
        svm_intercept_msr(v, counters[i], MSR_INTERCEPT_RW);
        svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_RW);
    }

    msr_bitmap_off(vpmu);
}

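/*
 * There is no AMD-specific state to update on a counter overflow interrupt;
 * simply report it as handled to the common vpmu code.
 */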
static int amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
{
    return 1;
}

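/*
 * Write the saved counter and control values back into the hardware MSRs.
 */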
static inline void context_load(struct vcpu *v)
{
    unsigned int i;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
    uint64_t *counter_regs = vpmu_reg_pointer(ctxt, counters);
    uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);

    for ( i = 0; i < num_counters; i++ )
    {
        wrmsrl(counters[i], counter_regs[i]);
        wrmsrl(ctrls[i], ctrl_regs[i]);
    }
}

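/*
 * Load the vCPU's PMU state.  On an ordinary context switch with the context
 * already loaded only the control registers need to be rewritten.  When the
 * request comes from a PV(H) guest the register block is copied in from the
 * shared page first, and the event-select values are validated against the
 * reserved-bit mask before anything is written to hardware.
 */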
static int amd_vpmu_load(struct vcpu *v, bool_t from_guest)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct xen_pmu_amd_ctxt *ctxt;
    uint64_t *ctrl_regs;
    unsigned int i;

    vpmu_reset(vpmu, VPMU_FROZEN);

    if ( !from_guest && vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
    {
        ctxt = vpmu->context;
        ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);

        for ( i = 0; i < num_counters; i++ )
            wrmsrl(ctrls[i], ctrl_regs[i]);

        return 0;
    }

    if ( from_guest )
    {
        bool_t is_running = 0;
        struct xen_pmu_amd_ctxt *guest_ctxt = &vpmu->xenpmu_data->pmu.c.amd;

        ASSERT(!has_vlapic(v->domain));

        ctxt = vpmu->context;
        ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);

        memcpy(&ctxt->regs[0], &guest_ctxt->regs[0], regs_sz);

        for ( i = 0; i < num_counters; i++ )
        {
            if ( (ctrl_regs[i] & CTRL_RSVD_MASK) != ctrl_rsvd[i] )
            {
                /*
                 * Not necessary to re-init context since we should never load
                 * it until guest provides valid values. But just to be safe.
                 */
                amd_vpmu_init_regs(ctxt);
                return -EINVAL;
            }

            if ( is_pmu_enabled(ctrl_regs[i]) )
                is_running = 1;
        }

        if ( is_running )
            vpmu_set(vpmu, VPMU_RUNNING);
        else
            vpmu_reset(vpmu, VPMU_RUNNING);
    }

    vpmu_set(vpmu, VPMU_CONTEXT_LOADED);

    context_load(v);

    return 0;
}

static inline void context_save(struct vcpu *v)
{
    unsigned int i;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
    uint64_t *counter_regs = vpmu_reg_pointer(ctxt, counters);

    /* No need to save controls -- they are saved in amd_vpmu_do_wrmsr */
    for ( i = 0; i < num_counters; i++ )
        rdmsrl(counters[i], counter_regs[i]);
}

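/*
 * Save the vCPU's PMU state.  The counters are always stopped; the counter
 * values themselves are only read back when a full context save was
 * requested, and are additionally copied out to the shared page when the
 * save was triggered on behalf of a PV(H) guest.
 */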
static int amd_vpmu_save(struct vcpu *v, bool_t to_guest)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    unsigned int i;

    /* Stop the counters. */
    for ( i = 0; i < num_counters; i++ )
        wrmsrl(ctrls[i], 0);

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_SAVE) )
    {
        vpmu_set(vpmu, VPMU_FROZEN);
        return 0;
    }

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
        return 0;

    context_save(v);

    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
         is_msr_bitmap_on(vpmu) )
        amd_vpmu_unset_msr_bitmap(v);

    if ( to_guest )
    {
        struct xen_pmu_amd_ctxt *guest_ctxt, *ctxt;

        ASSERT(!has_vlapic(v->domain));
        ctxt = vpmu->context;
        guest_ctxt = &vpmu->xenpmu_data->pmu.c.amd;
        memcpy(&guest_ctxt->regs[0], &ctxt->regs[0], regs_sz);
    }

    return 1;
}

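/*
 * Mirror a guest MSR write into the saved register block, translating
 * legacy K7 addresses to their Family 15h aliases where applicable.
 */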
static void context_update(unsigned int msr, u64 msr_content)
{
    unsigned int i;
    struct vcpu *v = current;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
    uint64_t *counter_regs = vpmu_reg_pointer(ctxt, counters);
    uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);

    if ( k7_counters_mirrored &&
         ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)) )
    {
        msr = get_fam15h_addr(msr);
    }

    for ( i = 0; i < num_counters; i++ )
    {
        if ( msr == ctrls[i] )
        {
            ctrl_regs[i] = msr_content;
            return;
        }
        else if ( msr == counters[i] )
        {
            counter_regs[i] = msr_content;
            return;
        }
    }
}

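/*
 * Handle a guest write to a PMU MSR: reject values that touch reserved
 * event-select bits, force guest-only counting for HVM guests, start or
 * stop the vpmu as the enable bit is toggled, then update the saved
 * context and the hardware register.
 */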
static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
                             uint64_t supported)
{
    struct vcpu *v = current;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    unsigned int idx = 0;
    int type = get_pmu_reg_type(msr, &idx);

    ASSERT(!supported);

    if ( (type == MSR_TYPE_CTRL) &&
         ((msr_content & CTRL_RSVD_MASK) != ctrl_rsvd[idx]) )
        return -EINVAL;

    /* For all counters, enable guest only mode for HVM guest */
    if ( is_hvm_vcpu(v) && (type == MSR_TYPE_CTRL) &&
         !is_guest_mode(msr_content) )
    {
        set_guest_mode(msr_content);
    }

    /* check if the first counter is enabled */
    if ( (type == MSR_TYPE_CTRL) &&
         is_pmu_enabled(msr_content) && !vpmu_is_set(vpmu, VPMU_RUNNING) )
    {
        if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
            return 0;
        vpmu_set(vpmu, VPMU_RUNNING);

        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
            amd_vpmu_set_msr_bitmap(v);
    }

    /* stop saving & restore if guest stops first counter */
    if ( (type == MSR_TYPE_CTRL) &&
         (is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
    {
        vpmu_reset(vpmu, VPMU_RUNNING);
        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
            amd_vpmu_unset_msr_bitmap(v);
        release_pmu_ownership(PMU_OWNER_HVM);
    }

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)
         || vpmu_is_set(vpmu, VPMU_FROZEN) )
    {
        context_load(v);
        vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
        vpmu_reset(vpmu, VPMU_FROZEN);
    }

    /* Update vpmu context immediately */
    context_update(msr, msr_content);

    /* Write to hw counters */
    wrmsrl(msr, msr_content);
    return 0;
}

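/*
 * Handle a guest read of a PMU MSR: reload the context first if this vCPU's
 * values are not currently in hardware, then read the physical register.
 */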
static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
{
    struct vcpu *v = current;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)
         || vpmu_is_set(vpmu, VPMU_FROZEN) )
    {
        context_load(v);
        vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
        vpmu_reset(vpmu, VPMU_FROZEN);
    }

    rdmsrl(msr, *msr_content);

    return 0;
}

static void amd_vpmu_destroy(struct vcpu *v)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
        amd_vpmu_unset_msr_bitmap(v);

    xfree(vpmu->context);
    vpmu->context = NULL;
    vpmu->priv_context = NULL;

    if ( vpmu_is_set(vpmu, VPMU_RUNNING) )
        release_pmu_ownership(PMU_OWNER_HVM);

    vpmu_clear(vpmu);
}

/* VPMU part of the 'q' keyhandler */
static void amd_vpmu_dump(const struct vcpu *v)
{
    const struct vpmu_struct *vpmu = vcpu_vpmu(v);
    const struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
    const uint64_t *counter_regs = vpmu_reg_pointer(ctxt, counters);
    const uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
    unsigned int i;

    printk(" VPMU state: 0x%x ", vpmu->flags);
    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
    {
        printk("\n");
        return;
    }

    printk("(");
    if ( vpmu_is_set(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED) )
        printk("PASSIVE_DOMAIN_ALLOCATED, ");
    if ( vpmu_is_set(vpmu, VPMU_FROZEN) )
        printk("FROZEN, ");
    if ( vpmu_is_set(vpmu, VPMU_CONTEXT_SAVE) )
        printk("SAVE, ");
    if ( vpmu_is_set(vpmu, VPMU_RUNNING) )
        printk("RUNNING, ");
    if ( vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
        printk("LOADED, ");
    printk("ALLOCATED)\n");

    for ( i = 0; i < num_counters; i++ )
    {
        uint64_t ctrl, cntr;

        rdmsrl(ctrls[i], ctrl);
        rdmsrl(counters[i], cntr);
        printk(" %#x: %#lx (%#lx in HW) %#x: %#lx (%#lx in HW)\n",
               ctrls[i], ctrl_regs[i], ctrl,
               counters[i], counter_regs[i], cntr);
    }
}

static const struct arch_vpmu_ops amd_vpmu_ops = {
    .do_wrmsr = amd_vpmu_do_wrmsr,
    .do_rdmsr = amd_vpmu_do_rdmsr,
    .do_interrupt = amd_vpmu_do_interrupt,
    .arch_vpmu_destroy = amd_vpmu_destroy,
    .arch_vpmu_save = amd_vpmu_save,
    .arch_vpmu_load = amd_vpmu_load,
    .arch_vpmu_dump = amd_vpmu_dump
};

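/*
 * Allocate and initialise the per-vCPU PMU context.  The register block
 * follows the fixed header; the 'counters' and 'ctrls' fields hold byte
 * offsets from the start of the structure to the respective arrays:
 *
 *   header | num_counters counter values | num_counters control values
 */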
int svm_vpmu_initialise(struct vcpu *v)
{
    struct xen_pmu_amd_ctxt *ctxt;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    if ( vpmu_mode == XENPMU_MODE_OFF )
        return 0;

    if ( !counters )
        return -EINVAL;

    ctxt = xmalloc_bytes(sizeof(*ctxt) + regs_sz);
    if ( !ctxt )
    {
        printk(XENLOG_G_WARNING "Insufficient memory for PMU, "
               "PMU feature is unavailable on domain %d vcpu %d.\n",
               v->domain->domain_id, v->vcpu_id);
        return -ENOMEM;
    }

    ctxt->counters = sizeof(*ctxt);
    ctxt->ctrls = ctxt->counters + sizeof(uint64_t) * num_counters;
    amd_vpmu_init_regs(ctxt);

    vpmu->context = ctxt;
    vpmu->priv_context = NULL;

    if ( !has_vlapic(v->domain) )
    {
        /* Copy register offsets to shared area */
        ASSERT(vpmu->xenpmu_data);
        memcpy(&vpmu->xenpmu_data->pmu.c.amd, ctxt,
               offsetof(struct xen_pmu_amd_ctxt, regs));
    }

    vpmu->arch_vpmu_ops = &amd_vpmu_ops;

    vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
    return 0;
}

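/*
 * Family-independent tail of initialisation: check that a supported counter
 * bank was selected, that it fits in the shared VPMU page, and record the
 * host's reserved event-select bits in ctrl_rsvd[].
 */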
static int __init common_init(void)
{
    unsigned int i;

    if ( !num_counters )
    {
        printk(XENLOG_WARNING "VPMU: Unsupported CPU family %#x\n",
               current_cpu_data.x86);
        return -EINVAL;
    }

    if ( sizeof(struct xen_pmu_data) +
         2 * sizeof(uint64_t) * num_counters > PAGE_SIZE )
    {
        printk(XENLOG_WARNING
               "VPMU: Register bank does not fit into VPMU shared page\n");
        counters = ctrls = NULL;
        num_counters = 0;
        return -ENOSPC;
    }

    for ( i = 0; i < num_counters; i++ )
    {
        rdmsrl(ctrls[i], ctrl_rsvd[i]);
        ctrl_rsvd[i] &= CTRL_RSVD_MASK;
    }

    regs_sz = 2 * sizeof(uint64_t) * num_counters;

    return 0;
}

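/*
 * Select the counter bank matching the CPU family.  Families without an
 * entry leave num_counters at zero, which makes common_init() fail.
 */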
int __init amd_vpmu_init(void)
{
    switch ( current_cpu_data.x86 )
    {
    case 0x15:
    case 0x17:
    case 0x19:
        num_counters = F15H_NUM_COUNTERS;
        counters = AMD_F15H_COUNTERS;
        ctrls = AMD_F15H_CTRLS;
        k7_counters_mirrored = 1;
        break;

    case 0x10:
    case 0x12:
    case 0x14:
    case 0x16:
        num_counters = F10H_NUM_COUNTERS;
        counters = AMD_F10H_COUNTERS;
        ctrls = AMD_F10H_CTRLS;
        k7_counters_mirrored = 0;
        break;
    }

    return common_init();
}

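/*
 * Hygon family 18h CPUs use the Family 15h style counter bank, with the
 * legacy K7 MSRs mirrored onto it.
 */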
int __init hygon_vpmu_init(void)
{
    switch ( current_cpu_data.x86 )
    {
    case 0x18:
        num_counters = F15H_NUM_COUNTERS;
        counters = AMD_F15H_COUNTERS;
        ctrls = AMD_F15H_CTRLS;
        k7_counters_mirrored = 1;
        break;
    }

    return common_init();
}