1 /******************************************************************************
2 * xc_private.c
3 *
4 * Helper functions for the rest of the library.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation;
9 * version 2.1 of the License.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "xc_private.h"
21 #include "xg_private.h"
22 #include "xc_dom.h"
23 #include <stdarg.h>
24 #include <stdlib.h>
25 #include <unistd.h>
26 #include <pthread.h>
27 #include <assert.h>
28
/*
 * Open a new libxc interface handle.
 *
 * The handle is first assembled in a stack buffer (xch_buf) so that the
 * PERROR/logging machinery works before any heap allocation has
 * succeeded; only once a logger is in place is the struct malloc()ed and
 * the stack copy assigned into it.
 *
 * @logger: caller-supplied error logger, or NULL to have one created on
 *          stderr (destroyed again at close time via error_handler_tofree).
 * @dombuild_logger: optional separate logger for domain building; never
 *          created here, only referenced.
 * @open_flags: XC_OPENFLAG_* bits; XC_OPENFLAG_DUMMY skips opening the
 *          xencall/foreignmemory/devicemodel sub-handles entirely.
 *
 * Returns the new handle, or NULL on failure (with everything opened so
 * far torn down again).
 */
struct xc_interface_core *xc_interface_open(xentoollog_logger *logger,
                                            xentoollog_logger *dombuild_logger,
                                            unsigned open_flags)
{
    struct xc_interface_core xch_buf = { 0 }, *xch = &xch_buf;

    xch->flags = open_flags;
    xch->dombuild_logger_file = 0;
    xc_clear_last_error(xch);

    xch->error_handler   = logger;           xch->error_handler_tofree   = 0;
    xch->dombuild_logger = dombuild_logger;  xch->dombuild_logger_tofree = 0;

    if (!xch->error_handler) {
        /* No logger supplied: create a default stderr logger we own. */
        xch->error_handler = xch->error_handler_tofree =
            (xentoollog_logger*)
            xtl_createlogger_stdiostream(stderr, XTL_PROGRESS, 0);
        if (!xch->error_handler)
            goto err;
    }

    xch = malloc(sizeof(*xch));
    if (!xch) {
        /* Point back at the stack copy so PERROR can use the logger. */
        xch = &xch_buf;
        PERROR("Could not allocate new xc_interface struct");
        goto err;
    }
    *xch = xch_buf;

    if (open_flags & XC_OPENFLAG_DUMMY)
        return xch; /* We are done */

    xch->xcall = xencall_open(xch->error_handler,
                              open_flags & XC_OPENFLAG_NON_REENTRANT ? XENCALL_OPENFLAG_NON_REENTRANT : 0U);
    if ( xch->xcall == NULL )
        goto err;

    xch->fmem = xenforeignmemory_open(xch->error_handler, 0);
    if ( xch->fmem == NULL )
        goto err;

    xch->dmod = xendevicemodel_open(xch->error_handler, 0);
    if ( xch->dmod == NULL )
        goto err;

    return xch;

 err:
    /* Sub-handles still NULL (from the {0} init) are tolerated by the
     * close functions, so this path is safe however far we got. */
    xenforeignmemory_close(xch->fmem);
    xencall_close(xch->xcall);
    xtl_logger_destroy(xch->error_handler_tofree);
    if (xch != &xch_buf) free(xch);
    return NULL;
}
83
/*
 * Tear down a libxc interface handle: close the xencall, foreign-memory
 * and device-model sub-handles, destroy any loggers this library
 * created, and free the handle itself.  NULL is accepted and is a no-op.
 *
 * Returns 0 on success, otherwise the first non-zero error code from a
 * sub-handle close.  (Previously only the last close's result was
 * returned, which silently masked earlier failures.)
 */
int xc_interface_close(xc_interface *xch)
{
    int rc = 0, err;

    if (!xch)
        return 0;

    err = xencall_close(xch->xcall);
    if (err) {
        PERROR("Could not close xencall interface");
        if (!rc) rc = err;
    }

    err = xenforeignmemory_close(xch->fmem);
    if (err) {
        PERROR("Could not close foreign memory interface");
        if (!rc) rc = err;
    }

    err = xendevicemodel_close(xch->dmod);
    if (err) {
        PERROR("Could not close device model interface");
        if (!rc) rc = err;
    }

    xtl_logger_destroy(xch->dombuild_logger_tofree);
    xtl_logger_destroy(xch->error_handler_tofree);

    free(xch);
    return rc;
}
106
/* Accessor: the underlying xencall (hypercall) handle for this interface. */
xencall_handle *xc_interface_xcall_handle(xc_interface *xch)
{
    return xch->xcall;
}
111
/* Accessor: the underlying foreign-memory handle for this interface. */
struct xenforeignmemory_handle *xc_interface_fmem_handle(xc_interface *xch)
{
    return xch->fmem;
}
116
/* Accessor: the underlying device-model handle for this interface. */
struct xendevicemodel_handle *xc_interface_dmod_handle(xc_interface *xch)
{
    return xch->dmod;
}
121
/* Per-thread buffer used by xc_strerror(); the key is created exactly
 * once via pthread_once(), and _xc_clean_errbuf frees it on thread exit. */
static pthread_key_t errbuf_pkey;
static pthread_once_t errbuf_pkey_once = PTHREAD_ONCE_INIT;
124
/* Return the last error recorded on this handle (set by xc_reportv()
 * for XTL_ERROR-level messages; cleared by xc_clear_last_error()). */
const xc_error *xc_get_last_error(xc_interface *xch)
{
    return &xch->last_error;
}
129
/* Reset the handle's recorded error to XC_ERROR_NONE with an empty message. */
void xc_clear_last_error(xc_interface *xch)
{
    xch->last_error.code = XC_ERROR_NONE;
    xch->last_error.message[0] = '\0';
}
135
xc_error_code_to_desc(int code)136 const char *xc_error_code_to_desc(int code)
137 {
138 /* Sync to members of xc_error_code enumeration in xenctrl.h */
139 switch ( code )
140 {
141 case XC_ERROR_NONE:
142 return "No error details";
143 case XC_INTERNAL_ERROR:
144 return "Internal error";
145 case XC_INVALID_KERNEL:
146 return "Invalid kernel";
147 case XC_INVALID_PARAM:
148 return "Invalid configuration";
149 case XC_OUT_OF_MEMORY:
150 return "Out of memory";
151 }
152
153 return "Unknown error code";
154 }
155
/*
 * Core reporting routine (va_list form) used by xc_report() and
 * xc_report_error().  Formats @fmt/@args and emits the result via
 * xtl_log() on @lg at @level.  For XTL_ERROR and above the formatted
 * text is additionally stored in xch->last_error together with @code
 * (retrievable via xc_get_last_error()); lower levels format into a
 * stack buffer and are only logged.  errno is preserved across the call.
 */
void xc_reportv(xc_interface *xch, xentoollog_logger *lg,
                xentoollog_level level, int code,
                const char *fmt, va_list args) {
    int saved_errno = errno;
    char msgbuf[XC_MAX_ERROR_MSG_LEN];
    char *msg;

    /* Strip newlines from messages.
     * XXX really the messages themselves should have the newlines removed.
     */
    char fmt_nonewline[512];
    int fmt_l;

    fmt_l = strlen(fmt);
    if (fmt_l && fmt[fmt_l-1]=='\n' && fmt_l < sizeof(fmt_nonewline)) {
        /* Copy fmt minus its trailing '\n'; formats too long for the
         * scratch buffer keep their newline rather than being truncated. */
        memcpy(fmt_nonewline, fmt, fmt_l-1);
        fmt_nonewline[fmt_l-1] = 0;
        fmt = fmt_nonewline;
    }

    if ( level >= XTL_ERROR ) {
        /* Record in last_error so callers can retrieve it later. */
        msg = xch->last_error.message;
        xch->last_error.code = code;
    } else {
        msg = msgbuf;
    }
    vsnprintf(msg, XC_MAX_ERROR_MSG_LEN-1, fmt, args);
    msg[XC_MAX_ERROR_MSG_LEN-1] = '\0';

    /* Append ": <description>" when a non-zero error code was supplied. */
    xtl_log(lg, level, -1, "xc",
            "%s" "%s%s", msg,
            code?": ":"", code ? xc_error_code_to_desc(code) : "");

    errno = saved_errno;
}
191
/* Variadic front end to xc_reportv(): log @fmt at @level with @code. */
void xc_report(xc_interface *xch, xentoollog_logger *lg,
               xentoollog_level level, int code, const char *fmt, ...) {
    va_list args;
    va_start(args,fmt);
    xc_reportv(xch,lg,level,code,fmt,args);
    va_end(args);
}
199
/* Report an error on the handle's own logger at XTL_ERROR level; the
 * message and @code are also captured in xch->last_error. */
void xc_report_error(xc_interface *xch, int code, const char *fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    xc_reportv(xch, xch->error_handler, XTL_ERROR, code, fmt, args);
    va_end(args);
}
207
/*
 * Install a new progress-report prefix string (used by
 * xc_report_progress_step()) and return the previous one so the caller
 * can restore it.  The string is referenced, not copied: it must stay
 * live until replaced.
 */
const char *xc_set_progress_prefix(xc_interface *xch, const char *doing)
{
    const char *old = xch->currently_progress_reporting;

    xch->currently_progress_reporting = doing;
    return old;
}
215
/* Emit a one-shot progress event (done=0, total=0) labelled @doing. */
void xc_report_progress_single(xc_interface *xch, const char *doing)
{
    assert(doing);
    xtl_progress(xch->error_handler, "xc", doing, 0, 0);
}
221
/* Emit a progress event @done/@total under the prefix previously set
 * with xc_set_progress_prefix() (which must be non-NULL). */
void xc_report_progress_step(xc_interface *xch,
                             unsigned long done, unsigned long total)
{
    assert(xch->currently_progress_reporting);
    xtl_progress(xch->error_handler, "xc",
                 xch->currently_progress_reporting, done, total);
}
229
/*
 * Query the types of a batch of page frames of domain @dom.
 *
 * @arr is used in/out: on entry it holds @num PFNs, on successful return
 * each entry is overwritten with its type information (hence the BOTH
 * bounce direction).  Returns the domctl result, or -1 if the bounce
 * buffer could not be set up.
 */
int xc_get_pfn_type_batch(xc_interface *xch, uint32_t dom,
                          unsigned int num, xen_pfn_t *arr)
{
    int rc;
    DECLARE_DOMCTL;
    DECLARE_HYPERCALL_BOUNCE(arr, sizeof(*arr) * num, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    if ( xc_hypercall_bounce_pre(xch, arr) )
        return -1;
    domctl.cmd = XEN_DOMCTL_getpageframeinfo3;
    domctl.domain = dom;
    domctl.u.getpageframeinfo3.num = num;
    set_xen_guest_handle(domctl.u.getpageframeinfo3.array, arr);
    /* Retried on EFAULT: the bounce buffer may need remapping. */
    rc = do_domctl_retry_efault(xch, &domctl);
    xc_hypercall_bounce_post(xch, arr);
    return rc;
}
246
/*
 * Issue a batch of @nr_ops MMU extended operations (@op array) against
 * domain @dom via HYPERVISOR_mmuext_op.
 *
 * Returns the (possibly positive) hypercall result or -1 if bouncing
 * the op array failed.  NOTE(review): the hypercall result is a long
 * but the return type is int — truncation assumed benign for the value
 * range this hypercall produces; confirm against callers.
 */
int xc_mmuext_op(
    xc_interface *xch,
    struct mmuext_op *op,
    unsigned int nr_ops,
    uint32_t dom)
{
    DECLARE_HYPERCALL_BOUNCE(op, nr_ops*sizeof(*op), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    long ret = -1;

    if ( xc_hypercall_bounce_pre(xch, op) )
    {
        PERROR("Could not bounce memory for mmuext op hypercall");
        goto out1;
    }

    ret = xencall4(xch->xcall, __HYPERVISOR_mmuext_op,
                   HYPERCALL_BUFFER_AS_ARG(op),
                   nr_ops, 0, dom);

    xc_hypercall_bounce_post(xch, op);

 out1:
    return ret;
}
271
/*
 * Submit all queued entries of @mmu via HYPERVISOR_mmu_update and reset
 * the queue.  A no-op (returning 0) when the queue is empty.
 *
 * Returns 0 on success, 1 on failure.  Note the queue index is reset
 * even when the hypercall fails, so failed updates are discarded rather
 * than retried.
 */
static int flush_mmu_updates(xc_interface *xch, struct xc_mmu *mmu)
{
    int rc, err = 0;
    DECLARE_NAMED_HYPERCALL_BOUNCE(updates, mmu->updates, mmu->idx*sizeof(*mmu->updates), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    if ( mmu->idx == 0 )
        return 0;

    if ( xc_hypercall_bounce_pre(xch, updates) )
    {
        PERROR("flush_mmu_updates: bounce buffer failed");
        err = 1;
        goto out;
    }

    rc = xencall4(xch->xcall, __HYPERVISOR_mmu_update,
                  HYPERCALL_BUFFER_AS_ARG(updates),
                  mmu->idx, 0, mmu->subject);
    if ( rc < 0 )
    {
        ERROR("Failure when submitting mmu updates");
        err = 1;
    }

    mmu->idx = 0;

    xc_hypercall_bounce_post(xch, updates);

 out:
    return err;
}
303
/*
 * Allocate an empty MMU update batch targeting @subject.
 * Returns NULL on allocation failure.  (@xch is unused but kept for
 * interface symmetry with the other mmu helpers.)
 */
struct xc_mmu *xc_alloc_mmu_updates(xc_interface *xch, unsigned int subject)
{
    struct xc_mmu *mmu = malloc(sizeof(*mmu));

    if ( mmu != NULL )
    {
        mmu->idx = 0;
        mmu->subject = subject;
    }

    return mmu;
}
313
/*
 * Queue one (ptr, val) MMU update in @mmu.  When the queue reaches
 * MAX_MMU_UPDATES entries it is flushed automatically; the flush's
 * result (0 ok, 1 error) is returned, otherwise 0.
 */
int xc_add_mmu_update(xc_interface *xch, struct xc_mmu *mmu,
                      unsigned long long ptr, unsigned long long val)
{
    mmu->updates[mmu->idx].ptr = ptr;
    mmu->updates[mmu->idx].val = val;

    if ( ++mmu->idx == MAX_MMU_UPDATES )
        return flush_mmu_updates(xch, mmu);

    return 0;
}
325
/* Public wrapper: flush any pending MMU updates queued in @mmu. */
int xc_flush_mmu_updates(xc_interface *xch, struct xc_mmu *mmu)
{
    return flush_mmu_updates(xch, mmu);
}
330
/*
 * Issue a HYPERVISOR_memory_op hypercall with @arg (of @len bytes)
 * bounced both ways.  Returns the hypercall result, or -1 if the
 * bounce buffer could not be set up.
 */
long do_memory_op(xc_interface *xch, int cmd, void *arg, size_t len)
{
    DECLARE_HYPERCALL_BOUNCE(arg, len, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    long ret = -1;

    if ( xc_hypercall_bounce_pre(xch, arg) )
    {
        PERROR("Could not bounce memory for XENMEM hypercall");
        goto out1;
    }

    ret = xencall2(xch->xcall, __HYPERVISOR_memory_op,
                   cmd, HYPERCALL_BUFFER_AS_ARG(arg));

    xc_hypercall_bounce_post(xch, arg);
 out1:
    return ret;
}
349
/*
 * Fetch the highest machine frame number from Xen.  On success stores
 * it in *max_mfn and returns 0; on failure returns the (negative)
 * hypercall result and leaves *max_mfn untouched.
 */
int xc_maximum_ram_page(xc_interface *xch, unsigned long *max_mfn)
{
    long rc = do_memory_op(xch, XENMEM_maximum_ram_page, NULL, 0);

    if ( rc >= 0 )
    {
        *max_mfn = rc;
        rc = 0;
    }
    return rc;
}
361
/*
 * Return the accumulated cpu_time of @vcpu in domain @domid, or -1 on
 * failure.  NOTE(review): -1 is an in-band sentinel — a genuine
 * cpu_time of (uint64_t)-1 would be indistinguishable from an error.
 */
long long xc_domain_get_cpu_usage(xc_interface *xch, uint32_t domid, int vcpu)
{
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_getvcpuinfo;
    domctl.domain = domid;
    domctl.u.getvcpuinfo.vcpu = (uint16_t)vcpu;
    if ( (do_domctl(xch, &domctl) < 0) )
    {
        PERROR("Could not get info on domain");
        return -1;
    }
    return domctl.u.getvcpuinfo.cpu_time;
}
376
/*
 * Fill @extent_start (capacity @max_extents) with the machine-to-phys
 * table MFN list.  Returns 0 only when the hypercall succeeds AND Xen
 * reported exactly @max_extents entries; any shortfall or error yields
 * -1.
 */
int xc_machphys_mfn_list(xc_interface *xch,
                         unsigned long max_extents,
                         xen_pfn_t *extent_start)
{
    int rc;
    DECLARE_HYPERCALL_BOUNCE(extent_start, max_extents * sizeof(xen_pfn_t), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    struct xen_machphys_mfn_list xmml = {
        .max_extents = max_extents,
    };

    if ( xc_hypercall_bounce_pre(xch, extent_start) )
    {
        PERROR("Could not bounce memory for XENMEM_machphys_mfn_list hypercall");
        return -1;
    }

    set_xen_guest_handle(xmml.extent_start, extent_start);
    rc = do_memory_op(xch, XENMEM_machphys_mfn_list, &xmml, sizeof(xmml));
    if (rc || xmml.nr_extents != max_extents)
        rc = -1;
    else
        rc = 0;

    xc_hypercall_bounce_post(xch, extent_start);

    return rc;
}
404
/*
 * Return the total page count of domain @domid, or -1 on failure.  The
 * domid check guards against getinfo returning the "next" domain when
 * the requested one does not exist.
 */
long xc_get_tot_pages(xc_interface *xch, uint32_t domid)
{
    xc_dominfo_t info;
    if ( (xc_domain_getinfo(xch, domid, 1, &info) != 1) ||
         (info.domid != domid) )
        return -1;
    return info.nr_pages;
}
413
/*
 * Copy one page of data (@src_page, PAGE_SIZE bytes) into guest frame
 * @dst_pfn of domain @domid by mapping it writable, copying, then
 * unmapping and flushing the cache for that frame.  Returns 0 on
 * success, -1 if the mapping failed.
 */
int xc_copy_to_domain_page(xc_interface *xch,
                           uint32_t domid,
                           unsigned long dst_pfn,
                           const char *src_page)
{
    void *vaddr = xc_map_foreign_range(
        xch, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
    if ( vaddr == NULL )
        return -1;
    memcpy(vaddr, src_page, PAGE_SIZE);
    munmap(vaddr, PAGE_SIZE);
    xc_domain_cacheflush(xch, domid, dst_pfn, 1);
    return 0;
}
428
/*
 * Zero @num contiguous guest frames starting at @dst_pfn in domain
 * @domid: map them writable, memset to 0, unmap, then flush the cache
 * range.  Returns 0 on success, -1 (errno set) on failure.
 */
int xc_clear_domain_pages(xc_interface *xch,
                          uint32_t domid,
                          unsigned long dst_pfn,
                          int num)
{
    size_t size;
    void *vaddr;

    /*
     * Validate the count up front: a negative num would previously be
     * converted to an enormous size_t, and num * PAGE_SIZE could
     * overflow int arithmetic before the size_t assignment.
     */
    if ( num <= 0 || (size_t)num > SIZE_MAX / PAGE_SIZE )
    {
        errno = EINVAL;
        return -1;
    }

    size = (size_t)num * PAGE_SIZE;
    vaddr = xc_map_foreign_range(
        xch, domid, size, PROT_WRITE, dst_pfn);
    if ( vaddr == NULL )
        return -1;
    memset(vaddr, 0, size);
    munmap(vaddr, size);
    xc_domain_cacheflush(xch, domid, dst_pfn, num);
    return 0;
}
444
/* Public pass-through for issuing a raw domctl. */
int xc_domctl(xc_interface *xch, struct xen_domctl *domctl)
{
    return do_domctl(xch, domctl);
}
449
/* Public pass-through for issuing a raw sysctl. */
int xc_sysctl(xc_interface *xch, struct xen_sysctl *sysctl)
{
    return do_sysctl(xch, sysctl);
}
454
/*
 * Issue a HYPERVISOR_xen_version sub-op @cmd, bouncing @arg with a size
 * determined by the sub-op (the bounce is declared size 0 and patched
 * via HYPERCALL_BOUNCE_SET_SIZE once cmd is decoded).  Sub-ops with
 * sz == 0 take no argument buffer and skip the bounce entirely.
 *
 * Returns the hypercall result, -EINVAL for unknown commands, or
 * -ENOMEM if the bounce buffer could not be set up.
 */
int xc_version(xc_interface *xch, int cmd, void *arg)
{
    DECLARE_HYPERCALL_BOUNCE(arg, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT); /* Size unknown until cmd decoded */
    size_t sz;
    int rc;

    switch ( cmd )
    {
    case XENVER_version:
        sz = 0;
        break;
    case XENVER_extraversion:
        sz = sizeof(xen_extraversion_t);
        break;
    case XENVER_compile_info:
        sz = sizeof(xen_compile_info_t);
        break;
    case XENVER_capabilities:
        sz = sizeof(xen_capabilities_info_t);
        break;
    case XENVER_changeset:
        sz = sizeof(xen_changeset_info_t);
        break;
    case XENVER_platform_parameters:
        sz = sizeof(xen_platform_parameters_t);
        break;
    case XENVER_get_features:
        sz = sizeof(xen_feature_info_t);
        break;
    case XENVER_pagesize:
        sz = 0;
        break;
    case XENVER_guest_handle:
        sz = sizeof(xen_domain_handle_t);
        break;
    case XENVER_commandline:
        sz = sizeof(xen_commandline_t);
        break;
    case XENVER_build_id:
    {
        /* Variable-sized: the caller pre-fills build_id->len with the
         * capacity, so the input must be copied in as well (BOUNCE_BOTH). */
        xen_build_id_t *build_id = (xen_build_id_t *)arg;
        sz = sizeof(*build_id) + build_id->len;
        HYPERCALL_BOUNCE_SET_DIR(arg, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
        break;
    }
    default:
        ERROR("xc_version: unknown command %d\n", cmd);
        return -EINVAL;
    }

    HYPERCALL_BOUNCE_SET_SIZE(arg, sz);

    if ( (sz != 0) && xc_hypercall_bounce_pre(xch, arg) )
    {
        PERROR("Could not bounce buffer for version hypercall");
        return -ENOMEM;
    }

    rc = do_xen_version(xch, cmd, HYPERCALL_BUFFER(arg));

    if ( sz != 0 )
        xc_hypercall_bounce_post(xch, arg);

    return rc;
}
520
/*
 * Exchange machine frame @mfn of domain @domid for one located below
 * 4GiB: first give the page back to Xen, then reclaim one constrained
 * to 32 address bits.  Returns the new MFN, or 0 on failure.
 *
 * NOTE(review): if the decrease succeeds but the increase fails, the
 * domain has permanently lost a page — there is no roll-back path here.
 */
unsigned long xc_make_page_below_4G(
    xc_interface *xch, uint32_t domid, unsigned long mfn)
{
    xen_pfn_t old_mfn = mfn;
    xen_pfn_t new_mfn;

    if ( xc_domain_decrease_reservation_exact(
        xch, domid, 1, 0, &old_mfn) != 0 )
    {
        DPRINTF("xc_make_page_below_4G decrease failed. mfn=%lx\n",mfn);
        return 0;
    }

    if ( xc_domain_increase_reservation_exact(
        xch, domid, 1, 0, XENMEMF_address_bits(32), &new_mfn) != 0 )
    {
        DPRINTF("xc_make_page_below_4G increase failed. mfn=%lx\n",mfn);
        return 0;
    }

    return new_mfn;
}
543
544 static void
_xc_clean_errbuf(void * m)545 _xc_clean_errbuf(void * m)
546 {
547 free(m);
548 pthread_setspecific(errbuf_pkey, NULL);
549 }
550
551 static void
_xc_init_errbuf(void)552 _xc_init_errbuf(void)
553 {
554 pthread_key_create(&errbuf_pkey, _xc_clean_errbuf);
555 }
556
/*
 * Thread-aware strerror() wrapper.
 *
 * With XC_OPENFLAG_NON_REENTRANT the caller has promised single-threaded
 * use, so plain strerror() (with its static buffer) is returned directly.
 * Otherwise the result is copied into a per-thread buffer (created
 * lazily, freed on thread exit by _xc_clean_errbuf) under a mutex, since
 * strerror() itself need not be thread-safe.
 */
const char *xc_strerror(xc_interface *xch, int errcode)
{
    if ( xch->flags & XC_OPENFLAG_NON_REENTRANT )
    {
        return strerror(errcode);
    }
    else
    {
#define XS_BUFSIZE 32
        char *errbuf;
        static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
        char *strerror_str;

        pthread_once(&errbuf_pkey_once, _xc_init_errbuf);

        errbuf = pthread_getspecific(errbuf_pkey);
        if (errbuf == NULL) {
            errbuf = malloc(XS_BUFSIZE);
            if ( errbuf == NULL )
                return "(failed to allocate errbuf)";
            pthread_setspecific(errbuf_pkey, errbuf);
        }

        /*
         * Thread-unsafe strerror() is protected by a local mutex. We copy the
         * string to a thread-private buffer before releasing the mutex.
         * snprintf (unlike the strncpy previously used here) guarantees
         * NUL-termination and skips the pointless zero-padding.
         */
        pthread_mutex_lock(&mutex);
        strerror_str = strerror(errcode);
        snprintf(errbuf, XS_BUFSIZE, "%s", strerror_str);
        pthread_mutex_unlock(&mutex);

        return errbuf;
    }
}
593
/*
 * Serialise a bitmap stored as uint64_t words (@lp) into a little-endian
 * byte array (@bp).  @nbits bits are transferred, rounded up to whole
 * bytes within the final word.
 */
void bitmap_64_to_byte(uint8_t *bp, const uint64_t *lp, int nbits)
{
    int word = 0, base = 0;

    while (nbits > 0) {
        uint64_t chunk = lp[word++];
        int byte;

        for (byte = 0; byte < 8 && nbits > 0; byte++) {
            bp[base + byte] = (uint8_t)chunk;   /* low byte first */
            chunk >>= 8;
            nbits -= 8;
        }
        base += 8;
    }
}
608
/*
 * Deserialise a little-endian byte array (@bp) into uint64_t bitmap
 * words (@lp).  @nbits bits are transferred (whole bytes consumed);
 * unused high bytes of the final word are left zero.
 */
void bitmap_byte_to_64(uint64_t *lp, const uint8_t *bp, int nbits)
{
    int word = 0, base = 0;

    while (nbits > 0) {
        uint64_t acc = 0;
        int byte;

        for (byte = 0; byte < 8 && nbits > 0; byte++) {
            acc |= (uint64_t)bp[base + byte] << (byte * 8);
            nbits -= 8;
        }
        lp[word++] = acc;
        base += 8;
    }
}
623
/*
 * Read exactly @size bytes from @fd into @data, retrying on EINTR and
 * looping over short reads.  Returns 0 on success, -1 on error; a
 * premature EOF is reported as -1 with errno set to 0 so callers can
 * distinguish it from a real read failure.
 */
int read_exact(int fd, void *data, size_t size)
{
    unsigned char *p = data;
    size_t remaining = size;

    while ( remaining > 0 )
    {
        ssize_t n = read(fd, p, remaining);

        if ( (n == -1) && (errno == EINTR) )
            continue;              /* interrupted: retry same chunk */
        if ( n == 0 )
            errno = 0;             /* EOF marker for the caller */
        if ( n <= 0 )
            return -1;

        p += n;
        remaining -= n;
    }

    return 0;
}
643
/*
 * Write exactly @size bytes from @data to @fd, retrying on EINTR and
 * looping over short writes.  Returns 0 on success, -1 on error.
 */
int write_exact(int fd, const void *data, size_t size)
{
    const unsigned char *p = data;
    size_t remaining = size;

    while ( remaining > 0 )
    {
        ssize_t n = write(fd, p, remaining);

        if ( (n == -1) && (errno == EINTR) )
            continue;              /* interrupted: retry same chunk */
        if ( n <= 0 )
            return -1;

        p += n;
        remaining -= n;
    }

    return 0;
}
661
662 #if defined(__MINIOS__)
663 /*
664 * MiniOS's libc doesn't know about writev(). Implement it as multiple write()s.
665 */
/*
 * MiniOS fallback: emulate writev_exact() as a sequence of write_exact()
 * calls, one per iovec entry.  Returns 0 on success or the first
 * failing write_exact() result.
 */
int writev_exact(int fd, const struct iovec *iov, int iovcnt)
{
    int rc, i;

    for ( i = 0; i < iovcnt; ++i )
    {
        rc = write_exact(fd, iov[i].iov_base, iov[i].iov_len);
        if ( rc )
            return rc;
    }

    return 0;
}
679 #else
/*
 * Write out all of @iov (up to @iovcnt entries) via writev(), retrying
 * on EINTR and resubmitting after partial writes until everything is
 * written.  Returns 0 on success, -1 on error with errno set.
 *
 * A partial write inside an iovec entry forces a one-time copy of the
 * whole iov array (local_iov) so the affected entry's base/len can be
 * adjusted without mutating the caller's (const) array.
 */
int writev_exact(int fd, const struct iovec *iov, int iovcnt)
{
    struct iovec *local_iov = NULL;
    int rc = 0, iov_idx = 0, saved_errno = 0;
    ssize_t len;

    while ( iov_idx < iovcnt )
    {
        /*
         * Skip over iov[] entries with 0 length.
         *
         * This is needed to cover the case where we took a partial write and
         * all remaining vectors are of 0 length. In such a case, the results
         * from writev() are indistinguishable from EOF.
         */
        while ( iov[iov_idx].iov_len == 0 )
            if ( ++iov_idx == iovcnt )
                goto out;

        /* Never pass more than IOV_MAX entries in one call. */
        len = writev(fd, &iov[iov_idx], min(iovcnt - iov_idx, IOV_MAX));
        saved_errno = errno;

        if ( (len == -1) && (errno == EINTR) )
            continue;
        if ( len <= 0 )
        {
            rc = -1;
            goto out;
        }

        /* Check iov[] to see whether we had a partial or complete write. */
        while ( (len > 0) && (iov_idx < iovcnt) )
        {
            /* len > 0 here, so the signed/unsigned comparison is safe. */
            if ( len >= iov[iov_idx].iov_len )
                len -= iov[iov_idx++].iov_len;
            else
            {
                /* Partial write of iov[iov_idx]. Copy iov so we can adjust
                 * element iov_idx and resubmit the rest. */
                if ( !local_iov )
                {
                    local_iov = malloc(iovcnt * sizeof(*iov));
                    if ( !local_iov )
                    {
                        saved_errno = ENOMEM;
                        rc = -1;
                        goto out;
                    }

                    iov = memcpy(local_iov, iov, iovcnt * sizeof(*iov));
                }

                local_iov[iov_idx].iov_base += len;
                local_iov[iov_idx].iov_len  -= len;
                break;
            }
        }
    }

    saved_errno = 0;

 out:
    free(local_iov);
    errno = saved_errno;   /* report the errno of the failing writev, not free() */
    return rc;
}
746 #endif
747
/* Find-first-set for a byte: 1-based index of the lowest set bit, 0 if none. */
int xc_ffs8(uint8_t x)
{
    int pos;

    for ( pos = 1; pos <= 8; pos++, x >>= 1 )
        if ( x & 1 )
            return pos;

    return 0;
}
756
/* Find-first-set for 16 bits, built from the low/high byte halves. */
int xc_ffs16(uint16_t x)
{
    uint8_t lo = (uint8_t)x, hi = (uint8_t)(x >> 8);

    if ( lo )
        return xc_ffs8(lo);
    return hi ? xc_ffs8(hi) + 8 : 0;
}
762
/* Find-first-set for 32 bits, built from the low/high 16-bit halves. */
int xc_ffs32(uint32_t x)
{
    uint16_t lo = (uint16_t)x, hi = (uint16_t)(x >> 16);

    if ( lo )
        return xc_ffs16(lo);
    return hi ? xc_ffs16(hi) + 16 : 0;
}
768
/* Find-first-set for 64 bits, built from the low/high 32-bit halves. */
int xc_ffs64(uint64_t x)
{
    uint32_t lo = (uint32_t)x, hi = (uint32_t)(x >> 32);

    if ( lo )
        return xc_ffs32(lo);
    return hi ? xc_ffs32(hi) + 32 : 0;
}
774
775 /*
776 * Local variables:
777 * mode: C
778 * c-file-style: "BSD"
779 * c-basic-offset: 4
780 * tab-width: 4
781 * indent-tabs-mode: nil
782 * End:
783 */
784