/* This file contains essentially all of the process and message handling.
 * It has two main entry points from the outside:
 *
 *   sys_call:    called when a process or task does SEND, RECEIVE or SENDREC
 *   interrupt:   called by interrupt routines to send a message to a task
 *
 * It also has several minor entry points:
 *
 *   lock_ready:      put a process on one of the ready queues so it can be run
 *   lock_unready:    remove a process from the ready queues
 *   lock_sched:      a process has run too long; schedule another one
 *   lock_mini_send:  send a message (used by interrupt signals, etc.)
 *   lock_pick_proc:  pick a process to run (used by system initialization)
 *   unhold:          repeat all held-up interrupts
 */

#include "kernel.h"
#include <minix/callnr.h>
#include <minix/com.h>
#include "proc.h"

PRIVATE unsigned char switching;        /* nonzero to inhibit interrupt() */

FORWARD _PROTOTYPE( int mini_send, (struct proc *caller_ptr, int dest,
                message *m_ptr) );
FORWARD _PROTOTYPE( int mini_rec, (struct proc *caller_ptr, int src,
                message *m_ptr) );
FORWARD _PROTOTYPE( void ready, (struct proc *rp) );
FORWARD _PROTOTYPE( void sched, (void) );
FORWARD _PROTOTYPE( void unready, (struct proc *rp) );
FORWARD _PROTOTYPE( void pick_proc, (void) );

#if (CHIP == M68000)
FORWARD _PROTOTYPE( void cp_mess, (int src, struct proc *src_p, message *src_m,
                struct proc *dst_p, message *dst_m) );
#endif

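/* CopyMess(s, sp, sm, dp, dm) copies the message at virtual address 'sm' in
 * the address space of process 'sp' to virtual address 'dm' in the space of
 * process 'dp', stamping it with source process number 's'.  The Intel
 * version hands the physical origin of each data segment to the low-level
 * cp_mess(); the 68000 version lets cp_mess() below do the translation.
 */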
#if (CHIP == INTEL)
#define CopyMess(s,sp,sm,dp,dm) \
        cp_mess(s, (sp)->p_map[D].mem_phys, (vir_bytes)sm, (dp)->p_map[D].mem_phys, (vir_bytes)dm)
#endif

#if (CHIP == M68000)
#define CopyMess(s,sp,sm,dp,dm) \
        cp_mess(s,sp,sm,dp,dm)
#endif

/*===========================================================================*
 *                              interrupt                                    *
 *===========================================================================*/
PUBLIC void interrupt(task)
int task;                       /* number of task to be started */
{
/* An interrupt has occurred.  Schedule the task that handles it. */

  register struct proc *rp;     /* pointer to task's proc entry */

  rp = proc_addr(task);

  /* If this call would compete with other process-switching functions, put
   * it on the 'held' queue to be flushed at the next non-competing restart().
   * The competing conditions are:
   * (1) k_reenter == (typeof k_reenter) -1:
   *     Call from the task level, typically from an output interrupt
   *     routine.  An interrupt handler might reenter interrupt().  Rare,
   *     so not worth special treatment.
   * (2) k_reenter > 0:
   *     Call from a nested interrupt handler.  A previous interrupt handler
   *     might be inside interrupt() or sys_call().
   * (3) switching != 0:
   *     Some process-switching function other than interrupt() is being
   *     called from the task level, typically sched() from CLOCK.  An
   *     interrupt handler might call interrupt() and pass the k_reenter test.
   */
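  /* Held interrupts form a FIFO queue linked through p_nextheld, with
   * held_head and held_tail marking its ends.  unhold() later replays each
   * entry by calling interrupt() again once it is safe to do so.
   */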
  if (k_reenter != 0 || switching) {
        lock();
        if (!rp->p_int_held) {
                rp->p_int_held = TRUE;
                if (held_head != NIL_PROC)
                        held_tail->p_nextheld = rp;
                else
                        held_head = rp;
                held_tail = rp;
                rp->p_nextheld = NIL_PROC;
        }
        unlock();
        return;
  }
  switching = TRUE;

  /* If task is not waiting for an interrupt, record the blockage. */
  if ( (rp->p_flags & (RECEIVING | SENDING)) != RECEIVING ||
      !isrxhardware(rp->p_getfrom)) {
        rp->p_int_blocked = TRUE;
        switching = FALSE;
        return;
  }

  /* Destination is waiting for an interrupt.
   * Send it a message with source HARDWARE and type HARD_INT.
   * No more information can be reliably provided since interrupt messages
   * are not queued.
   */
  rp->p_messbuf->m_source = HARDWARE;
  rp->p_messbuf->m_type = HARD_INT;
  rp->p_flags &= ~RECEIVING;
  rp->p_int_blocked = FALSE;

  ready(rp);
  switching = FALSE;
}

/*===========================================================================*
 *                              sys_call                                     *
 *===========================================================================*/
PUBLIC int sys_call(function, src_dest, m_ptr)
int function;                   /* SEND, RECEIVE, or BOTH */
int src_dest;                   /* source to receive from or dest to send to */
message *m_ptr;                 /* pointer to message */
{
/* The only system calls that exist in MINIX are sending and receiving
 * messages.  These are done by trapping to the kernel with an INT instruction.
 * The trap is caught and sys_call() is called to send or receive a message
 * (or both).  The caller is always given by proc_ptr.
 */
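/* Note that SEND and RECEIVE are single bits and BOTH combines them, which
 * is why the dispatch below can test 'function' with a bitwise AND.
 */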

  register struct proc *rp;
  int n;

  /* Check for bad system call parameters. */
  if (!isoksrc_dest(src_dest)) return(E_BAD_DEST);
  rp = proc_ptr;

  if (isuserp(rp) && function != BOTH) return(E_NO_PERM);

  /* The parameters are ok. Do the call. */
  if (function & SEND) {
        /* Function = SEND or BOTH. */
        n = mini_send(rp, src_dest, m_ptr);
        if (function == SEND || n != OK)
                return(n);      /* done, or SEND failed */
  }

  /* Function = RECEIVE or BOTH.
   * We have checked that user calls are BOTH, and trust 'function' otherwise.
   */
  return(mini_rec(rp, src_dest, m_ptr));
}

/*===========================================================================*
 *                              mini_send                                    *
 *===========================================================================*/
PRIVATE int mini_send(caller_ptr, dest, m_ptr)
register struct proc *caller_ptr;       /* who is trying to send a message? */
int dest;                       /* to whom is message being sent? */
message *m_ptr;                 /* pointer to message buffer */
{
/* Send a message from 'caller_ptr' to 'dest'. If 'dest' is blocked waiting
 * for this message, copy the message to it and unblock 'dest'. If 'dest' is
 * not waiting at all, or is waiting for another source, queue 'caller_ptr'.
 */

  register struct proc *dest_ptr, *next_ptr;
  vir_bytes vb;                 /* message buffer pointer as vir_bytes */
  vir_clicks vlo, vhi;          /* virtual clicks containing message to send */

  /* User processes are only allowed to send to FS and MM.  Check for this. */
  if (isuserp(caller_ptr) && !issysentn(dest)) return(E_BAD_DEST);
  dest_ptr = proc_addr(dest);   /* pointer to destination's proc entry */
  if (isemptyp(dest_ptr)) return(E_BAD_DEST);   /* dead dest */

#if ALLOW_GAP_MESSAGES
  /* This check allows a message to be anywhere in data or stack or gap.
   * It will have to be made more elaborate later for machines which
   * don't have the gap mapped.
   */
  vb = (vir_bytes) m_ptr;
  vlo = vb >> CLICK_SHIFT;        /* vir click for bottom of message */
  vhi = (vb + MESS_SIZE - 1) >> CLICK_SHIFT;      /* vir click for top of msg */
  if (vlo < caller_ptr->p_map[D].mem_vir || vlo > vhi ||
      vhi >= caller_ptr->p_map[S].mem_vir + caller_ptr->p_map[S].mem_len)
        return(EFAULT);
#else
  /* Check for messages wrapping around top of memory or outside data seg. */
  vb = (vir_bytes) m_ptr;
  vlo = vb >> CLICK_SHIFT;        /* vir click for bottom of message */
  vhi = (vb + MESS_SIZE - 1) >> CLICK_SHIFT;      /* vir click for top of msg */
  if (vhi < vlo ||
      vhi - caller_ptr->p_map[D].mem_vir >= caller_ptr->p_map[D].mem_len)
        return(EFAULT);
#endif

  /* Check for deadlock by 'caller_ptr' and 'dest' sending to each other. */
  if (dest_ptr->p_flags & SENDING) {
        next_ptr = proc_addr(dest_ptr->p_sendto);
        while (TRUE) {
                if (next_ptr == caller_ptr) return(ELOCKED);
                if (next_ptr->p_flags & SENDING)
                        next_ptr = proc_addr(next_ptr->p_sendto);
                else
                        break;
        }
  }
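  /* The loop above follows the chain of p_sendto links starting at 'dest'.
   * If the chain leads back to 'caller_ptr' (e.g. A is already sending to B
   * while B now tries to send to A), both would block forever, so ELOCKED
   * is returned instead.
   */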

  /* Check to see if 'dest' is blocked waiting for this message. */
  if ( (dest_ptr->p_flags & (RECEIVING | SENDING)) == RECEIVING &&
       (dest_ptr->p_getfrom == ANY ||
        dest_ptr->p_getfrom == proc_number(caller_ptr))) {
        /* Destination is indeed waiting for this message. */
        CopyMess(proc_number(caller_ptr), caller_ptr, m_ptr, dest_ptr,
                 dest_ptr->p_messbuf);
        dest_ptr->p_flags &= ~RECEIVING;  /* deblock destination */
        if (dest_ptr->p_flags == 0) ready(dest_ptr);
  } else {
        /* Destination is not waiting.  Block and queue caller. */
        caller_ptr->p_messbuf = m_ptr;
        if (caller_ptr->p_flags == 0) unready(caller_ptr);
        caller_ptr->p_flags |= SENDING;
        caller_ptr->p_sendto = dest;

        /* Process is now blocked.  Put it on the destination's queue. */
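        /* Waiting senders are kept FIFO on the destination's p_callerq,
         * linked through p_sendlink, so mini_rec() serves them in order of
         * arrival.
         */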
        if ( (next_ptr = dest_ptr->p_callerq) == NIL_PROC)
                dest_ptr->p_callerq = caller_ptr;
        else {
                while (next_ptr->p_sendlink != NIL_PROC)
                        next_ptr = next_ptr->p_sendlink;
                next_ptr->p_sendlink = caller_ptr;
        }
        caller_ptr->p_sendlink = NIL_PROC;
  }
  return(OK);
}

/*===========================================================================*
 *                              mini_rec                                     *
 *===========================================================================*/
PRIVATE int mini_rec(caller_ptr, src, m_ptr)
register struct proc *caller_ptr;       /* process trying to get message */
int src;                        /* which message source is wanted (or ANY) */
message *m_ptr;                 /* pointer to message buffer */
{
/* A process or task wants to get a message.  If one is already queued,
 * acquire it and deblock the sender.  If no message from the desired source
 * is available, block the caller.  No need to check parameters for validity.
 * User calls are always sendrec(), and mini_send() has checked already.
 * Calls from the tasks, MM, and FS are trusted.
 */

  register struct proc *sender_ptr;
  register struct proc *previous_ptr;

  /* Check to see if a message from desired source is already available. */
  if (!(caller_ptr->p_flags & SENDING)) {
        /* Check caller queue. */
    for (sender_ptr = caller_ptr->p_callerq; sender_ptr != NIL_PROC;
         previous_ptr = sender_ptr, sender_ptr = sender_ptr->p_sendlink) {
        if (src == ANY || src == proc_number(sender_ptr)) {
                /* An acceptable message has been found. */
                CopyMess(proc_number(sender_ptr), sender_ptr,
                         sender_ptr->p_messbuf, caller_ptr, m_ptr);
                if (sender_ptr == caller_ptr->p_callerq)
                        caller_ptr->p_callerq = sender_ptr->p_sendlink;
                else
                        previous_ptr->p_sendlink = sender_ptr->p_sendlink;
                if ((sender_ptr->p_flags &= ~SENDING) == 0)
                        ready(sender_ptr);      /* deblock sender */
                return(OK);
        }
    }

    /* Check for blocked interrupt. */
    if (caller_ptr->p_int_blocked && isrxhardware(src)) {
        m_ptr->m_source = HARDWARE;
        m_ptr->m_type = HARD_INT;
        caller_ptr->p_int_blocked = FALSE;
        return(OK);
    }
  }

  /* No suitable message is available.  Block the process trying to receive. */
  caller_ptr->p_getfrom = src;
  caller_ptr->p_messbuf = m_ptr;
  if (caller_ptr->p_flags == 0) unready(caller_ptr);
  caller_ptr->p_flags |= RECEIVING;

  /* If MM has just blocked and there are kernel signals pending, now is the
   * time to tell MM about them, since it will be able to accept the message.
   */
  if (sig_procs > 0 && proc_number(caller_ptr) == MM_PROC_NR && src == ANY)
        inform();
  return(OK);
}

/*===========================================================================*
 *                              pick_proc                                    *
 *===========================================================================*/
PRIVATE void pick_proc()
{
/* Decide who to run now.  A new process is selected by setting 'proc_ptr'.
 * When a fresh user (or idle) process is selected, record it in 'bill_ptr',
 * so the clock task can tell who to bill for system time.
 */
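/* Tasks and servers are not recorded in 'bill_ptr': it keeps pointing at the
 * last user (or idle) process chosen, so system time spent on a user's
 * behalf is charged to that user.
 */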

  register struct proc *rp;     /* process to run */

  if ( (rp = rdy_head[TASK_Q]) != NIL_PROC) {
        proc_ptr = rp;
        return;
  }
  if ( (rp = rdy_head[SERVER_Q]) != NIL_PROC) {
        proc_ptr = rp;
        return;
  }
  if ( (rp = rdy_head[USER_Q]) != NIL_PROC) {
        proc_ptr = rp;
        bill_ptr = rp;
        return;
  }
  /* No one is ready.  Run the idle task.  The idle task might be made an
   * always-ready user task to avoid this special case.
   */
  bill_ptr = proc_ptr = proc_addr(IDLE);
}

/*===========================================================================*
 *                              ready                                        *
 *===========================================================================*/
PRIVATE void ready(rp)
register struct proc *rp;       /* this process is now runnable */
{
/* Add 'rp' to the end of one of the queues of runnable processes. Three
 * queues are maintained:
 *   TASK_Q   - (highest priority) for runnable tasks
 *   SERVER_Q - (middle priority) for MM and FS only
 *   USER_Q   - (lowest priority) for user processes
 */

  if (istaskp(rp)) {
        if (rdy_head[TASK_Q] != NIL_PROC)
                /* Add to tail of nonempty queue. */
                rdy_tail[TASK_Q]->p_nextready = rp;
        else {
                proc_ptr =              /* run fresh task next */
                rdy_head[TASK_Q] = rp;  /* add to empty queue */
        }
        rdy_tail[TASK_Q] = rp;
        rp->p_nextready = NIL_PROC;      /* new entry has no successor */
        return;
  }
  if (isservp(rp)) {            /* others are similar */
        if (rdy_head[SERVER_Q] != NIL_PROC)
                rdy_tail[SERVER_Q]->p_nextready = rp;
        else
                rdy_head[SERVER_Q] = rp;
        rdy_tail[SERVER_Q] = rp;
        rp->p_nextready = NIL_PROC;
        return;
  }
  /* Add user process to the front of the queue.  (This is a bit fairer to
   * I/O bound processes.)
   */
  if (rdy_head[USER_Q] == NIL_PROC)
        rdy_tail[USER_Q] = rp;
  rp->p_nextready = rdy_head[USER_Q];
  rdy_head[USER_Q] = rp;
}

/*===========================================================================*
 *                              unready                                      *
 *===========================================================================*/
PRIVATE void unready(rp)
register struct proc *rp;       /* this process is no longer runnable */
{
/* A process has blocked. */

  register struct proc *xp;
  register struct proc **qtail;  /* TASK_Q, SERVER_Q, or USER_Q rdy_tail */

  if (istaskp(rp)) {
        /* task stack still ok? */
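        /* p_stguard points to a guard word written at system initialization;
         * if it no longer holds STACK_GUARD, the task has overrun its stack.
         */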
        if (*rp->p_stguard != STACK_GUARD)
                panic("stack overrun by task", proc_number(rp));

        if ( (xp = rdy_head[TASK_Q]) == NIL_PROC) return;
        if (xp == rp) {
                /* Remove head of queue */
                rdy_head[TASK_Q] = xp->p_nextready;
                if (rp == proc_ptr) pick_proc();
                return;
        }
        qtail = &rdy_tail[TASK_Q];
  }
  else if (isservp(rp)) {
        if ( (xp = rdy_head[SERVER_Q]) == NIL_PROC) return;
        if (xp == rp) {
                rdy_head[SERVER_Q] = xp->p_nextready;
#if (CHIP == M68000)
                if (rp == proc_ptr)
#endif
                pick_proc();
                return;
        }
        qtail = &rdy_tail[SERVER_Q];
  } else {
        if ( (xp = rdy_head[USER_Q]) == NIL_PROC) return;
        if (xp == rp) {
                rdy_head[USER_Q] = xp->p_nextready;
#if (CHIP == M68000)
                if (rp == proc_ptr)
#endif
                pick_proc();
                return;
        }
        qtail = &rdy_tail[USER_Q];
  }

  /* Search body of queue.  A process can be made unready even if it is
   * not running by being sent a signal that kills it.
   */
  while (xp->p_nextready != rp)
        if ( (xp = xp->p_nextready) == NIL_PROC) return;
  xp->p_nextready = xp->p_nextready->p_nextready;
  if (*qtail == rp) *qtail = xp;
}

/*===========================================================================*
 *                              sched                                        *
 *===========================================================================*/
PRIVATE void sched()
{
/* The current process has run too long.  If another low priority (user)
 * process is runnable, put the current process on the end of the user queue,
 * possibly promoting another user to head of the queue.
 */

  if (rdy_head[USER_Q] == NIL_PROC) return;

  /* One or more user processes queued. */
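  /* Rotate the queue: the old head moves to the tail, so a queue A->B->C
   * becomes B->C->A and B is the next user to run.
   */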
  rdy_tail[USER_Q]->p_nextready = rdy_head[USER_Q];
  rdy_tail[USER_Q] = rdy_head[USER_Q];
  rdy_head[USER_Q] = rdy_head[USER_Q]->p_nextready;
  rdy_tail[USER_Q]->p_nextready = NIL_PROC;
  pick_proc();
}

/*==========================================================================*
 *                              lock_mini_send                              *
 *==========================================================================*/
PUBLIC int lock_mini_send(caller_ptr, dest, m_ptr)
struct proc *caller_ptr;        /* who is trying to send a message? */
int dest;                       /* to whom is message being sent? */
message *m_ptr;                 /* pointer to message buffer */
{
/* Safe gateway to mini_send() for tasks. */

  int result;

  switching = TRUE;
  result = mini_send(caller_ptr, dest, m_ptr);
  switching = FALSE;
  return(result);
}

/*==========================================================================*
 *                              lock_pick_proc                              *
 *==========================================================================*/
PUBLIC void lock_pick_proc()
{
/* Safe gateway to pick_proc() for tasks. */

  switching = TRUE;
  pick_proc();
  switching = FALSE;
}

/*==========================================================================*
 *                              lock_ready                                  *
 *==========================================================================*/
PUBLIC void lock_ready(rp)
struct proc *rp;                /* this process is now runnable */
{
/* Safe gateway to ready() for tasks. */

  switching = TRUE;
  ready(rp);
  switching = FALSE;
}

/*==========================================================================*
 *                              lock_unready                                *
 *==========================================================================*/
PUBLIC void lock_unready(rp)
struct proc *rp;                /* this process is no longer runnable */
{
/* Safe gateway to unready() for tasks. */

  switching = TRUE;
  unready(rp);
  switching = FALSE;
}

/*==========================================================================*
 *                              lock_sched                                  *
 *==========================================================================*/
PUBLIC void lock_sched()
{
/* Safe gateway to sched() for tasks. */

  switching = TRUE;
  sched();
  switching = FALSE;
}

/*==========================================================================*
 *                              unhold                                      *
 *==========================================================================*/
PUBLIC void unhold()
{
/* Flush any held-up interrupts.  k_reenter must be 0.  held_head must not
 * be NIL_PROC.  Interrupts must be disabled.  They will be enabled while the
 * held queue is being flushed, but disabled again before this returns.
 */
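/* If some other process-switching function is active ('switching' is set),
 * nothing is flushed here; the interrupts stay on the held queue until a
 * later, non-competing restart() calls unhold() again.
 */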

  register struct proc *rp;     /* current head of held queue */

  if (switching) return;
  rp = held_head;
  do {
        if ( (held_head = rp->p_nextheld) == NIL_PROC) held_tail = NIL_PROC;
        rp->p_int_held = FALSE;
        unlock();               /* reduce latency; held queue may change! */
        interrupt(proc_number(rp));
        lock();                 /* protect the held queue again */
  }
  while ( (rp = held_head) != NIL_PROC);
}

#if (CHIP == M68000)
/*==========================================================================*
 *                              cp_mess                                     *
 *==========================================================================*/
PRIVATE void cp_mess(src, src_p, src_m, dst_p, dst_m)
int src;                        /* sender process */
register struct proc *src_p;    /* source proc entry */
message *src_m;                 /* source message */
register struct proc *dst_p;    /* destination proc entry */
message *dst_m;                 /* destination buffer */
{
  /* Convert virtual addresses to physical addresses. */
  /* The caller has already checked that all addresses are within bounds. */

  src_m = (message *)((char *)src_m + (((phys_bytes)src_p->p_map[D].mem_phys
                                - src_p->p_map[D].mem_vir) << CLICK_SHIFT));
  dst_m = (message *)((char *)dst_m + (((phys_bytes)dst_p->p_map[D].mem_phys
                                - dst_p->p_map[D].mem_vir) << CLICK_SHIFT));
#ifdef NEEDFSTRUCOPY
  phys_copy(src_m,dst_m,(phys_bytes) sizeof(message));
#else
  *dst_m = *src_m;
#endif
  dst_m->m_source = src;
}
#endif