Main Page   Class Hierarchy   Compound List   File List   Compound Members   File Members  

Consumer.cc

Go to the documentation of this file.
00001 
00022 // ===================================================================
00023 // INCLUDES
00024 
00025 #include <assert.h>
00026 #include <stdio.h>
00027 #include <stdlib.h>
00028 #include <string.h>
00029 #include "Consumer.hh"
00030 // ===================================================================
00031 
00032 
00033 
00034 // ===================================================================
00035 Consumer::Consumer (Reference_Reader* const reference_reader,
00036         const char* const kernel_pathname) {
00037 
00038   // Hold a pointer to the reference reader.
00039   this->reference_reader = reference_reader;
00040 
00041   // Attempt to open the kernel trace file.
00042   kernel_stream = fopen(kernel_pathname, "r");
00043 
00044   // Did the kernel trace open correctly?
00045   if (kernel_stream == NULL) {
00046 
00047     // No.  Emit the error and exit.
00048     perror("Failed opening kernel trace");
00049     exit(1);
00050 
00051   }
00052 
00053   // Initialize some data members.
00054   number_reference_records = 0;
00055   number_kernel_records = 0;
00056   next_canonical_page_ID = 0;
00057   canonical_zero_page = get_new_canonical_page(TYPE_ZERO_PAGE);
00058   next_process_ID = KERNEL_PROCESS_ID + 1;
00059 
00060   // Create space to hold one reference record and one kernel record.
00061   reference_record = new reference_record_s;
00062   kernel_record = new kernel_record_s;
00063   if ((reference_record == 0) || (kernel_record == 0)) {
00064     fprintf(stderr, "Consumer(): Unable to allocate record space\n");
00065     exit(1);
00066   }
00067 
00068   // Create a task object for the initial kernel task as well as a
00069   // process object for the ``kernel process''.
00070   kernel_task = allocate_new_task();
00071   Process* kernel_process = allocate_new_process();
00072 
00073   // Give each an ID of 0, initialize the other fields, and assign the
00074   // thread to the process.
00075   kernel_task->task_ID = 0;
00076   kernel_task->process = kernel_process;
00077   kernel_task->parent_task = NULL;
00078   kernel_process->process_ID = KERNEL_PROCESS_ID;
00079   kernel_process->task_list.push_front(kernel_task);
00080   kernel_process->image_file_ID.inode_ID = 0;
00081   kernel_process->image_file_ID.major = 0;
00082   kernel_process->image_file_ID.minor = 0;
00083   strcpy(kernel_process->image_pathname, "***KERNEL***");
00084 
00085   // Create the virtual->canonical map (the context itself) and
00086   // associate it with the kernel process.
00087   virtual_to_canonical_map_t* kernel_context_map =
00088     new virtual_to_canonical_map_t;
00089   if (kernel_context_map == 0) {
00090     fprintf(stderr, "Consumer(): Unable to allocate kernel context map\n");
00091     exit(1);
00092   }
00093   kernel_process->virtual_to_canonical_map = kernel_context_map;
00094   kernel_process->context_ID = KERNEL_CONTEXT;
00095 
00096   // Insert them into both the task_ID- and context_ID-keyed tables.
00097   task_ID_to_Task_map[kernel_task->task_ID] = kernel_task;
00098   context_ID_to_Process_map[kernel_process->context_ID] =
00099     kernel_process;
00100 
00101   // Track the currently active task, which is initially the kernel task.
00102   active_task = kernel_task;
00103 
00104   // Nullify the scheduled task, as there isn't one yet.
00105   scheduled_task = NULL;
00106 
00107   // Initialize the single canonical buffer cache page.  (Reminder:
00108   // This is a cheap trick until we really track buffer use.  See
00109   // Consumer.hh for further notes.)
00110   canonical_buffer_cache_page.page = 0;
00111   canonical_buffer_cache_page.reference_count = 0;
00112   canonical_buffer_cache_page.type = TYPE_BUFFER_CACHE;
00113 
00114 } // Consumer::Consumer
00115 // ===================================================================
00116 
00117 
00118 
00119 // ===================================================================
00120 Consumer::~Consumer () {
00121 
00122   // WARNING: We do little here to clean up this object.  We (lazily)
00123   // assume that when this object dies, the process is complete
00124   // anyway.
00125 
00126   // Attempt to close the kernel trace file.
00127   int kernel_result = fclose(kernel_stream);
00128 
00129   // Did the kernel trace close correctly?
00130   if (kernel_result == EOF) {
00131 
00132     // No.  Emit the error and exit.
00133     perror("Failed closing kernel trace");
00134     exit(1);
00135 
00136   }
00137 
00138 } // Consumer::~Consumer
00139 // ===================================================================
00140 
00141 
00142 
00143 // ===================================================================
00144 // Perform the merging.
00145 void
00146 Consumer::merge () {
00147 
00148   // Initialize any output.
00149   initialize();
00150 
00151   // Perform the initial synchronizing phase, getting the reference
00152   // and kernel traces in sync.
00153   synchronize();
00154 
00155   // Set the first reference record (marking the last kernel event) as
00156   // the start of scheduled time for the kernel task.
00157   kernel_task->
00158     mark_quanta_start(reference_record->cycle_timestamp,
00159           reference_record->instruction_timestamp,
00160           reference_record->reference_timestamp);
00161 
00162   // Read the first reference record to be processed.  Since this is
00163   // the beginning of virtual time passage, set this as the start of
00164   // the scheduling quanta (as far as the traces indicate) for the
00165   // active task.
00166   reference_reader->read(reference_record);
00167 
00168   // Merge the traces until the last reference has been processed.
00169   while (reference_record->tag != TAG_END_OF_TRACE) {
00170 
00171     // Update the time that has passed between the last reference
00172     // record and this one for the active task.
00173     active_task->mark_time(reference_record->cycle_timestamp,
00174          reference_record->instruction_timestamp,
00175          reference_record->reference_timestamp);
00176 
00177     // Does the reference record indicate that there's a kernel record
00178     // to read and process?
00179     if (reference_record->tag == TAG_KERNEL_EVENT) {
00180 
00181       // It does.  Read the kernel record, update the maps with this
00182       // kernel record, and act on the record (that is, output and
00183       // other bookkeeping).
00184       read_kernel_record();
00185       update_with_kernel_record();
00186       act_on_kernel_record();
00187 
00188     } else {
00189 
00190       // No, this is just a reference record.  Update the maps with
00191       // this reference record, and act on the record (that is, output
00192       // and other bookkeeping).
00193       update_with_reference_record();
00194       act_on_reference_record();
00195 
00196     }
00197 
00198     // Read the next reference record.
00199     reference_reader->read(reference_record);
00200 
00201   } // Reference record reading loop
00202 
00203   // Complete processing by finishing output and other bookkeeping.
00204   clean_up();
00205 
00206 } // Consumer::merge
00207 // ===================================================================
00208 
00209 
00210 
00211 // ===================================================================
00212 void
00213 Consumer::synchronize () {
00214 
00215   // Read the first reference record which must be a kernel event
00216   // record that indicates the time of the _last_ kernel event
00217   // preceeding references.
00218   reference_reader->read(reference_record);
00219   if (reference_record->tag != TAG_KERNEL_EVENT) {
00220     fprintf(stderr, "Consumer::synchronize(): ");
00221     fprintf(stderr, "First reference record not a kernel event\n");
00222     exit(1);
00223   }
00224   timestamp_t last_kernel_event_time =
00225     reference_record->cycle_timestamp;
00226 
00227   // If the last kernel event time is 0, that means that reference
00228   // tracing began during system startup, _before_ the kernel was
00229   // loaded and any kernel events could be recorded.  Therefore, we
00230   // can skip synchronization.
00231   if (last_kernel_event_time == 0) {
00232     return;
00233   }
00234 
00235   // Start at the beginning of the kernel trace.  Read and process all
00236   // records concluding with the last one preceeding actual
00237   // references.  In this pass of the kernel trace, we seek only to
00238   // establish which tasks will be live at the time of the first
00239   // reference.
00240   do {
00241 
00242     // Read the next kernel record.
00243     read_kernel_record();
00244 
00245     // Sanity check: We shouldn't go too far in the kernel trace.
00246     if (kernel_record->cycle_timestamp > last_kernel_event_time) {
00247       fprintf(stderr, "Consumer::synchronize(): ");
00248       fprintf(stderr, "Went too far in kernel trace\n");
00249       exit(1);
00250     }
00251 
00252     // Determine what to do based on the record type.
00253     switch (kernel_record->tag) {
00254 
00255     case TAG_FORK:
00256       // Mark the new task as live.
00257       live_map[kernel_record->task_ID] = true;
00258       break;
00259 
00260     case TAG_EXIT:
00261       // Mark the terminated task as dead.  Check first, though,
00262       // to be sure that it was live and that the kernel trace is
00263       // sane.
00264       if (!live_map[kernel_record->task_ID]) {
00265   fprintf(stderr, "Consumer::synchronize(): ");
00266   fprintf(stderr,  "Exit record on dead task\n");
00267   fprintf(stderr,
00268     "cycle_timestamp = %qx\n",
00269     kernel_record->cycle_timestamp);
00270   exit(1);
00271       }
00272       live_map[kernel_record->task_ID] = false;
00273       break;
00274 
00275     } // switch-case on tag
00276 
00277   } while (kernel_record->cycle_timestamp != last_kernel_event_time);
00278 
00279   // Perform a second pass, where we will maintain, for each task, its
00280   // most recent executable image pathname -- something that will
00281   // continue throughout the merging.  When fork() records are
00282   // encountered, the task will adopt the most recent executable image
00283   // pathname of the parent, and that pathname will be emitted with
00284   // the new fork record.  Stop when the first reference time is
00285   // reached.
00286 
00287   // Process until the time of the first reference.
00288   rewind(kernel_stream);
00289   number_kernel_records = 0;
00290   do {
00291 
00292     // Read the next kernel record.
00293     read_kernel_record();
00294 
00295     // Sanity check: We shouldn't go too far in the kernel trace.
00296     if (kernel_record->cycle_timestamp > last_kernel_event_time) {
00297       fprintf(stderr, "Consumer::synchronize(): ");
00298       fprintf(stderr, "Went too far in kernel trace\n");
00299       exit(1);
00300     }
00301 
00302     // Update the tables based on this record and act on this record
00303     // (that is, output and bookkeeping).
00304     update_with_kernel_record();
00305     act_on_kernel_record();
00306 
00307   } while (kernel_record->cycle_timestamp != last_kernel_event_time);
00308 
00309 } // synchronize
00310 // ===================================================================
00311 
00312 
00313 
00314 // ===================================================================
00315 Task*
00316 Consumer::safe_task_ID_to_task (const task_ID_t task_ID) {
00317 
00318   // Attempt lookup.
00319   task_ID_to_Task_map_t::iterator i = task_ID_to_Task_map.find(task_ID);
00320 
00321   // If the lookup failed then abort.
00322   if (i == task_ID_to_Task_map.end()) {
00323     fprintf(stderr, "Consumer::safe_task_ID_to_task(): ");
00324     fprintf(stderr, "Failed lookup\n");
00325     exit(1);
00326   }
00327 
00328   // Return the lookup result.
00329   return (*i).second;
00330 
00331 }
00332 // ===================================================================
00333 
00334 
00335 
00336 // ===================================================================
00337 Process*
00338 Consumer::safe_context_ID_to_process (const context_ID_t context) {
00339 
00340   // Attempt lookup.
00341   context_ID_to_Process_map_t::iterator i =
00342     context_ID_to_Process_map.find(context);
00343 
00344   // If the lookup failed then abort.
00345   if (i == context_ID_to_Process_map.end()) {
00346     fprintf(stderr, "Consumer::safe_context_ID_to_process(): ");
00347     fprintf(stderr, "Failed lookup\n");
00348     exit(1);
00349   }
00350 
00351   // Return the lookup result.
00352   return (*i).second;
00353 
00354 }
00355 // ===================================================================
00356 
00357 
00358 
00359 // ===================================================================
00360 bool
00361 Consumer::safe_live (const task_ID_t task_ID) {
00362 
00363   // Attempt lookup.
00364   live_map_t::iterator i = live_map.find(task_ID);
00365 
00366   // If the lookup failed then abort.
00367   if (i == live_map.end()) {
00368     fprintf(stderr, "safe_live(): Failed lookup\n");
00369     exit(1);
00370   }
00371 
00372   // Return the lookup result.
00373   return (*i).second;
00374 
00375 }
00376 // ===================================================================
00377 
00378 
00379 
00380 // ===================================================================
00381 virtual_to_canonical_map_t*
00382 Consumer::safe_file_ID_to_V2C_map (const file_ID_s& file_ID) {
00383 
00384   virtual_to_canonical_map_t* map = 0;
00385 
00386   // Attempt lookup.
00387   file_ID_to_V2C_map_t::iterator i = file_ID_to_V2C_map.find(file_ID);
00388 
00389   // Did the lookup succeed?
00390   if (i == file_ID_to_V2C_map.end()) {
00391 
00392     // No.  Allocate a new map, and ensure that the allocation
00393     // succeeded.
00394     map = new virtual_to_canonical_map_t;
00395     if (map == 0) {
00396       fprintf(stderr, "safe_file_ID_to_V2C_map(): Failed allocation\n");
00397       exit(1);
00398     }
00399 
00400     // Associate the given file ID with this new map.
00401     file_ID_to_V2C_map[file_ID] = map;
00402 
00403   } else {
00404 
00405     // Yes, use the map found via lookup.
00406     map = (*i).second;
00407 
00408   }
00409 
00410   // Return the lookup result.
00411   return map;
00412 
00413 }
00414 // ===================================================================
00415 
00416 
00417 
00418 // ===================================================================
00419 virtual_to_canonical_map_t*
00420 Consumer::safe_shm_ID_to_V2C_map (const shm_ID_t& shm_ID) {
00421 
00422   virtual_to_canonical_map_t* map = 0;
00423 
00424   // Attempt lookup.
00425   shm_ID_to_V2C_map_t::iterator i = shm_ID_to_V2C_map.find(shm_ID);
00426 
00427   // Did the lookup succeed?
00428   if (i == shm_ID_to_V2C_map.end()) {
00429 
00430     // No.  Allocate a new map, and ensure that the allocation
00431     // succeeded.
00432     map = new virtual_to_canonical_map_t;
00433     if (map == 0) {
00434       fprintf(stderr, "safe_shm_ID_to_V2C_map(): Failed allocation\n");
00435       exit(1);
00436     }
00437 
00438     // Associate the given shm ID with this new map.
00439     shm_ID_to_V2C_map[shm_ID] = map;
00440 
00441   } else {
00442 
00443     // Yes, use the map found via lookup.
00444     map = (*i).second;
00445 
00446   }
00447 
00448   // Return the lookup result.
00449   return map;
00450 
00451 } // safe_shm_ID_to_V2C_map
00452 // ===================================================================
00453 
00454 
00455 
00456 // ===================================================================
00457 void
00458 Consumer::map_virtual_to_canonical
00459 (virtual_to_canonical_map_t* const map,
00460  const virtual_page_ID_t virtual_page_ID,
00461  canonical_page_s* const canonical_page) {
00462 
00463   // The local page about to be assigned should exist.
00464   // created.
00465   if (canonical_page == NULL) {
00466     fprintf(stderr, "map_virtual_to_canonical(): Null canonical page\n");
00467     exit(1);
00468   }
00469 
00470   // Insert the mapping.
00471   (*map)[virtual_page_ID] = canonical_page;
00472 
00473 } // map_virtual_to_canonical
00474 // ===================================================================
00475 
00476 
00477 
00478 // ===================================================================
00479 void
00480 Consumer::read_kernel_record () {
00481 
00482   // Attempt to read the next record of the kernel trace.
00483   char* read_result = fgets(buffer, BUFFER_SIZE, kernel_stream);
00484 
00485   // Did the read fail?
00486   if (read_result == NULL) {
00487 
00488     // Distinguish between an EOF and other errors, but abort
00489     // processing in either case.
00490     fprintf(stderr, "Consumer::read_kernel_record(): ");
00491     if (feof(kernel_stream)) {
00492       fprintf(stderr, "Premature EOF\n");
00493     } else {
00494       fprintf(stderr, "Read error\n");
00495     }
00496     exit(1);
00497   }
00498 
00499   // Parse the record type, which should be the first character of the
00500   // buffer.
00501   kernel_record->tag = buffer[0];
00502 
00503   // Attempt to parse the rest of the record based on its type.
00504   // Declare and assign some variables here to avoid compiler
00505   // complaints of skipping initialization in switch-case statement.
00506   unsigned int parse_result = 0;
00507   unsigned int items_to_parse = 0;
00508   virtual_address_t starting_virtual_address = 0;
00509   virtual_address_t ending_virtual_address = 0;
00510   offset_t length = 0;
00511   file_offset_t file_offset = 0;
00512   switch (kernel_record->tag) {
00513 
00514   case TAG_SCHEDULE:
00515   case TAG_ACCEPT:
00516   case TAG_EXIT:
00517     items_to_parse = 2;
00518     parse_result = sscanf(&buffer[2],
00519         "%qx %hd",
00520         &kernel_record->cycle_timestamp,
00521         &kernel_record->task_ID);
00522     break;
00523 
00524   case TAG_FORK:
00525     items_to_parse = 3;
00526     parse_result = sscanf(&buffer[2],
00527         "%qx %hd %hd",
00528         &kernel_record->cycle_timestamp,
00529         &kernel_record->task_ID,
00530         &kernel_record->parent_task_ID);
00531     break;
00532 
00533   case TAG_EXEC:
00534     items_to_parse = 6;
00535     parse_result = sscanf(&buffer[2],
00536         "%qx %hd %lx %hd %hd %s",
00537         &kernel_record->cycle_timestamp,
00538         &kernel_record->task_ID,
00539         &kernel_record->file_ID.inode_ID,
00540         &kernel_record->file_ID.major,
00541         &kernel_record->file_ID.minor,
00542         kernel_record->filename);
00543     break;
00544 
00545   case TAG_CONTEXT_ASSIGNMENT:
00546     items_to_parse = 3;
00547     parse_result = sscanf(&buffer[2],
00548         "%qx %hd %lx",
00549         &kernel_record->cycle_timestamp,
00550         &kernel_record->task_ID,
00551         &kernel_record->context_ID);
00552     break;
00553 
00554   case TAG_DUPLICATE_RANGE:
00555     items_to_parse = 5;
00556     parse_result = sscanf(&buffer[2],
00557         "%qx %lx %lx %lx %lx",
00558         &kernel_record->cycle_timestamp,
00559         &kernel_record->context_ID,
00560         &kernel_record->duplicate_context_ID,
00561         &starting_virtual_address,
00562         &ending_virtual_address);
00563     break;
00564 
00565   case TAG_MMAP_FILE:
00566     items_to_parse = 9;
00567     parse_result = sscanf(&buffer[2],
00568         "%qx %hd %lx %lx %lx %hd %hd %qx %s",
00569         &kernel_record->cycle_timestamp,
00570         &kernel_record->task_ID,
00571         &starting_virtual_address,
00572         &length,
00573         &kernel_record->file_ID.inode_ID,
00574         &kernel_record->file_ID.major,
00575         &kernel_record->file_ID.minor,
00576         &file_offset,
00577         kernel_record->filename);
00578     break;
00579 
00580   case TAG_MMAP_ANONYMOUS:
00581     items_to_parse = 4;
00582     parse_result = sscanf(&buffer[2],
00583         "%qx %hd %lx %lx",
00584         &kernel_record->cycle_timestamp,
00585         &kernel_record->task_ID,
00586         &starting_virtual_address,
00587         &length);
00588     break;
00589 
00590   case TAG_MUNMAP:
00591     items_to_parse = 4;
00592     parse_result = sscanf(&buffer[2],
00593         "%qx %hd %lx %lx",
00594         &kernel_record->cycle_timestamp,
00595         &kernel_record->task_ID,
00596         &starting_virtual_address,
00597         &length);
00598     break;
00599 
00600   case TAG_COMPLETE_UNMAP:
00601     items_to_parse = 2;
00602     parse_result = sscanf(&buffer[2],
00603         "%qx %lx",
00604         &kernel_record->cycle_timestamp,
00605         &kernel_record->context_ID);
00606     break;
00607 
00608   case TAG_IPC_SHMAT:
00609     items_to_parse = 5;
00610     parse_result = sscanf(&buffer[2],
00611         "%qx %hd %lx %lx %lx",
00612         &kernel_record->cycle_timestamp,
00613         &kernel_record->task_ID,
00614         &starting_virtual_address,
00615         &length,
00616         &kernel_record->shm_ID);
00617     break;
00618 
00619   case TAG_IPC_SHMDT:
00620     items_to_parse = 3;
00621     parse_result = sscanf(&buffer[2],
00622         "%qx %hd %lx",
00623         &kernel_record->cycle_timestamp,
00624         &kernel_record->task_ID,
00625         &starting_virtual_address);
00626     break;
00627 
00628   case TAG_COW_UNMAP:
00629     items_to_parse = 3;
00630     parse_result = sscanf(&buffer[2],
00631         "%qx %hd %lx",
00632         &kernel_record->cycle_timestamp,
00633         &kernel_record->task_ID,
00634         &starting_virtual_address);
00635     break;
00636 
00637   case TAG_BUFFER_ALLOCATION:
00638   case TAG_BUFFER_DEALLOCATION:
00639   case TAG_FILE_CACHE_DEALLOCATION:
00640     items_to_parse = 2;
00641     parse_result = sscanf(&buffer[2],
00642         "%qx %lx",
00643         &kernel_record->cycle_timestamp,
00644         &starting_virtual_address);
00645     break;
00646 
00647   case TAG_FILE_CACHE_ALLOCATION:
00648     items_to_parse = 6;
00649     parse_result = sscanf(&buffer[2],
00650         "%qx %lx %lx %hd %hd %qx",
00651         &kernel_record->cycle_timestamp,
00652         &starting_virtual_address,
00653         &kernel_record->file_ID.inode_ID,
00654         &kernel_record->file_ID.major,
00655         &kernel_record->file_ID.minor,
00656         &file_offset);
00657     break;
00658 
00659   case TAG_FILE_OPEN:
00660     items_to_parse = 6;
00661     parse_result = sscanf(&buffer[2],
00662         "%qx %hd %lx %hd %hd %s",
00663         &kernel_record->cycle_timestamp,
00664         &kernel_record->task_ID,
00665         &kernel_record->file_ID.inode_ID,
00666         &kernel_record->file_ID.major,
00667         &kernel_record->file_ID.minor,
00668         kernel_record->filename);
00669     break;
00670 
00671   case TAG_FILE_CLOSE:
00672     items_to_parse = 5;
00673     parse_result = sscanf(&buffer[2],
00674         "%qx %hd %lx %hd %hd",
00675         &kernel_record->cycle_timestamp,
00676         &kernel_record->task_ID,
00677         &kernel_record->file_ID.inode_ID,
00678         &kernel_record->file_ID.major,
00679         &kernel_record->file_ID.minor);
00680     break;
00681 
00682   case TAG_FILE_READ:
00683   case TAG_FILE_WRITE:
00684     items_to_parse = 7;
00685     parse_result = sscanf(&buffer[2],
00686         "%qx %hd %lx %hd %hd %qx %lx",
00687         &kernel_record->cycle_timestamp,
00688         &kernel_record->task_ID,
00689         &kernel_record->file_ID.inode_ID,
00690         &kernel_record->file_ID.major,
00691         &kernel_record->file_ID.minor,
00692         &file_offset,
00693         &length);
00694     break;
00695 
00696   case TAG_FILE_DELETE:
00697     items_to_parse = 4;
00698     parse_result = sscanf(&buffer[2],
00699         "%qx %lx %hd %hd",
00700         &kernel_record->cycle_timestamp,
00701         &kernel_record->file_ID.inode_ID,
00702         &kernel_record->file_ID.major,
00703         &kernel_record->file_ID.minor);
00704     break;
00705 
00706   case TAG_END_OF_TRACE:
00707 
00708     fprintf(stderr, "Consumer::read_kernel_record(): ");
00709     fprintf(stderr, "Premature end of kernel trace\n");
00710     exit(1);
00711     break;
00712 
00713   default:
00714     fprintf(stderr, "Consumer::read_kernel_record(): ");
00715     fprintf(stderr,
00716       "Invalid kernel tag %c(%d)\n",
00717       kernel_record->tag,
00718       (int)kernel_record->tag);
00719     exit(1);
00720 
00721   }
00722 
00723   // If the parse failed, emit the error and exit.
00724   if (parse_result != items_to_parse) {
00725     fprintf(stderr, "Consumer::read_kernel_record(): ");
00726     fprintf(stderr, "Could not parse remainder\n");
00727     fprintf(stderr,
00728       "cycle_timestamp = %qx\n",
00729       kernel_record->cycle_timestamp);
00730     exit(1);
00731   }
00732 
00733   // If the record type requires it, convert the given information
00734   // about the mapped region into starting and ending virtual page
00735   // numbers.  Note that some records only use the starting virtual
00736   // address, but no harm is done by computing a bogus ending address.
00737   if ((kernel_record->tag == TAG_MMAP_FILE) ||
00738       (kernel_record->tag == TAG_MMAP_ANONYMOUS) ||
00739       (kernel_record->tag == TAG_IPC_SHMAT) ||
00740       (kernel_record->tag == TAG_IPC_SHMDT) ||
00741       (kernel_record->tag == TAG_MUNMAP) ||
00742       (kernel_record->tag == TAG_COW_UNMAP) ||
00743       (kernel_record->tag == TAG_DUPLICATE_RANGE) ||
00744       (kernel_record->tag == TAG_BUFFER_ALLOCATION) ||
00745       (kernel_record->tag == TAG_BUFFER_DEALLOCATION) ||
00746       (kernel_record->tag == TAG_FILE_CACHE_ALLOCATION) ||
00747       (kernel_record->tag == TAG_FILE_CACHE_DEALLOCATION)) {
00748 
00749     // Subtract one because the ending address is always the first
00750     // byte *beyond* the given range.
00751     if (kernel_record->tag == TAG_DUPLICATE_RANGE) {
00752       ending_virtual_address--;
00753     } else {
00754       ending_virtual_address = starting_virtual_address + length - 1;
00755     }
00756 
00757     kernel_record->starting_page =
00758       starting_virtual_address >> PAGE_SIZE_ORDER;
00759     kernel_record->ending_page =
00760       ending_virtual_address >> PAGE_SIZE_ORDER;
00761 
00762     // For some records, the file offset and length must be converted
00763     // into starting and ending pages within that address space as
00764     // well.
00765     if ((kernel_record->tag == TAG_MMAP_FILE) ||
00766   (kernel_record->tag == TAG_FILE_CACHE_ALLOCATION) ||
00767   (kernel_record->tag == TAG_FILE_READ) ||
00768   (kernel_record->tag == TAG_FILE_WRITE)) {
00769 
00770       kernel_record->file_starting_page =
00771   file_offset >> PAGE_SIZE_ORDER;
00772       kernel_record->file_ending_page =
00773   (file_offset + length - 1) >> PAGE_SIZE_ORDER;
00774 
00775     }
00776   }
00777 
00778   number_kernel_records++;
00779 
00780 } // read_kernel_record
00781 // ===================================================================
00782 
00783 
00784 
00785 // ===================================================================
00786 canonical_page_s*
00787 Consumer::get_new_canonical_page (const page_type_t type) {
00788 
00789   // Ensure that we haven't exhausted the canonical ID space.  Since the
00790   // upper 1 GB of pages is reserved for the shared kernel space, we
00791   // must stop allocating new canonical pages once we reach that range.
00792   if (next_canonical_page_ID == KERNEL_CANONICAL_PAGE_OFFSET) {
00793     fprintf(stderr,
00794       "get_new_canonical_page(): Canonical page IDs exhausted\n");
00795     exit(1);
00796   }
00797 
00798   // Allocate a new canonical page structure.
00799   canonical_page_s* page = new canonical_page_s;
00800   if (page == 0) {
00801     fprintf(stderr, "get_new_canonical_page(): Allocation failed\n");
00802     exit(1);
00803   }
00804 
00805   // Assign the fields and return a pointer to it.  Assume initially
00806   // that there are no maps pointing to this page.
00807   page->page = next_canonical_page_ID++;
00808   page->reference_count = 0;
00809   page->type = type;
00810   return page;
00811 
00812 } // get_new_canonical_page
00813 // ===================================================================
00814 
00815 
00816 
00817 // ===================================================================
00818 // Take a handle to the canonical page, making it possible to nullify
00819 // the pointer to the page if it is deleted by this method.
00820 
00821 void
00822 Consumer::delete_if_unused_page
00823 (canonical_page_s** const canonical_page_handle) {
00824 
00825   // Is this an anonymous page that is no longer mapped by any
00826   // context?
00827   if (((*canonical_page_handle)->type == TYPE_ANONYMOUS) &&
00828       ((*canonical_page_handle)->reference_count == 0)) {
00829 
00830     // Yes.  Delete this canonical page, as it can never be mapped
00831     // again.
00832     delete (*canonical_page_handle);
00833     (*canonical_page_handle) = 0;
00834 
00835   }
00836 } // Consumer::delete_if_unused_page
00837 // ===================================================================
00838 
00839 
00840 
00841 // ===================================================================
00842 // Take a handle to the process, making it possible to nullify the
00843 // pointer to the process if it is deleted by this method.
00844 
00845 void
00846 Consumer::delete_if_defunct_process (Process** const process_handle) {
00847 
00848   Process* process = *process_handle;
00849 
00850   // Are there any tasks left for this process?
00851   if (process->task_list.empty()) {
00852 
00853     // None.  Unmap any remaining pages in the context.
00854     handle_complete_unmap(process->context_ID);
00855 
00856     // Find and erase the context_ID->process mapping.
00857     context_ID_to_Process_map_t::iterator i =
00858       context_ID_to_Process_map.find(process->context_ID);
00859     if (i != context_ID_to_Process_map.end()) {
00860 
00861       context_ID_to_Process_map.erase(i);
00862 
00863     }
00864 
00865     // Delete the virtual->canonical map and the process object.
00866     delete process->virtual_to_canonical_map;
00867     delete process;
00868     *process_handle = 0;
00869 
00870   } // Process has no remaining tasks
00871 
00872 } // Consumer::delete_if_defunct_process
00873 // ===================================================================
00874 
00875 
00876 
00877 // ===================================================================
void
Consumer::delete_defunct_shm_segments () {

  // Reclaim every IPC shared memory segment previously flagged by
  // mark_if_defunct_shm_segment(): delete each segment's canonical
  // pages, then its virtual->canonical map, then its entries in both
  // the shm_ID->map table and the defunct table itself.

  // Traverse the table of defunct IPC shared memory segments.
  defunct_shm_ID_map_t::iterator i = defunct_shm_ID_map.begin();
  while (i != defunct_shm_ID_map.end()) {

    // Each entry should be marked as ``true'' to indicate that the
    // shm segment is defunct.
    assert((*i).second);

    // Grab the segment ID and lookup its virtual->canonical map.
    // A defunct segment must still have a map registered.
    shm_ID_t shm_ID = (*i).first;
    shm_ID_to_V2C_map_t::iterator map_iterator =
      shm_ID_to_V2C_map.find(shm_ID);
    assert(map_iterator != shm_ID_to_V2C_map.end());
    virtual_to_canonical_map_t* map = (*map_iterator).second;

    // Traverse the map of canonical pages for this defunct segment.
    virtual_to_canonical_map_t::iterator j = (*map).begin();
    while (j != (*map).end()) {

      // Sanity check the canonical page and then delete it.  Being
      // defunct implies no address space references any page.
      canonical_page_s* canonical_page = (*j).second;
      assert(canonical_page->type == TYPE_IPC_SHM_SEGMENT);
      assert(canonical_page->reference_count == 0);
      delete canonical_page;
      canonical_page = 0;

      // Advance to the next page.  Don't bother erasing the map
      // entry, as we will later delete the entire map.
      j++;

    } // Canonical page traversal

    // All of the canonical pages are gone.  Eliminate the
    // virtual->canonical map itself.
    delete map;
    map = 0;
    shm_ID_to_V2C_map.erase(map_iterator);

    // Delete the entry in the defunct segment table and move on to
    // the next one.  Copy the iterator and advance it *before*
    // erasing so that the erase cannot invalidate our traversal.
    defunct_shm_ID_map_t::iterator moribund = i;
    i++;
    defunct_shm_ID_map.erase(moribund);

  } // Defunct shm segment traversal

} // Consumer::delete_defunct_shm_segments
00928 // ===================================================================
00929 
00930 
00931 
00932 // ===================================================================
00933 void
00934 Consumer::mark_if_defunct_shm_segment
00935 (const canonical_page_s* const canonical_page) {
00936 
00937   // Is this a canonical page from an IPC shared memory segment that
00938   // is unattached to any virtual address space
00939   assert(canonical_page->type == TYPE_IPC_SHM_SEGMENT);
00940   if (canonical_page->reference_count == 0) {
00941 
00942     // It is.  Mark the segment to which this canonical page belongs
00943     // as defunct.
00944     defunct_shm_ID_map[canonical_page->shm_ID] = true;
00945 
00946   }
00947 } // Consumer::mark_if_defunct_shm_segment
00948 // ===================================================================
00949 
00950 
00951 
00952 // ===================================================================
00953 void
00954 Consumer::unlink_task_and_process (Task* const task) {
00955 
00956   // Grab a pointer the task's process.
00957   Process* process = task->process;
00958 
00959   // If the task is not associated with a process, then there's no
00960   // work to do here.
00961   if (process == NULL) {
00962     return;
00963   }
00964 
00965   // Clear the process pointer in the task object, thus performing
00966   // the first 1/2 of the unlinking.
00967   task->process = NULL;
00968 
00969   // Search through the process's task list for this task.
00970   list<Task*>::iterator i = process->task_list.begin();
00971   bool done = false;
00972   while (!done) {
00973 
00974     // Sanity check:  We should never reach the end of the list
00975     // without finding the task.
00976     if (i == process->task_list.end()) {
00977       fprintf(stderr, "Consumer::unlink_task_and_process(): ");
00978       fprintf(stderr, "Task not found in process\n");
00979       exit(1);
00980     }
00981 
00982     // If we've found it, delete it and end the search.  Otherwise,
00983     // keep searching.
00984     if (*i == task) {
00985 
00986       // It is.  Erase it from the list, thus performing the second
00987       // 1/2 of the unlinking.
00988       process->task_list.erase(i);
00989       done = true;
00990 
00991     } else {
00992 
00993       // It is not.  Continue searching.
00994       i++;
00995 
00996     }
00997   } // Loop over task IDs in the process
00998 
00999   // The process may be defunct as a result of the unlinking, and if
01000   // so, it should be deleted.
01001   delete_if_defunct_process(&process);
01002 
01003 } // Consumer::unlink_task_and_process
01004 // ===================================================================
01005 
01006 
01007 
01008 // ===================================================================
01009 void
01010 Consumer::make_kernel_active () {
01011 
01012   // It must be the case that the scheduled task was previously
01013   // active.
01014   if (active_task != scheduled_task) {
01015     fprintf(stderr, "Consumer::make_kernel_active(): ");
01016     fprintf(stderr, "User task was not active\n");
01017     exit(1);
01018   }
01019 
01020   // Mark the beginning of a quanta (of sorts) for the kernel.
01021   kernel_task->
01022     mark_quanta_start(reference_record->cycle_timestamp,
01023           reference_record->instruction_timestamp,
01024           reference_record->reference_timestamp);
01025 
01026   // Make the kernel the active task.
01027   active_task = kernel_task;
01028 
01029 } // Consumer::make_kernel_active
01030 // ===================================================================
01031 
01032 
01033 
01034 // ===================================================================
01035 void
01036 Consumer::make_user_active () {
01037 
01038   // It must be the case that the kernel task was previously active.
01039   if (active_task != kernel_task) {
01040     fprintf(stderr, "Consumer::make_kernel_active(): ");
01041     fprintf(stderr, "Kernel task was not active\n");
01042     exit(1);
01043   }
01044   // Mark the beginning of a quanta (of sorts) for the user.
01045   scheduled_task->
01046     mark_quanta_start(reference_record->cycle_timestamp,
01047           reference_record->instruction_timestamp,
01048           reference_record->reference_timestamp);
01049 
01050   // Make the user the active task.
01051   active_task = scheduled_task;
01052 
01053 } // Consumer::make_user_active
01054 // ===================================================================
01055 
01056 
01057 
01058 // ===================================================================
01059 void
01060 Consumer::handle_schedule () {
01061 
01062   // Find the task object based on the given ID.
01063   Task* newly_scheduled_task =
01064     safe_task_ID_to_task(kernel_record->task_ID);
01065 
01066   // Is the currently scheduled task the active one?
01067   if (active_task == scheduled_task) {
01068 
01069     // Also mark the starting time of the new task's quanta, thus
01070     // allowing it to maintain its virtual time.
01071     newly_scheduled_task->
01072       mark_quanta_start(reference_record->cycle_timestamp,
01073       reference_record->instruction_timestamp,
01074       reference_record->reference_timestamp);
01075 
01076   }
01077 
01078   // Assign the newly scheduled task.
01079   scheduled_task = newly_scheduled_task;
01080 
01081 } // Consumer::handle_schedule
01082 // ===================================================================
01083 
01084 
01085 
01086 // ===================================================================
01087 void
01088 Consumer::handle_fork () {
01089 
01090   // Sanity check: There should be no task already using the given ID.
01091   task_ID_to_Task_map_t::iterator i =
01092     task_ID_to_Task_map.find(kernel_record->task_ID);
01093   if (i != task_ID_to_Task_map.end()) {
01094     fprintf(stderr, "Consumer::handle_fork(): Duplicate task ID\n");
01095     exit(1);
01096   }
01097 
01098   // Create a new task object.
01099   Task* child_task = allocate_new_task();
01100 
01101   // Look up the parent task.
01102   Task* parent_task =
01103     safe_task_ID_to_task(kernel_record->parent_task_ID);
01104 
01105   // Assign the new task's fields.  Note that until the task is
01106   // assigned a context, it won't be associated with a process.
01107   child_task->task_ID = kernel_record->task_ID;
01108   child_task->process = NULL;
01109   child_task->parent_task = parent_task;
01110 
01111   // Create a table entry mapping the task ID to the new task object.
01112   task_ID_to_Task_map[kernel_record->task_ID] = child_task;
01113 
01114 } // Consumer::handle_fork
01115 // ===================================================================
01116 
01117 
01118 
01119 // ===================================================================
void
Consumer::handle_exec () {

  // An exec replaces a process's executable image with the one named
  // in the current kernel record.

  // Grab the task process associated with this task.
  Task* task = safe_task_ID_to_task(kernel_record->task_ID);
  Process* process = task->process;
  // NOTE(review): process is not checked for NULL here.  A task that
  // has not yet been assigned a context has process == NULL (see
  // handle_fork), which would dereference NULL below -- presumably
  // the trace guarantees a context assignment precedes exec; confirm.

  // Assign the new executable image.
  // NOTE(review): strcpy assumes image_pathname is large enough to
  // hold kernel_record->filename -- confirm the declared buffer
  // sizes in the corresponding structures.
  process->image_file_ID = kernel_record->file_ID;
  strcpy(process->image_pathname, kernel_record->filename);

} // Consumer::handle_exec
01132 // ===================================================================
01133 
01134 
01135 
01136 // ===================================================================
01137 void
01138 Consumer::handle_exit () {
01139 
01140   // Lookup the task and process objects for the given task ID.
01141   task_ID_to_Task_map_t::iterator task_iterator =
01142     task_ID_to_Task_map.find(kernel_record->task_ID);
01143   if (task_iterator == task_ID_to_Task_map.end()) {
01144     fprintf(stderr, "Consumer::handle_exit(): Failed lookup\n");
01145     exit(1);
01146   }
01147   Task* task = (*task_iterator).second;
01148 
01149   // Separate the task and its process.  If the process becomes
01150   // defunct as a result, it will be deleted.
01151   unlink_task_and_process(task);
01152 
01153   // Clear the task_ID->process mapping.
01154   task_ID_to_Task_map.erase(task_iterator);
01155 
01156   // Delete the task itself.
01157   delete task;
01158   task = 0;
01159 
01160 } // Consumer::handle_exit
01161 // ===================================================================
01162 
01163 
01164 
01165 // ===================================================================
void
Consumer::handle_context_assignment () {

  // Associate a task with the process owning the kernel record's
  // context ID, creating that process (and its context) on first use.

  // Grab the task object for the given task ID.
  Task* task = safe_task_ID_to_task(kernel_record->task_ID);

  // Unlink this task from its current process (if any).  This must
  // happen before the lookup below: it may delete a defunct process
  // and its context_ID->process mapping.
  unlink_task_and_process(task);

  // Get the mapping from the given context ID to its process.  Is it
  // mapped to any process?
  context_ID_to_Process_map_t:: iterator process_iterator =
    context_ID_to_Process_map.find(kernel_record->context_ID);
  Process* process;
  if (process_iterator == context_ID_to_Process_map.end()) {

    // The context ID is not yet associated with a process.  Allocate
    // a new process object.
    // NOTE(review): plain `new` throws on failure rather than
    // returning 0, so this null check is dead code under standard
    // C++ semantics; kept for byte-identical behavior.
    process = new Process;
    if (process == 0) {
      fprintf(stderr, "Consumer::handle_context_assignment(): ");
      fprintf(stderr, "Failed allocation of new process\n");
      exit(1);
    }

    // Assign the process a new ID.  An ID of 0 indicates the counter
    // wrapped around -- the ID space is exhausted.
    process->process_ID = next_process_ID++;
    if (process->process_ID == 0) {
      fprintf(stderr, "Consumer::handle_context_assignment(): ");
      fprintf(stderr, "Exhausted process ID space\n");
      exit(1);
    }

    // Create a new context by creating a new virtual->canonical map,
    // and assign it to the new process.
    process->virtual_to_canonical_map = new virtual_to_canonical_map_t;
    if (process->virtual_to_canonical_map == NULL) {
      fprintf(stderr, "Consumer::handle_context_assignment(): ");
      fprintf(stderr, "Failed allocation of virtual->canonical map\n");
      exit(1);
    }

    // Assign the given context ID to the process.
    process->context_ID = kernel_record->context_ID;

    // Assign a mapping from this context ID to this process.
    context_ID_to_Process_map[process->context_ID] = process;

    // Assign the process an executable image by copying it from this
    // task's parent's process.
    // NOTE(review): assumes task->parent_task and its process are
    // both non-NULL -- confirm the trace guarantees this ordering.
    Process* parent_process = task->parent_task->process;
    process->image_file_ID = parent_process->image_file_ID;
    strcpy(process->image_pathname, parent_process->image_pathname);

  } else {

    // A process already exists using the given context, so this task
    // is joining an existing process.  Grab that process.
    process = (*process_iterator).second;

    // Sanity check:  This process should explicitly indicate that it
    // is using the given context ID.
    if (process->context_ID != kernel_record->context_ID) {
      fprintf(stderr, "Consumer::handle_context_assignment(): ");
      fprintf(stderr, "Process and map disagree on context ID\n");
      exit(1);
    }

  }

  // Link the task and the process to one another.
  task->process = process;
  process->task_list.push_front(task);

} // Consumer::handle_context_assignment
01241 // ===================================================================
01242 
01243 
01244 
01245 // ===================================================================
void
Consumer::handle_duplicate_range () {

  // Duplicate a range of virtual pages from one context into another
  // (e.g. an address-space copy), sharing the underlying canonical
  // pages and bumping their sharing counts.

  // Get the virtual->canonical map for both the original (parent) and
  // duplicate (child) contexts.
  Process* original_process =
    safe_context_ID_to_process(kernel_record->context_ID);
  virtual_to_canonical_map_t* original_map =
    original_process->virtual_to_canonical_map;
  Process* duplicate_process =
    safe_context_ID_to_process(kernel_record->duplicate_context_ID);
  virtual_to_canonical_map_t* duplicate_map =
    duplicate_process->virtual_to_canonical_map;

  // Traverse every virtual page in the range, making that page in the
  // duplicate context point to the same canonical page as that page
  // does in the original context.
  for (virtual_page_ID_t current_page = kernel_record->starting_page;
       current_page <= kernel_record->ending_page;
       current_page++) {

    // We should never be trying to duplicate pages in the kernel's
    // address range.
    if (current_page >= KERNEL_VIRTUAL_PAGE_OFFSET) {
      fprintf(stderr, "Consumer::handle_duplicate_range(): ");
      fprintf(stderr, "In kernel range\n");
      exit(1);
    }

    // Attempt to lookup the canonical page underlying the original.
    // Note that it is possible for a task to map an anonymous
    // region without having referenced it yet, leaving some pages in
    // that region without assigned canonical pages.
    virtual_to_canonical_map_t::iterator i =
      (*original_map).find(current_page);

    // Has a canonical page yet been assigned to this page in the
    // original context?
    canonical_page_s* canonical_page = 0;
    if (i == (*original_map).end()) {

      // No.  Create a new canonical page and map it into the original
      // context, counting the original context as one sharer.
      canonical_page = get_new_canonical_page(TYPE_ANONYMOUS);
      map_virtual_to_canonical(original_map,
           current_page,
           canonical_page);
      canonical_page->reference_count++;

    } else {

      // Yes.  Use that canonical page.
      canonical_page = (*i).second;

    }

    // The current page in the duplicate context cannot be mapped to
    // anything -- an error has occurred if it is.
    if ((*duplicate_map).find(current_page) !=
  (*duplicate_map).end()) {
      fprintf(stderr, "Consumer::handle_duplicate_range(): ");
      fprintf(stderr, "Duplicate range not unmapped\n");
      exit(1);
    }

    // Map this canonical page into the duplicate context and count
    // the duplicate context as an additional sharer.
    map_virtual_to_canonical(duplicate_map,
           current_page,
           canonical_page);
    canonical_page->reference_count++;

  }
} // Consumer::handle_duplicate_range
01319 // ===================================================================
01320 
01321 
01322 
01323 // ===================================================================
void
Consumer::handle_file_mmap_range () {

  // Map a range of a file into a task's address space: each task
  // virtual page comes to share the canonical page backing the
  // corresponding file page, creating file-cache canonical pages on
  // demand.

  // Get the virtual->canonical maps for the context and the file.
  Task* task = safe_task_ID_to_task(kernel_record->task_ID);
  virtual_to_canonical_map_t* context_map =
    task->process->virtual_to_canonical_map;
  virtual_to_canonical_map_t* file_map =
    safe_file_ID_to_V2C_map(kernel_record->file_ID);

  // Traverse each page in the range, establishing an entry that maps
  // the task virtual page to the file page.
  virtual_page_ID_t task_page = kernel_record->starting_page;
  virtual_page_ID_t file_page = kernel_record->file_starting_page;
  while (task_page <= kernel_record->ending_page) {

    // The task page should never be in the kernel's page range.
    if (task_page >= KERNEL_VIRTUAL_PAGE_OFFSET) {
      fprintf(stderr, "Consumer::handle_file_mmap_range(): ");
      fprintf(stderr, "In kernel range\n");
      exit(1);
    }

    // Attempt to lookup the canonical page underlying this file page.
    virtual_to_canonical_map_t::iterator i =
      (*file_map).find(file_page);

    // Has a canonical page yet been assigned to this file page?
    canonical_page_s* canonical_page = 0;
    if (i == (*file_map).end()) {

      // No.  Create a new canonical page and map it into the file's
      // space.  Note that the file map entry itself does not bump
      // reference_count -- only attachments to address spaces are
      // counted (the increment happens below).
      canonical_page = get_new_canonical_page(TYPE_FILE_CACHE);
      map_virtual_to_canonical(file_map, file_page, canonical_page);
      
    } else {

      // Yes.  Use that canonical page.
      canonical_page = (*i).second;

      // Is this page not an mmap page?
      if (canonical_page->type != TYPE_FILE_CACHE) {

  // This is not a file cache page, so something has gone wrong
  // in mapping a canonical page that was of some other type.
  fprintf(stderr, "Consumer::handle_file_mmap_range(): ");
  fprintf(stderr, "Mapping canonical page of wrong type\n");
  exit(1);

      }
    }

    // Ensure that the task doesn't already have a mapping for this
    // page in its space.  If so, emit the error and exit.
    if ((*context_map).find(task_page) != (*context_map).end()) {
      fprintf(stderr, "Consumer::handle_file_mmap_range(): ");
      fprintf(stderr, "Attempt to map an already mapped page\n");
      exit(1);
    }

    // Map this canonical page into the task's space and count the
    // task's context as a sharer.
    map_virtual_to_canonical(context_map, task_page, canonical_page);
    canonical_page->reference_count++;
 
    // Move to the next page in each space.
    task_page++;
    file_page++;

  }

} // Consumer::handle_file_mmap_range
01396 // ===================================================================
01397 
01398 
01399 
01400 // ===================================================================
01401 void
01402 Consumer::handle_anonymous_mmap_range () {
01403 
01404   // Grab the virtual->canonical map for the process's context.
01405   Task* task = safe_task_ID_to_task(kernel_record->task_ID);
01406   virtual_to_canonical_map_t* map =
01407     task->process->virtual_to_canonical_map;
01408 
01409   // Traverse each page in the range, establishing an entry that maps
01410   // the task virtual page to the canonical ``zero page''.
01411   virtual_page_ID_t page = kernel_record->starting_page;
01412   while (page <= kernel_record->ending_page) {
01413 
01414     // The task page should never be in the kernel's page range.
01415     if (page >= KERNEL_VIRTUAL_PAGE_OFFSET) {
01416       fprintf(stderr, "Consumer::handle_anonymous_mmap_range(): ");
01417       fprintf(stderr, "In kernel range\n");
01418       exit(1);
01419     }
01420 
01421     // Ensure that the task doesn't already have a mapping for this
01422     // page in its space.  If so, emit the error and exit.
01423     if ((*map).find(page) != (*map).end()) {
01424       fprintf(stderr, "Consumer::handle_anonymous_mmap_range(): ");
01425       fprintf(stderr, "Attempt to map an already mapped page\n");
01426       exit(1);
01427     }
01428 
01429     // Map the canonical zero page into the task's space.
01430     map_virtual_to_canonical(map, page, canonical_zero_page);
01431     canonical_zero_page->reference_count++;
01432  
01433     // Move to the next page.
01434     page++;
01435 
01436   }
01437 } // Consumer::handle_anonymous_mmap_range
01438 // ===================================================================
01439 
01440 
01441 
01442 // ===================================================================
void
Consumer::handle_shmat () {

  // Attach an IPC shared memory segment to a task's address space:
  // each task virtual page comes to share the canonical page backing
  // the corresponding segment page, creating segment pages on demand.

  // Grab the virtual->canonical maps for the context and the shared
  // memory segment.
  Task* task = safe_task_ID_to_task(kernel_record->task_ID);
  virtual_to_canonical_map_t* context_map =
    task->process->virtual_to_canonical_map;
  virtual_to_canonical_map_t* shm_map =
    safe_shm_ID_to_V2C_map(kernel_record->shm_ID);

  // Traverse every page in the range, establishing an entry that maps
  // the task virtual page to the shared memory segment page.  Note
  // that shared memory segments are assumed to start at an offset of
  // zero.
  virtual_page_ID_t task_page = kernel_record->starting_page;
  virtual_page_ID_t shm_page = 0;
  while (task_page <= kernel_record->ending_page) {

    // The task page should never be in the kernel's page range.
    if (task_page >= KERNEL_VIRTUAL_PAGE_OFFSET) {
      fprintf(stderr, "Consumer::handle_shmat(): In kernel range\n");
      exit(1);
    }

    // Attempt to lookup the canonical page underlying this shared
    // memory page.
    virtual_to_canonical_map_t::iterator i = (*shm_map).find(shm_page);

    // Has a canonical page yet been assigned to this shared memory
    // page?
    canonical_page_s* canonical_page = 0;
    if (i == (*shm_map).end()) {

      // No.  Create a new canonical page, associate it with this new
      // shm segment, and map it into the shared memory segment's
      // space.  The segment map entry itself does not bump
      // reference_count -- only attachments to address spaces are
      // counted (the increment happens below).
      canonical_page = get_new_canonical_page(TYPE_IPC_SHM_SEGMENT);
      canonical_page->shm_ID = kernel_record->shm_ID;
      map_virtual_to_canonical(shm_map, shm_page, canonical_page);
      
    } else {

      // Yes.  Use that canonical page.
      canonical_page = (*i).second;

    }

    // Ensure that the task doesn't already have a mapping for this
    // page in its space.  If so, emit the error and exit.
    if ((*context_map).find(task_page) != (*context_map).end()) {
      fprintf(stderr, "Consumer::handle_shmat(): ");
      fprintf(stderr, "Attempt to map an already mapped page\n");
      exit(1);
    }

    // Map this canonical page into the task's space and count the
    // task's context as a sharer.
    map_virtual_to_canonical(context_map, task_page, canonical_page);
    canonical_page->reference_count++;

    // Move to the next pair of pages.
    task_page++;
    shm_page++;

  }
} // Consumer::handle_shmat
01509 // ===================================================================
01510 
01511 
01512 
01513 // ===================================================================
void
Consumer::handle_munmap_range () {

  // Remove a range of virtual pages from a task's address space,
  // dropping the sharing count on each underlying canonical page and
  // reclaiming pages/segments that become unused.

  // Grab the virtual->canonical map for the context.
  Task* task = safe_task_ID_to_task(kernel_record->task_ID);
  virtual_to_canonical_map_t* map =
    task->process->virtual_to_canonical_map;

  // Traverse every page in the range, eliminating any mappings
  // found.  Pages without mappings are silently skipped (a region
  // may have been mapped but never referenced).
  virtual_page_ID_t page = kernel_record->starting_page;
  while (page <= kernel_record->ending_page) {

    // The task page should never be in the kernel's page range.
    if (page >= KERNEL_VIRTUAL_PAGE_OFFSET) {
      fprintf(stderr, "Consumer::handle_munmap_range(): ");
      fprintf(stderr, "In kernel range\n");
      exit(1);
    }

    // Is this page mapped to another?
    virtual_to_canonical_map_t::iterator i = (*map).find(page);
    if (i != (*map).end()) {

      // It is, so eliminate the mapping.  If the underlying canonical
      // pages indicate that this is an IPC shared memory segment,
      // determine if that segment is defunct and should be deleted.
      // NOTE(review): the NULL test below implies delete_if_unused_page
      // clears the pointer when it deletes the page -- confirm.
      canonical_page_s* canonical_page = (*i).second;
      canonical_page->reference_count--;
      (*map).erase(i);
      delete_if_unused_page(&canonical_page);

      // Note that only anonymous pages are deleted by the above call,
      // so this use of canonical_page is safe.
      if ((canonical_page != NULL) &&
  (canonical_page->type == TYPE_IPC_SHM_SEGMENT)) {
  mark_if_defunct_shm_segment(canonical_page);
      }

    }

    // Move to the next page.
    page++;

  }
} // Consumer::handle_munmap_range
01560 // ===================================================================
01561 
01562 
01563 
01564 // ===================================================================
void
Consumer::handle_complete_unmap (const context_ID_t context_ID) {

  // Tear down every mapping in the given context, dropping the
  // sharing count on each underlying canonical page and flagging
  // newly-unreferenced shm segments as defunct.
  //
  // @param context_ID  the context whose map should be emptied.

  // Is this context ID mapped to a process?  It may not be if the
  // process with which it was associated became defunct and was
  // deleted, taking the context with it.
  context_ID_to_Process_map_t::iterator process_iterator =
    context_ID_to_Process_map.find(context_ID);
  if (process_iterator == context_ID_to_Process_map.end()) {

    // The context has been deleted.  No work to do here.
    return;

  }

  // Grab the virtual->canonical map that forms the context for the
  // process.
  virtual_to_canonical_map_t* map =
    (*process_iterator).second->virtual_to_canonical_map;

  // This function takes a brute-force approach: It traverses every
  // page in the table, eliminating any mappings it might find.
  virtual_to_canonical_map_t::iterator i = (*map).begin();
  while (i != (*map).end()) {

    // It should never be the case that we are unmapping pages in the
    // kernel's page range.
    if ((*i).first >= KERNEL_VIRTUAL_PAGE_OFFSET) {
      fprintf(stderr, "Consumer::handle_complete_unmap(): ");
      fprintf(stderr, "In kernel range\n");
      exit(1);
    }

    // Eliminate the mapping.  If the underlying canonical pages
    // indicate that this is an IPC shared memory segment, determine
    // if that segment is defunct and should be deleted.  The
    // iterator is copied and advanced *before* the erase so that
    // erasing cannot invalidate the traversal.
    canonical_page_s* canonical_page = (*i).second;
    virtual_to_canonical_map_t::iterator moribund = i;
    i++;
    (*map).erase(moribund);
    canonical_page->reference_count--;
    delete_if_unused_page(&canonical_page);

    // Note that only anonymous pages are deleted by the above call,
    // so if this is an IPC shm canonical page, it will still exist.
    // NOTE(review): the NULL test implies delete_if_unused_page
    // clears the pointer when it deletes the page -- confirm.
    if ((canonical_page != 0) &&
  (canonical_page->type == TYPE_IPC_SHM_SEGMENT)) {
      mark_if_defunct_shm_segment(canonical_page);
    }

  }
} // Consumer::handle_complete_unmap
01617 // ===================================================================
01618 
01619 
01620 
01621 // ===================================================================
void
Consumer::handle_cow_unmap () {

  // Model a copy-on-write fault: detach the task's virtual page from
  // the canonical page it shared, and attach it to a fresh private
  // anonymous canonical page instead.

  virtual_page_ID_t page = kernel_record->starting_page;

  // The page should not be in the kernel's page range.
  if (page >= KERNEL_VIRTUAL_PAGE_OFFSET) {
    fprintf(stderr, "Consumer::handle_cow_unmap(): In kernel range\n");
    exit(1);
  }

  // Grab the virtual->canonical map for the context.
  Task* task = safe_task_ID_to_task(kernel_record->task_ID);
  virtual_to_canonical_map_t* map =
    task->process->virtual_to_canonical_map;

  // This page should be mapped.  If it is not, an error has
  // occurred.
  virtual_to_canonical_map_t::iterator i = (*map).find(page);
  if (i == (*map).end()) {
    fprintf(stderr, "Consumer::handle_cow_unmap(): ");
    fprintf(stderr, "Unmapping a page with no mapping\n");
    exit(1);
  }

  // Reassign this virtual page to a new canonical page, thus
  // representing the copy that is made by the COW mechanism.  Whether
  // the original page was anonymous or from a file, the new copy must
  // be anonymous.  Also increase its sharing count.
  // NOTE(review): the new page is created with the original's type
  // and then immediately overwritten to TYPE_ANONYMOUS -- presumably
  // get_new_canonical_page uses the type argument for accounting;
  // confirm against its definition.
  canonical_page_s* originalCanonicalPage = (*i).second;
  originalCanonicalPage->reference_count--;
  (*map).erase(i);
  canonical_page_s* newCanonicalPage =
    get_new_canonical_page(originalCanonicalPage->type);
  newCanonicalPage->type = TYPE_ANONYMOUS;
  newCanonicalPage->reference_count++;
  map_virtual_to_canonical(map, page, newCanonicalPage);

  // The original page cannot have been an IPC shared memory page.
  // (Sanity checks run after the remap; they only inspect the
  // original page's fields, so the ordering is safe.)
  if (originalCanonicalPage->type == TYPE_IPC_SHM_SEGMENT) {
    fprintf(stderr, "Consumer::handle_cow_unmap(): ");
    fprintf(stderr, "COW with an IPC shm page\n");
    exit(1);
  }

  // When COW-unmapping an anonymous page, it should never be the case
  // that the sharing count reaches zero -- the last task to write
  // to the COW page should get that original.
  if ((originalCanonicalPage->type == TYPE_ANONYMOUS) &&
      (originalCanonicalPage->reference_count == 0)) {
    fprintf(stderr, "Consumer::handle_cow_unmap(): ");
    fprintf(stderr, "Anonymous page orphaned\n");
    exit(1);
  }
} // Consumer::handle_cow_unmap
01677 // ===================================================================
01678 
01679 
01680 
01681 // ===================================================================
01682 void
01683 Consumer::handle_buffer_cache_allocation () {
01684 
01685   virtual_page_ID_t page = kernel_record->starting_page;
01686 
01687   // The page must be in the kernel's page range.
01688   if (page < KERNEL_VIRTUAL_PAGE_OFFSET) {
01689     fprintf(stderr, "Consumer::handle_buffer_cache_allocation(): ");
01690     fprintf(stderr, "Outside kernel range\n");
01691     exit(1);
01692   }
01693 
01694   // Grab the virtual->canonical map for the kernel's context.
01695   Process* kernel_process =
01696     safe_context_ID_to_process(KERNEL_CONTEXT);
01697   virtual_to_canonical_map_t* map =
01698     kernel_process->virtual_to_canonical_map;
01699 
01700   // This page should not be mapped.
01701   virtual_to_canonical_map_t::iterator i = (*map).find(page);
01702   if (i != (*map).end()) {
01703     fprintf(stderr, "Consumer::handle_buffer_cache_allocation(): ");
01704     fprintf(stderr,
01705       "At record %qd: Page already mapped\n",
01706       number_kernel_records);
01707     exit(1);
01708   }
01709 
01710   // Assign this virtual page to the one canonical buffer cache page.
01711   // Increment the reference count so that we can track the number of
01712   // buffer cache pages at each moment.
01713   canonical_buffer_cache_page.reference_count++;
01714   map_virtual_to_canonical(map, page, &canonical_buffer_cache_page);
01715 
01716 } // Consumer::handle_buffer_cache_allocation
01717 // ===================================================================
01718 
01719 
01720 
01721 // ===================================================================
01722 void
01723 Consumer::handle_buffer_cache_deallocation () {
01724 
01725   virtual_page_ID_t page = kernel_record->starting_page;
01726 
01727   // The page must be in the kernel's page range.
01728   if (page < KERNEL_VIRTUAL_PAGE_OFFSET) {
01729     fprintf(stderr, "Consumer::handle_buffer_cache_deallocation(): ");
01730     fprintf(stderr, "Outside kernel range\n");
01731     exit(1);
01732   }
01733 
01734   // Grab the virtual->canonical map for the kernel's context.
01735   Process* kernel_process =
01736     safe_context_ID_to_process(KERNEL_CONTEXT);
01737   virtual_to_canonical_map_t* map =
01738     kernel_process->virtual_to_canonical_map;
01739 
01740   // This page must be mapped to the canonical buffer cache page.
01741   virtual_to_canonical_map_t::iterator i = (*map).find(page);
01742   if (i == (*map).end()) {
01743     fprintf(stderr, "Consumer::handle_buffer_cache_deallocation(): ");
01744     fprintf(stderr, "Page not mapped\n");
01745     exit(1);
01746   }
01747   canonical_page_s* canonical_page = (*i).second;
01748   if (canonical_page->type != TYPE_BUFFER_CACHE) {
01749     fprintf(stderr, "Consumer::handle_buffer_cache_deallocation(): ");
01750     fprintf(stderr,
01751       "At %qd, deallocating non-buffer cache page\n",
01752       number_kernel_records);
01753     exit(1);
01754   }     
01755 
01756   // Eliminate the mapping.  Decrement the reference count to track
01757   // the number of buffer cache pages.
01758   (*map).erase(i);
01759   canonical_page->reference_count--;
01760 
01761 } // Consumer::handle_buffer_cache_deallocation
01762 // ===================================================================
01763 
01764 
01765 
01766 // ===================================================================
01767 void
01768 Consumer::handle_file_cache_allocation () {
01769 
01770   virtual_page_ID_t kernel_page = kernel_record->starting_page;
01771   virtual_page_ID_t file_page = kernel_record->file_starting_page;
01772 
01773   // The page must be in the kernel's page range.
01774   if (kernel_page < KERNEL_VIRTUAL_PAGE_OFFSET) {
01775     fprintf(stderr, "Consumer::handle_file_cache_allocation(): ");
01776     fprintf(stderr, "Outside kernel range\n");
01777     exit(1);
01778   }
01779 
01780   // Grab the virtual->canonical map for the kernel's context and the
01781   // file.
01782   Process* kernel_process =
01783     safe_context_ID_to_process(KERNEL_CONTEXT);
01784   virtual_to_canonical_map_t* context_map =
01785     kernel_process->virtual_to_canonical_map;
01786   virtual_to_canonical_map_t* file_map =
01787     safe_file_ID_to_V2C_map(kernel_record->file_ID);
01788 
01789   // Attempt to lookup the canonical page underlying this file page.
01790   virtual_to_canonical_map_t::iterator i = (*file_map).find(file_page);
01791 
01792   // Has a canonical page yet been assigned to this file page?
01793   canonical_page_s* canonical_page = 0;
01794   if (i == (*file_map).end()) {
01795 
01796     // No.  Create a new canonical page and map it into the file's
01797     // space.
01798     canonical_page = get_new_canonical_page(TYPE_FILE_CACHE);
01799     map_virtual_to_canonical(file_map, file_page, canonical_page);
01800       
01801   } else {
01802 
01803     // Yes.  Use that canonical page.
01804     canonical_page = (*i).second;
01805 
01806     // Ensure that the canonical page is of the correct type.
01807     if (canonical_page->type != TYPE_FILE_CACHE) {
01808       fprintf(stderr, "Consumer::handle_file_cache_allocation(): ");
01809       fprintf(stderr, "Mapping wrong type of canonical page\n");
01810       exit(1);
01811 
01812     }
01813   }
01814 
01815   // Ensure that the KERNEL doesn't already have a mapping for this
01816   // page in its space.  If so, emit the error and exit.
01817   if ((*context_map).find(kernel_page) != (*context_map).end()) {
01818     fprintf(stderr, "Consumer::handle_file_cache_allocation(): ");
01819     fprintf(stderr, "Attempt to map an already mapped page\n");
01820     exit(1);
01821   }
01822 
01823   // Map this canonical page into the task's space.  Note that we do
01824   // not attribute a reference count to this page, as it may not be
01825   // mapped into any virtual space.  Since it may be used for VFS
01826   // access, canonical file cache pages are never freed.
01827   map_virtual_to_canonical(context_map, kernel_page, canonical_page);
01828 
01829 } // Consumer::handle_file_cache_allocation
01830 // ===================================================================
01831 
01832 
01833 
01834 // ===================================================================
01835 void
01836 Consumer::handle_file_cache_deallocation () {
01837 
01838   virtual_page_ID_t page = kernel_record->starting_page;
01839 
01840   // The address must be in the kernel's range.
01841   if (page < KERNEL_VIRTUAL_PAGE_OFFSET) {
01842     fprintf(stderr, "Consumer::handle_file_cache_deallocation(): ");
01843     fprintf(stderr, "Outside kernel range\n");
01844     exit(1);
01845   }
01846 
01847   // Get the virtual->canonical map for the kernel.
01848   Process* kernel_process =
01849     safe_context_ID_to_process(KERNEL_CONTEXT);
01850   virtual_to_canonical_map_t* map =
01851     kernel_process->virtual_to_canonical_map;
01852 
01853   // The page not actually have been recorded as a file cache page (as
01854   // Linux calls for deallocation on pages that may never have reached
01855   // the point of insertion into the page cache).
01856   virtual_to_canonical_map_t::iterator i = (*map).find(page);
01857   if (i != (*map).end()) {
01858 
01859     canonical_page_s* canonical_page = (*i).second;
01860     if (canonical_page->type != TYPE_FILE_CACHE) {
01861       fprintf(stderr, "Consumer::handle_file_cache_deallocation(): ");
01862       fprintf(stderr, "Deallocating non-file cache page\n");
01863       exit(1);
01864     }
01865 
01866     // Eliminate the mapping.
01867     (*map).erase(i);
01868 
01869   }
01870 } // Consumer::handle_file_cache_deallocation
01871 // ===================================================================
01872 
01873 
01874 
01875 // ===================================================================
01876 void
01877 Consumer::handle_file_delete () {
01878 
01879   // Grab the context for this file.
01880   file_ID_to_V2C_map_t::iterator map_iterator =
01881     file_ID_to_V2C_map.find(kernel_record->file_ID);
01882   assert(map_iterator != file_ID_to_V2C_map.end());
01883   virtual_to_canonical_map_t* map =
01884     safe_file_ID_to_V2C_map(kernel_record->file_ID);
01885 
01886   // Traverse every page in the table, deleting the underlying
01887   // canonical pages.
01888   virtual_to_canonical_map_t::iterator i = (*map).begin();
01889   while (i != (*map).end()) {
01890 
01891     // Sanity check:  The canonical page should have a reference count
01892     // of zero, or else the filesystem was deleting a file that had
01893     // actively open and mapped pages.
01894     canonical_page_s* canonical_page = (*i).second;
01895     assert(canonical_page->reference_count == 0);
01896 
01897     // Delete the page.  There's no need to delete the individual
01898     // mapping, as the whole map will be deleted later.
01899     delete canonical_page;
01900     canonical_page = 0;
01901 
01902     // Move to the next mapped page.
01903     i++;
01904 
01905   } // Table traversal
01906 
01907   // Delete the map and its association with the given file ID.
01908   delete map;
01909   map = 0;
01910   file_ID_to_V2C_map.erase(map_iterator);
01911 
01912 } // handle_file_delete
01913 // ===================================================================
01914 
01915 
01916 
01917 // ===================================================================
01918 void
01919 Consumer::update_with_reference_record () {
01920 
01921   // What type of reference is this?
01922   switch (reference_record->tag) {
01923 
01924   case TAG_KERNEL_READ:
01925   case TAG_KERNEL_WRITE:
01926   case TAG_KERNEL_INSTRUCTION_FETCH:
01927 
01928     // If the kernel is not the active task, then switch to it.
01929     if (active_task != kernel_task) {
01930       make_kernel_active();
01931     }
01932     break;
01933 
01934   case TAG_USER_READ:
01935   case TAG_USER_WRITE:
01936   case TAG_USER_INSTRUCTION_FETCH:
01937 
01938     // If the user-level task is not the active one, make it so.
01939     if (active_task != scheduled_task) {
01940       make_user_active();
01941     }
01942     break;
01943 
01944   default:
01945 
01946     fprintf(stderr, "Consumer::update_with_reference_record(): ");
01947     fprintf(stderr,
01948       "Invalid reference tag %c(%hd)\n",
01949       reference_record->tag,
01950       (unsigned short int)reference_record->tag);
01951     exit(1);
01952 
01953   }
01954 
01955   // Grab local copies of the reference page's information.
01956   const context_ID_t context_ID =
01957     reference_record->virtual_page.context_ID;
01958   const virtual_page_ID_t page_ID =
01959     reference_record->virtual_page.page_ID;
01960 
01961   // An update to the page mappings is required only if this record
01962   // represents a reference not within the kernel's shared space.
01963   if (page_ID < KERNEL_VIRTUAL_PAGE_OFFSET) {
01964 
01965     // Grab the map of the context in which this reference was
01966     // performed.
01967     Process* process = safe_context_ID_to_process(context_ID);
01968     virtual_to_canonical_map_t* map = process->virtual_to_canonical_map;
01969 
01970     // Is this virtual page mapped to some canonical page?
01971     virtual_to_canonical_map_t::iterator i = (*map).find(page_ID);
01972     if (i == (*map).end()) {
01973 
01974       // No.  Create a new canonical page and map this virtual page to
01975       // it.
01976       canonical_page_s* canonical_page =
01977   get_new_canonical_page(TYPE_ANONYMOUS);
01978       map_virtual_to_canonical(map, page_ID, canonical_page);
01979       canonical_page->reference_count++;
01980 
01981     }
01982   }
01983 } // Consumer::update_with_reference_record
01984 // ===================================================================
01985 
01986 
01987 
01988 // ===================================================================
01989 void
01990 Consumer::update_with_kernel_record () {
01991 
01992   // Update the executable image pathname based on the record
01993   // type.
01994   switch (kernel_record->tag) {
01995 
01996   case TAG_SCHEDULE:
01997 
01998     handle_schedule();
01999     break;
02000 
02001   case TAG_FORK:
02002 
02003     handle_fork();
02004     break;
02005 
02006   case TAG_EXEC:
02007 
02008     handle_exec();
02009     break;
02010 
02011   case TAG_EXIT:
02012 
02013     handle_exit();
02014     break;
02015 
02016   case TAG_CONTEXT_ASSIGNMENT:
02017 
02018     handle_context_assignment();
02019     break;
02020 
02021   case TAG_DUPLICATE_RANGE:
02022 
02023     handle_duplicate_range();
02024     break;
02025 
02026   case TAG_MMAP_FILE:
02027 
02028     handle_file_mmap_range();
02029     break;
02030 
02031   case TAG_MMAP_ANONYMOUS:
02032 
02033     handle_anonymous_mmap_range();
02034     break;
02035 
02036   case TAG_MUNMAP:
02037 
02038     handle_munmap_range();
02039     break;
02040 
02041   case TAG_COMPLETE_UNMAP:
02042 
02043     handle_complete_unmap(kernel_record->context_ID);
02044     break;
02045 
02046   case TAG_IPC_SHMAT:
02047 
02048     handle_shmat();
02049     break;
02050 
02051   case TAG_IPC_SHMDT:
02052 
02053     // At the moment, we rely on the munmap() call performed by every
02054     // shmdt() call to unmap the IPC shared memory segment pages and
02055     // to determine whether a particular segment has become defunct
02056     // and should be deleted.
02057     break;
02058 
02059   case TAG_COW_UNMAP:
02060 
02061     handle_cow_unmap();
02062     break;
02063 
02064   case TAG_BUFFER_ALLOCATION:
02065 
02066     handle_buffer_cache_allocation();
02067     break;
02068 
02069   case TAG_BUFFER_DEALLOCATION:
02070 
02071     handle_buffer_cache_deallocation();
02072     break;
02073 
02074   case TAG_FILE_CACHE_ALLOCATION:
02075 
02076     handle_file_cache_allocation();
02077     break;
02078 
02079   case TAG_FILE_CACHE_DEALLOCATION:
02080 
02081     handle_file_cache_deallocation();
02082     break;
02083 
02084   case TAG_FILE_OPEN:
02085   case TAG_FILE_CLOSE:
02086   case TAG_FILE_READ:
02087   case TAG_FILE_WRITE:
02088   case TAG_ACCEPT:
02089 
02090     // At the moment, we do nothing with these.  We should have data
02091     // structures tracking file use per-task so that we can emit
02092     // filesystem traces and schedule blocking I/O calls.  Save that
02093     // work for later.
02094     break;
02095 
02096   case TAG_FILE_DELETE:
02097 
02098     handle_file_delete();
02099     break;
02100 
02101   default:
02102 
02103     fprintf(stderr, "Consumer::update_with_kernel_record(): ");
02104     fprintf(stderr,
02105       "At record %qd: Unknown tag %c(%d)\n",
02106       number_kernel_records,
02107       kernel_record->tag,
02108       (int)kernel_record->tag);
02109     exit(1);
02110 
02111   }
02112 
02113   // If any unmapping occurred, there may be IPC shared memory
02114   // segments that are now defunct and need to be deleted.
02115   delete_defunct_shm_segments();
02116 
02117 } // Consumer::update_with_kernel_record
02118 // ===================================================================
02119 
02120 
02121 
02122 // ===================================================================
02123 void
02124 Consumer::lookup_canonical_page_ID
02125 (bool* return_V2C_mapping_exists,
02126  canonical_page_ID_t* const return_canonical_page_ID,
02127  const virtual_to_canonical_map_t* const map,
02128  const virtual_page_ID_t virtual_page_ID) {
02129 
02130   // Assume initially that if the virtual page is valid, its mapping
02131   // into canonical space will exist.
02132   *return_V2C_mapping_exists = true;
02133 
02134   // Attempt to lookup the page in the context's map.
02135   const virtual_to_canonical_map_t::const_iterator i =
02136     (*map).find(virtual_page_ID);
02137 
02138   // Was the lookup successful?
02139   if (i != (*map).end()) {
02140 
02141     // Yes.  Is the canonical page a buffer cache page?
02142     if ((*i).second->type == TYPE_BUFFER_CACHE) {
02143 
02144       // It is a buffer cache page, and so no mapping into canonical
02145       // space really exists.
02146       *return_V2C_mapping_exists = false;
02147 
02148     } else {
02149 
02150       // It is not a buffer cache page, so return the canonical page's
02151       // ID.
02152       *return_canonical_page_ID = (*i).second->page;
02153 
02154     }
02155 
02156     // The lookup failed.  Is the virtual address in the kernel's
02157     // address space?
02158   } else if (virtual_page_ID >= KERNEL_VIRTUAL_PAGE_OFFSET) {
02159 
02160     // Yes.  The upper 1 GB of the canonical space is reserved to
02161     // correspond to the upper 1 GB of kernel virtual address
02162     // space. [Linux specific] Calculate the canonical page ID.
02163     *return_canonical_page_ID =
02164       KERNEL_CANONICAL_PAGE_OFFSET + (virtual_page_ID -
02165               KERNEL_VIRTUAL_PAGE_OFFSET);
02166 
02167   } else {
02168 
02169     // There was no mapping into canonical space found, and the virtual
02170     // address is not in the kernel space.  Therefore, this is an
02171     // invalid virtual address.  Emit the error and exit.
02172     fprintf(stderr, "Consumer::lookup_canonical_page_ID(): ");
02173     fprintf(stderr, "Failed lookup\n");
02174     exit(1);
02175 
02176   }
02177 } // lookup_canonical_page_ID
02178 // ===================================================================
02179 
02180 
02181 
02182 // ===================================================================
02183 Task*
02184 Consumer::allocate_new_task () {
02185 
02186   // Attempt to allocate a new Task.  Check for success.
02187   Task* task = new Task;
02188   if (task == NULL) {
02189     fprintf(stderr, "Consumer::allocate_new_task(): ");
02190     fprintf(stderr, "Allocation failure\n");
02191     exit(1);
02192   }
02193 
02194   return task;
02195 
02196 } // Consumer::allocate_new_task
02197 // ===================================================================
02198 
02199 
02200 
02201 // ===================================================================
02202 Process*
02203 Consumer::allocate_new_process () {
02204 
02205   // Attempt to allocate a new Task.  Check for success.
02206   Process* process = new Process;
02207   if (process == NULL) {
02208     fprintf(stderr, "Consumer::allocate_new_process(): ");
02209     fprintf(stderr, "Allocation failure\n");
02210     exit(1);
02211   }
02212 
02213   return process;
02214 
02215 } // Consumer::allocate_new_process
02216 // ===================================================================

Generated on Fri Jan 31 10:33:34 2003 for Laplace-merge by doxygen1.3-rc2