#include <stdio.h>
#include <assert.h>
#include "util.h"
#include "vpr_types.h"
#include "globals.h"
#include "path_delay.h"
#include "path_delay2.h"
#include "net_delay.h"
#include "vpr_utils.h"
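
/* Timing-graph construction and analysis for VPR.  The routines in this
 * file build a directed timing graph (tnodes and tedges) from the netlist,
 * block and subblock information, load net delays onto the inter-block
 * edges, compute arrival and required times with forward and backward
 * breadth-first traversals, and report net slacks and the critical path. */
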
/* A very large negative delay used on the fake source edge of a constant
 * generator, so that constant outputs become available "long before" any
 * real signal and can never end up on the critical path. */
#define T_CONSTANT_GENERATOR -1000

/* Pin classes within a subblock. */
enum e_subblock_pin_type
{ SUB_INPUT = 0, SUB_OUTPUT, SUB_CLOCK, NUM_SUB_PIN_TYPES };

/* Chunk-memory bookkeeping for the timing-graph edges and the net slack
 * storage.  Everything allocated from this chunk is released in one shot
 * by free_timing_graph(). */
static struct s_linked_vptr *tedge_ch_list_head = NULL;
static int tedge_ch_bytes_avail = 0;
static char *tedge_ch_next_avail = NULL;

static int alloc_and_load_pin_mappings(int ***block_pin_to_tnode_ptr,
                                       int *****snode_block_pin_to_tnode_ptr,
                                       t_subblock_data subblock_data,
                                       int ***num_uses_of_sblk_opin);

static void free_pin_mappings(int **block_pin_to_tnode,
                              int ****snode_block_pin_to_tnode,
                              int *num_subblocks_per_block);

static void alloc_and_load_fanout_counts(int ***num_uses_of_fb_ipin_ptr,
                                         int ****num_uses_of_sblk_opin_ptr,
                                         t_subblock_data subblock_data);

static void free_fanout_counts(int **num_uses_of_fb_ipin,
                               int ***num_uses_of_sblk_opin);

static float **alloc_net_slack(void);

static void compute_net_slacks(float **net_slack);

static void alloc_and_load_tnodes_and_net_mapping(int **num_uses_of_fb_ipin,
                                                  int ***num_uses_of_sblk_opin,
                                                  int **block_pin_to_tnode,
                                                  int ****snode_block_pin_to_tnode,
                                                  t_subblock_data subblock_data,
                                                  t_timing_inf timing_inf);

static void build_fb_tnodes(int iblk,
                            int *n_uses_of_fb_ipin,
                            int **block_pin_to_tnode,
                            int ***sub_pin_to_tnode,
                            int num_subs,
                            t_subblock *sub_inf,
                            float T_fb_ipin_to_sblk_ipin);

static void build_subblock_tnodes(int **n_uses_of_sblk_opin,
                                  int *node_block_pin_to_tnode,
                                  int ***sub_pin_to_tnode,
                                  int *num_subblocks_per_block,
                                  t_subblock **subblock_inf,
                                  t_timing_inf timing_inf,
                                  int iblk);

static boolean is_global_clock(int iblk,
                               int sub,
                               int subpin,
                               int *num_subblocks_per_block,
                               t_subblock **subblock_inf);

static void build_block_output_tnode(int inode,
                                     int iblk,
                                     int ipin,
                                     int **block_pin_to_tnode);

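/* Builds the timing graph from the current netlist and subblock data and
 * returns the net slack array it allocates -- net_slack[0..num_nets-1]
 * [1..num_sinks], one entry per net sink.  The slack values themselves are
 * not computed here; they are filled in later by load_net_slack(). */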
float **
alloc_and_load_timing_graph(t_timing_inf timing_inf,
                            t_subblock_data subblock_data)
{

    int i;
    int **num_uses_of_fb_ipin;
    int ***num_uses_of_sblk_opin;
    int **block_pin_to_tnode;
    int ****snode_block_pin_to_tnode;
    int num_sinks;
    float **net_slack;

    if(tedge_ch_list_head != NULL)
    {
        printf("Error in alloc_and_load_timing_graph:\n"
               "\tAn old timing graph still exists.\n");
        exit(1);
    }

    /* tnode_descript stores pin and subblock indices in shorts, so make
     * sure no block type can overflow them. */

    for(i = 0; i < num_types; i++)
    {
        if(type_descriptors[i].num_pins > MAX_SHORT)
        {
            printf
                ("Error in alloc_and_load_timing_graph: type %s has %d pins.\n"
                 "\tThis will cause a short overflow in tnode_descript.\n",
                 type_descriptors[i].name,
                 type_descriptors[i].num_pins);
            exit(1);
        }

        if(type_descriptors[i].max_subblocks > MAX_SHORT)
        {
            printf
                ("Error in alloc_and_load_timing_graph: max_subblocks for type %s is %d.\n"
                 "\tThis will cause a short overflow in tnode_descript.\n",
                 type_descriptors[i].name,
                 type_descriptors[i].max_subblocks);
            exit(1);
        }
    }

    alloc_and_load_fanout_counts(&num_uses_of_fb_ipin,
                                 &num_uses_of_sblk_opin, subblock_data);

    num_tnodes = alloc_and_load_pin_mappings(&block_pin_to_tnode,
                                             &snode_block_pin_to_tnode,
                                             subblock_data,
                                             num_uses_of_sblk_opin);

    alloc_and_load_tnodes_and_net_mapping(num_uses_of_fb_ipin,
                                          num_uses_of_sblk_opin,
                                          block_pin_to_tnode,
                                          snode_block_pin_to_tnode,
                                          subblock_data, timing_inf);

    num_sinks = alloc_and_load_timing_graph_levels();

    check_timing_graph(subblock_data.num_const_gen, subblock_data.num_ff,
                       num_sinks);

    free_fanout_counts(num_uses_of_fb_ipin, num_uses_of_sblk_opin);
    free_pin_mappings(block_pin_to_tnode, snode_block_pin_to_tnode,
                      subblock_data.num_subblocks_per_block);

    net_slack = alloc_net_slack();
    return (net_slack);
}

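/* Allocates the net slack structure: net_slack[0..num_nets-1][1..num_sinks].
 * Each row comes from the tedge chunk allocator, and the row pointer is
 * offset by one so that entry [1] corresponds to the first sink of the net
 * (entry [0], the driver, is unused). */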
static float **
alloc_net_slack(void)
{

    float **net_slack;
    float *tmp_ptr;
    int inet;

    net_slack = (float **)my_malloc(num_nets * sizeof(float *));

    for(inet = 0; inet < num_nets; inet++)
    {
        tmp_ptr =
            (float *)my_chunk_malloc(net[inet].num_sinks * sizeof(float),
                                     &tedge_ch_list_head,
                                     &tedge_ch_bytes_avail,
                                     &tedge_ch_next_avail);
        net_slack[inet] = tmp_ptr - 1;
    }

    return (net_slack);
}

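/* Assigns a tnode index to every used pin in the design and returns the
 * total number of tnodes.  block_pin_to_tnode[iblk][ipin] maps each connected
 * FB (cluster) pin to its tnode; snode_block_pin_to_tnode[iblk][isub][type]
 * [ipin] does the same for subblock input, output and clock pins.  Unused
 * pins map to OPEN.  Extra consecutive tnode indices are reserved where a
 * pin needs more than one graph node:  an OUTPAD_SINK after each IO input,
 * an INPAD_SOURCE after each IO output, a CONSTANT_GEN_SOURCE after the
 * output of a subblock with no used inputs, and an FF_SOURCE / FF_SINK pair
 * per connected output of a clocked subblock. */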
static int
alloc_and_load_pin_mappings(int ***block_pin_to_tnode_ptr,
                            int *****snode_block_pin_to_tnode_ptr,
                            t_subblock_data subblock_data,
                            int ***num_uses_of_sblk_opin)
{

    int iblk, isub, ipin, num_subblocks, opin, clk_pin;
    int curr_tnode;
    int ****snode_block_pin_to_tnode, **block_pin_to_tnode;
    int *num_subblocks_per_block;
    t_type_ptr type;
    t_subblock **subblock_inf;
    boolean has_inputs;

    num_subblocks_per_block = subblock_data.num_subblocks_per_block;
    subblock_inf = subblock_data.subblock_inf;

    block_pin_to_tnode = (int **)my_malloc(num_blocks * sizeof(int *));

    snode_block_pin_to_tnode =
        (int ****)my_malloc(num_blocks * sizeof(int ***));

    curr_tnode = 0;

    for(iblk = 0; iblk < num_blocks; iblk++)
    {
        type = block[iblk].type;
        block_pin_to_tnode[iblk] =
            (int *)my_malloc(type->num_pins * sizeof(int));

        for(ipin = 0; ipin < type->num_pins; ipin++)
        {
            if(block[iblk].nets[ipin] == OPEN)
            {
                block_pin_to_tnode[iblk][ipin] = OPEN;
            }
            else
            {
                block_pin_to_tnode[iblk][ipin] = curr_tnode;
                curr_tnode++;
            }
        }

        num_subblocks = num_subblocks_per_block[iblk];
        snode_block_pin_to_tnode[iblk] =
            (int ***)alloc_matrix(0, num_subblocks - 1, 0,
                                  NUM_SUB_PIN_TYPES - 1, sizeof(int *));

        for(isub = 0; isub < num_subblocks; isub++)
        {
            snode_block_pin_to_tnode[iblk][isub][SUB_INPUT] =
                (int *)my_malloc(type->max_subblock_inputs * sizeof(int));
            snode_block_pin_to_tnode[iblk][isub][SUB_OUTPUT] =
                (int *)my_malloc(type->max_subblock_outputs * sizeof(int));
            snode_block_pin_to_tnode[iblk][isub][SUB_CLOCK] =
                (int *)my_malloc(sizeof(int));

            has_inputs = FALSE;
            for(ipin = 0; ipin < type->max_subblock_inputs; ipin++)
            {
                if(subblock_inf[iblk][isub].inputs[ipin] != OPEN)
                {
                    has_inputs = TRUE;
                    snode_block_pin_to_tnode[iblk][isub][SUB_INPUT][ipin] =
                        curr_tnode;
                    curr_tnode++;
                    if(type == IO_TYPE)
                        curr_tnode++;
                }
                else
                {
                    snode_block_pin_to_tnode[iblk][isub][SUB_INPUT][ipin] =
                        OPEN;
                }
            }

            for(opin = 0; opin < type->max_subblock_outputs; opin++)
            {
                if(num_uses_of_sblk_opin[iblk][isub][opin] != 0)
                {
                    snode_block_pin_to_tnode[iblk][isub][SUB_OUTPUT][opin] =
                        curr_tnode;

                    if(type == IO_TYPE)
                        curr_tnode += 2;
                    else if(has_inputs)
                        curr_tnode++;
                    else
                        curr_tnode += 2;
                }
                else
                {
                    snode_block_pin_to_tnode[iblk][isub][SUB_OUTPUT][opin] =
                        OPEN;
                }
            }

            clk_pin = 0;

            if(subblock_inf[iblk][isub].clock != OPEN)
            {
                snode_block_pin_to_tnode[iblk][isub][SUB_CLOCK][clk_pin] =
                    curr_tnode;

                for(opin = 0; opin < type->max_subblock_outputs; opin++)
                {
                    if(subblock_inf[iblk][isub].outputs[opin] != OPEN)
                        curr_tnode += 2;
                }
            }
            else
            {
                snode_block_pin_to_tnode[iblk][isub][SUB_CLOCK][clk_pin] =
                    OPEN;
            }
        }
    }

    *snode_block_pin_to_tnode_ptr = snode_block_pin_to_tnode;
    *block_pin_to_tnode_ptr = block_pin_to_tnode;
    return (curr_tnode);
}

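/* Frees the pin-to-tnode lookup structures allocated above. */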
static void
free_pin_mappings(int **block_pin_to_tnode,
                  int ****snode_block_pin_to_tnode,
                  int *num_subblocks_per_block)
{

    int isub, iblk, isubtype, num_subblocks;

    for(iblk = 0; iblk < num_blocks; iblk++)
    {
        num_subblocks = num_subblocks_per_block[iblk];
        for(isub = 0; isub < num_subblocks; isub++)
        {
            for(isubtype = 0; isubtype < NUM_SUB_PIN_TYPES; isubtype++)
            {
                free(snode_block_pin_to_tnode[iblk][isub][isubtype]);
            }
        }
        free_matrix(snode_block_pin_to_tnode[iblk], 0, num_subblocks - 1,
                    0, sizeof(int *));
        free(block_pin_to_tnode[iblk]);
    }
    free(block_pin_to_tnode);
    free(snode_block_pin_to_tnode);
}

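/* Allocates and fills num_uses_of_fb_ipin[0..num_blocks-1][0..num_pins-1]
 * and num_uses_of_sblk_opin[0..num_blocks-1][0..max_subblocks-1]
 * [0..max_subblock_outputs-1]:  the fanout of each FB input pin and of each
 * subblock output pin, as counted by load_one_fb_fanout_count(). */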
static void
alloc_and_load_fanout_counts(int ***num_uses_of_fb_ipin_ptr,
                             int ****num_uses_of_sblk_opin_ptr,
                             t_subblock_data subblock_data)
{

    int iblk;
    int **num_uses_of_fb_ipin, ***num_uses_of_sblk_opin;
    int *num_subblocks_per_block;
    t_subblock **subblock_inf;

    num_subblocks_per_block = subblock_data.num_subblocks_per_block;
    subblock_inf = subblock_data.subblock_inf;

    num_uses_of_fb_ipin = (int **)my_malloc(num_blocks * sizeof(int *));

    num_uses_of_sblk_opin = (int ***)my_malloc(num_blocks * sizeof(int **));

    for(iblk = 0; iblk < num_blocks; iblk++)
    {
        num_uses_of_fb_ipin[iblk] =
            (int *)my_calloc(block[iblk].type->num_pins, sizeof(int));
        num_uses_of_sblk_opin[iblk] =
            (int **)alloc_matrix(0, block[iblk].type->max_subblocks - 1,
                                 0,
                                 block[iblk].type->max_subblock_outputs - 1,
                                 sizeof(int));

        load_one_fb_fanout_count(subblock_inf[iblk],
                                 num_subblocks_per_block[iblk],
                                 num_uses_of_fb_ipin[iblk],
                                 num_uses_of_sblk_opin[iblk], iblk);
    }

    *num_uses_of_fb_ipin_ptr = num_uses_of_fb_ipin;
    *num_uses_of_sblk_opin_ptr = num_uses_of_sblk_opin;
}

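/* Frees the fanout-count arrays built by alloc_and_load_fanout_counts(). */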
static void
free_fanout_counts(int **num_uses_of_fb_ipin,
                   int ***num_uses_of_sblk_opin)
{

    int iblk;
    t_type_ptr type;

    for(iblk = 0; iblk < num_blocks; iblk++)
    {
        type = block[iblk].type;
        free(num_uses_of_fb_ipin[iblk]);
        free_matrix(num_uses_of_sblk_opin[iblk], 0,
                    type->max_subblocks - 1, 0, sizeof(int));
    }

    free(num_uses_of_fb_ipin);
    free(num_uses_of_sblk_opin);
}

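/* Allocates the global tnode, tnode_descript and net_to_driver_tnode arrays
 * and then builds the actual timing graph one block at a time:
 * build_fb_tnodes() handles the FB (cluster) pins and build_subblock_tnodes()
 * handles everything inside the block. */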
static void
alloc_and_load_tnodes_and_net_mapping(int **num_uses_of_fb_ipin,
                                      int ***num_uses_of_sblk_opin,
                                      int **block_pin_to_tnode,
                                      int ****snode_block_pin_to_tnode,
                                      t_subblock_data subblock_data,
                                      t_timing_inf timing_inf)
{

    int iblk;
    int *num_subblocks_per_block;
    t_subblock **subblock_inf;

    tnode = (t_tnode *) my_malloc(num_tnodes * sizeof(t_tnode));
    tnode_descript = (t_tnode_descript *) my_malloc(num_tnodes *
                                                    sizeof(t_tnode_descript));

    net_to_driver_tnode = (int *)my_malloc(num_nets * sizeof(int));

    subblock_inf = subblock_data.subblock_inf;
    num_subblocks_per_block = subblock_data.num_subblocks_per_block;

    for(iblk = 0; iblk < num_blocks; iblk++)
    {
        build_fb_tnodes(iblk, num_uses_of_fb_ipin[iblk],
                        block_pin_to_tnode,
                        snode_block_pin_to_tnode[iblk],
                        num_subblocks_per_block[iblk], subblock_inf[iblk],
                        block[iblk].type->type_timing_inf.
                        T_fb_ipin_to_sblk_ipin);

        build_subblock_tnodes(num_uses_of_sblk_opin[iblk],
                              block_pin_to_tnode[iblk],
                              snode_block_pin_to_tnode[iblk],
                              num_subblocks_per_block, subblock_inf,
                              timing_inf, iblk);
    }
}

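/* Builds the tnodes and edges for the pins of FB (cluster) iblk.  Each used
 * output pin becomes an FB_OPIN whose edges (one per net sink) are built by
 * build_block_output_tnode(); each used input pin becomes an FB_IPIN with
 * one edge per subblock input it feeds (delay T_fb_ipin_to_sblk_ipin) plus,
 * if the pin is a subblock clock, one zero-delay edge to the FF_SOURCE node
 * of every connected output of that subblock -- the register delays
 * (T_seq_out and T_seq_in) are attached to those FF nodes later, in
 * build_subblock_tnodes(). */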
static void
build_fb_tnodes(int iblk,
                int *n_uses_of_fb_ipin,
                int **block_pin_to_tnode,
                int ***sub_pin_to_tnode,
                int num_subs,
                t_subblock *sub_inf,
                float T_fb_ipin_to_sblk_ipin)
{

    int isub, ipin, iedge, from_pin, opin;
    int inode, to_node, num_edges;
    t_tedge *tedge;
    int clk_pin;
    t_type_ptr type;
    int *next_ipin_edge;

    type = block[iblk].type;
    next_ipin_edge = (int *)my_malloc(type->num_pins * sizeof(int));
    clk_pin = 0;

    for(ipin = 0; ipin < type->num_pins; ipin++)
    {
        inode = block_pin_to_tnode[iblk][ipin];

        if(inode != OPEN)
        {
            if(is_opin(ipin, type))
            {
                build_block_output_tnode(inode, iblk, ipin,
                                         block_pin_to_tnode);
                tnode_descript[inode].type = FB_OPIN;
            }
            else
            {
                next_ipin_edge[ipin] = 0;
                num_edges = n_uses_of_fb_ipin[ipin];

                for(isub = 0; isub < num_subs; isub++)
                {
                    if(sub_inf[isub].clock == ipin)
                    {
                        for(opin = 0; opin < type->max_subblock_outputs;
                            opin++)
                        {
                            if(sub_inf[isub].outputs[opin] != OPEN)
                            {
                                num_edges++;
                            }
                        }
                        num_edges--;
                    }
                }

                tnode[inode].num_edges = num_edges;

                tnode[inode].out_edges =
                    (t_tedge *) my_chunk_malloc(num_edges * sizeof(t_tedge),
                                                &tedge_ch_list_head,
                                                &tedge_ch_bytes_avail,
                                                &tedge_ch_next_avail);

                tnode_descript[inode].type = FB_IPIN;
            }

            tnode_descript[inode].ipin = ipin;
            tnode_descript[inode].isubblk = OPEN;
            tnode_descript[inode].iblk = iblk;
        }
    }

    for(isub = 0; isub < num_subs; isub++)
    {
        for(ipin = 0; ipin < type->max_subblock_inputs; ipin++)
        {
            from_pin = sub_inf[isub].inputs[ipin];

            if(from_pin != OPEN && from_pin < type->num_pins)
            {
                inode = block_pin_to_tnode[iblk][from_pin];
                assert(inode != OPEN);
                to_node = sub_pin_to_tnode[isub][SUB_INPUT][ipin];
                tedge = tnode[inode].out_edges;
                iedge = next_ipin_edge[from_pin]++;
                tedge[iedge].to_node = to_node;
                tedge[iedge].Tdel = T_fb_ipin_to_sblk_ipin;
            }
        }

        from_pin = sub_inf[isub].clock;

        if(from_pin != OPEN && from_pin < type->num_pins)
        {
            inode = block_pin_to_tnode[iblk][from_pin];
            to_node = sub_pin_to_tnode[isub][SUB_CLOCK][clk_pin];

            for(opin = 0; opin < type->max_subblock_outputs; opin++)
            {
                if(sub_inf[isub].outputs[opin] != OPEN)
                {
                    tedge = tnode[inode].out_edges;
                    iedge = next_ipin_edge[from_pin]++;
                    tedge[iedge].to_node = to_node;
                    tedge[iedge].Tdel = 0.;
                    to_node += 2;
                }
            }
        }
    }
    free(next_ipin_edge);
}

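/* Fills in the edges of an FB output pin tnode:  one edge to the tnode of
 * each sink pin of the net it drives.  Only to_node is set here; the edge
 * delays are the inter-block net delays, loaded later by
 * load_timing_graph_net_delays().  Also records this tnode as the net's
 * driver in net_to_driver_tnode. */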
static void
build_block_output_tnode(int inode,
                         int iblk,
                         int ipin,
                         int **block_pin_to_tnode)
{

    int iedge, to_blk, to_pin, to_node, num_edges, inet;
    t_tedge *tedge;

    inet = block[iblk].nets[ipin];
    assert(inet != OPEN);

    net_to_driver_tnode[inet] = inode;

    num_edges = net[inet].num_sinks;
    tnode[inode].num_edges = num_edges;

    tnode[inode].out_edges = (t_tedge *) my_chunk_malloc(num_edges *
                                                         sizeof(t_tedge),
                                                         &tedge_ch_list_head,
                                                         &tedge_ch_bytes_avail,
                                                         &tedge_ch_next_avail);

    tedge = tnode[inode].out_edges;

    for(iedge = 0; iedge < num_edges; iedge++)
    {
        to_blk = net[inet].node_block[iedge + 1];
        to_pin = net[inet].node_block_pin[iedge + 1];

        to_node = block_pin_to_tnode[to_blk][to_pin];
        tedge[iedge].to_node = to_node;
    }
}

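/* Builds the tnodes and edges for everything inside block iblk:  subblock
 * output pins (SUBBLK_OPIN / INPAD_OPIN plus their INPAD_SOURCE drivers),
 * subblock input pins (SUBBLK_IPIN / OUTPAD_IPIN plus OUTPAD_SINKs),
 * FF_SOURCE / FF_SINK pairs for clocked subblocks and CONSTANT_GEN_SOURCE
 * nodes for subblocks with no used inputs.  It then adds the local edges:
 * subblock output to subblock input or clock (T_sblk_opin_to_sblk_ipin),
 * subblock output to FB output pin (T_sblk_opin_to_fb_opin), and the
 * combinational / sequential paths through each subblock using T_comb,
 * T_seq_in and T_seq_out. */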
static void
build_subblock_tnodes(int **n_uses_of_sblk_opin,
                      int *node_block_pin_to_tnode,
                      int ***sub_pin_to_tnode,
                      int *num_subblocks_per_block,
                      t_subblock **subblock_inf,
                      t_timing_inf timing_inf,
                      int iblk)
{

    int isub, ipin, inode, to_node, from_pin, to_pin, opin, from_opin,
        clk_pin, used_opin_count;
    int num_subs, from_sub;
    t_subblock *sub_inf;
    int iedge, num_edges;
    float sink_delay;
    t_tedge *tedge;
    boolean has_inputs, has_outputs;
    int **next_sblk_opin_edge;
    int *num_opin_used_in_sblk;
    t_type_ptr type = block[iblk].type;

    sub_inf = subblock_inf[iblk];
    num_subs = num_subblocks_per_block[iblk];

    next_sblk_opin_edge =
        (int **)alloc_matrix(0, type->max_subblocks - 1, 0,
                             type->max_subblock_outputs - 1, sizeof(int));
    num_opin_used_in_sblk =
        (int *)my_malloc(type->max_subblocks * sizeof(int));

    clk_pin = 0;

    /* Create the output-pin tnodes (and, for IO blocks, their INPAD_SOURCE
     * drivers). */

    for(isub = 0; isub < num_subs; isub++)
    {
        num_opin_used_in_sblk[isub] = 0;
        for(opin = 0; opin < type->max_subblock_outputs; opin++)
        {
            inode = sub_pin_to_tnode[isub][SUB_OUTPUT][opin];

            if(inode != OPEN)
            {
                num_opin_used_in_sblk[isub]++;
                next_sblk_opin_edge[isub][opin] = 0;
                num_edges = n_uses_of_sblk_opin[isub][opin];
                tnode[inode].num_edges = num_edges;

                tnode[inode].out_edges =
                    (t_tedge *) my_chunk_malloc(num_edges * sizeof(t_tedge),
                                                &tedge_ch_list_head,
                                                &tedge_ch_bytes_avail,
                                                &tedge_ch_next_avail);

                if(IO_TYPE == type)
                {
                    tnode_descript[inode].type = INPAD_OPIN;
                    tnode[inode + 1].num_edges = 1;
                    tnode[inode + 1].out_edges = (t_tedge *)
                        my_chunk_malloc(sizeof(t_tedge),
                                        &tedge_ch_list_head,
                                        &tedge_ch_bytes_avail,
                                        &tedge_ch_next_avail);
                    tedge = tnode[inode + 1].out_edges;
                    tedge[0].to_node = inode;

                    if(is_global_clock(iblk, isub, opin,
                                       num_subblocks_per_block,
                                       subblock_inf))
                        tedge[0].Tdel = 0.;
                    else
                        tedge[0].Tdel =
                            type->type_timing_inf.
                            T_subblock[isub].T_seq_out[opin];

                    tnode_descript[inode + 1].type = INPAD_SOURCE;
                    tnode_descript[inode + 1].ipin = OPEN;
                    tnode_descript[inode + 1].isubblk = isub;
                    tnode_descript[inode + 1].iblk = iblk;
                }
                else
                {
                    tnode_descript[inode].type = SUBBLK_OPIN;
                }

                tnode_descript[inode].ipin = opin;
                tnode_descript[inode].isubblk = isub;
                tnode_descript[inode].iblk = iblk;
            }
        }
    }

    /* Create the FF_SOURCE / FF_SINK pairs for clocked subblocks and the
     * input-pin tnodes (and, for IO blocks, their OUTPAD_SINKs). */

    for(isub = 0; isub < num_subs; isub++)
    {
        has_outputs = FALSE;

        for(opin = 0; opin < type->max_subblock_outputs; opin++)
        {
            if(sub_pin_to_tnode[isub][SUB_OUTPUT][opin] != OPEN)
            {
                has_outputs = TRUE;
            }
        }

        if(!has_outputs && type != IO_TYPE)
        {
            continue;
        }

        if(sub_inf[isub].clock != OPEN)
        {
            inode = sub_pin_to_tnode[isub][SUB_CLOCK][clk_pin];

            for(opin = 0; opin < type->max_subblock_outputs; opin++)
            {
                if(sub_inf[isub].outputs[opin] != OPEN)
                {
                    tnode[inode].num_edges = 1;
                    tnode[inode].out_edges = (t_tedge *)
                        my_chunk_malloc(sizeof(t_tedge),
                                        &tedge_ch_list_head,
                                        &tedge_ch_bytes_avail,
                                        &tedge_ch_next_avail);

                    tnode_descript[inode].type = FF_SOURCE;
                    tnode_descript[inode].ipin = OPEN;
                    tnode_descript[inode].isubblk = isub;
                    tnode_descript[inode].iblk = iblk;

                    inode++;
                    tnode[inode].num_edges = 0;
                    tnode[inode].out_edges = NULL;

                    tnode_descript[inode].type = FF_SINK;
                    tnode_descript[inode].ipin = OPEN;
                    tnode_descript[inode].isubblk = isub;
                    tnode_descript[inode].iblk = iblk;
                    inode++;
                }
            }
        }

        for(ipin = 0; ipin < type->max_subblock_inputs; ipin++)
        {
            inode = sub_pin_to_tnode[isub][SUB_INPUT][ipin];

            if(inode != OPEN)
            {
                if(type == IO_TYPE)
                {
                    tnode[inode].num_edges = 1;
                    opin = 0;
                    tnode[inode].out_edges = (t_tedge *)
                        my_chunk_malloc(sizeof(t_tedge),
                                        &tedge_ch_list_head,
                                        &tedge_ch_bytes_avail,
                                        &tedge_ch_next_avail);
                    tnode_descript[inode].type = OUTPAD_IPIN;
                    tnode[inode + 1].num_edges = 0;
                    tnode[inode + 1].out_edges = NULL;
                    tedge = tnode[inode].out_edges;
                    tedge[0].to_node = inode + 1;

                    tedge[0].Tdel =
                        type->type_timing_inf.
                        T_subblock[isub].T_comb[ipin][opin] +
                        type->type_timing_inf.
                        T_subblock[isub].T_seq_in[opin];

                    tnode_descript[inode + 1].type = OUTPAD_SINK;
                    tnode_descript[inode + 1].ipin = OPEN;
                    tnode_descript[inode + 1].isubblk = isub;
                    tnode_descript[inode + 1].iblk = iblk;
                }
                else
                {
                    tnode[inode].num_edges = num_opin_used_in_sblk[isub];
                    tnode[inode].out_edges =
                        (t_tedge *)
                        my_chunk_malloc(num_opin_used_in_sblk[isub] *
                                        sizeof(t_tedge),
                                        &tedge_ch_list_head,
                                        &tedge_ch_bytes_avail,
                                        &tedge_ch_next_avail);
                    tnode_descript[inode].type = SUBBLK_IPIN;
                }

                tnode_descript[inode].ipin = ipin;
                tnode_descript[inode].isubblk = isub;
                tnode_descript[inode].iblk = iblk;
            }
        }
    }

    /* Load the edges leaving each used subblock output pin:  to the subblock
     * input and clock pins it feeds locally and to the FB output pin it
     * drives, if any. */

    for(isub = 0; isub < num_subs; isub++)
    {
        used_opin_count = 0;
        for(opin = 0; opin < type->max_subblock_outputs; opin++)
        {
            if(sub_pin_to_tnode[isub][SUB_OUTPUT][opin] == OPEN)
            {
                continue;
            }
            for(ipin = 0; ipin < type->max_subblock_inputs; ipin++)
            {
                from_pin = sub_inf[isub].inputs[ipin];

                /* Pin numbers >= type->num_pins denote connections that come
                 * from another subblock output within the same block. */

                if(from_pin >= type->num_pins)
                {
                    from_sub = (from_pin - type->num_pins) /
                        type->max_subblock_outputs;
                    from_opin = (from_pin - type->num_pins) %
                        type->max_subblock_outputs;
                    inode = sub_pin_to_tnode[from_sub][SUB_OUTPUT][from_opin];
                    to_node = sub_pin_to_tnode[isub][SUB_INPUT][ipin];
                    tedge = tnode[inode].out_edges;
                    iedge = next_sblk_opin_edge[from_sub][from_opin]++;
                    tedge[iedge].to_node = to_node;
                    tedge[iedge].Tdel =
                        type->type_timing_inf.T_sblk_opin_to_sblk_ipin;
                }
            }
            from_pin = sub_inf[isub].clock;

            if(from_pin >= type->num_pins)
            {
                from_sub = (from_pin - type->num_pins) /
                    type->max_subblock_outputs;
                from_opin = (from_pin - type->num_pins) %
                    type->max_subblock_outputs;
                inode = sub_pin_to_tnode[from_sub][SUB_OUTPUT][from_opin];

                to_node = sub_pin_to_tnode[isub][SUB_CLOCK][clk_pin] +
                    2 * used_opin_count;
                tedge = tnode[inode].out_edges;
                iedge = next_sblk_opin_edge[from_sub][from_opin]++;
                tedge[iedge].to_node = to_node;

                tedge[iedge].Tdel =
                    type->type_timing_inf.T_sblk_opin_to_sblk_ipin;
            }

            to_pin = sub_inf[isub].outputs[opin];
            if(to_pin != OPEN)
            {
                if(block[iblk].nets[to_pin] != OPEN)
                {
                    to_node = node_block_pin_to_tnode[to_pin];
                    inode = sub_pin_to_tnode[isub][SUB_OUTPUT][opin];
                    tedge = tnode[inode].out_edges;
                    iedge = next_sblk_opin_edge[isub][opin]++;
                    tedge[iedge].to_node = to_node;
                    tedge[iedge].Tdel =
                        type->type_timing_inf.T_sblk_opin_to_fb_opin;
                }
            }
            used_opin_count++;
        }
    }

    /* Load the combinational and sequential edges internal to each subblock. */

    for(isub = 0; isub < num_subs; isub++)
    {
        used_opin_count = 0;
        for(opin = 0; opin < type->max_subblock_outputs; opin++)
        {
            if(sub_pin_to_tnode[isub][SUB_OUTPUT][opin] == OPEN)
                continue;

            if(sub_inf[isub].clock == OPEN)
            {
                to_node = sub_pin_to_tnode[isub][SUB_OUTPUT][opin];
                sink_delay = 0;
            }
            else
            {
                inode = sub_pin_to_tnode[isub][SUB_CLOCK][clk_pin] +
                    2 * used_opin_count;

                tedge = tnode[inode].out_edges;
                tedge[0].to_node =
                    sub_pin_to_tnode[isub][SUB_OUTPUT][opin];
                tedge[0].Tdel =
                    type->type_timing_inf.T_subblock[isub].T_seq_out[opin];

                inode++;
                to_node = inode;
                sink_delay =
                    type->type_timing_inf.T_subblock[isub].T_seq_in[opin];
            }

            has_inputs = FALSE;
            for(ipin = 0; ipin < type->max_subblock_inputs; ipin++)
            {
                inode = sub_pin_to_tnode[isub][SUB_INPUT][ipin];

                if(inode != OPEN)
                {
                    has_inputs = TRUE;
                    tedge = tnode[inode].out_edges;
                    tedge[used_opin_count].to_node = to_node;
                    tedge[used_opin_count].Tdel =
                        sink_delay +
                        type->type_timing_inf.
                        T_subblock[isub].T_comb[ipin][opin];
                }
            }

            if(!has_inputs && type != IO_TYPE)
            {
                /* Constant generator:  give it a fake source node with a very
                 * negative delay so it can never lie on the critical path.
                 * This source node has exactly one out-edge, so index 0 is
                 * always the correct slot (indexing with used_opin_count
                 * would run past the single allocated edge when a constant
                 * generator has more than one used output). */

                inode = sub_pin_to_tnode[isub][SUB_OUTPUT][opin] + 1;
                tnode[inode].num_edges = 1;
                tnode[inode].out_edges =
                    (t_tedge *) my_chunk_malloc(sizeof(t_tedge),
                                                &tedge_ch_list_head,
                                                &tedge_ch_bytes_avail,
                                                &tedge_ch_next_avail);
                tedge = tnode[inode].out_edges;
                tedge[0].to_node = to_node;
                tedge[0].Tdel = T_CONSTANT_GENERATOR;

                tnode_descript[inode].type = CONSTANT_GEN_SOURCE;
                tnode_descript[inode].ipin = OPEN;
                tnode_descript[inode].isubblk = isub;
                tnode_descript[inode].iblk = iblk;
            }
            used_opin_count++;
        }
    }
    free_matrix(next_sblk_opin_edge, 0, type->max_subblocks - 1, 0,
                sizeof(int));
    free(num_opin_used_in_sblk);
}

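/* Returns TRUE if the net driven by subblock output (iblk, sub, subpin) is a
 * global net that clocks at least one subblock somewhere in the design.
 * Only called for IO blocks; the caller uses the result to decide whether an
 * input pad is a clock source and should get a zero-delay source edge. */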
static boolean
is_global_clock(int iblk,
                int sub,
                int subpin,
                int *num_subblocks_per_block,
                t_subblock **subblock_inf)
{

    int inet, ipin, to_blk, to_pin, isub;
    t_type_ptr type = block[iblk].type;

    assert(type == IO_TYPE);

    inet = block[iblk].nets[subblock_inf[iblk][sub].outputs[subpin]];
    assert(inet != OPEN);

    if(!net[inet].is_global)
        return (FALSE);

    for(ipin = 1; ipin <= net[inet].num_sinks; ipin++)
    {
        to_blk = net[inet].node_block[ipin];
        to_pin = net[inet].node_block_pin[ipin];

        for(isub = 0; isub < num_subblocks_per_block[to_blk]; isub++)
        {
            if(subblock_inf[to_blk][isub].clock == to_pin)
                return (TRUE);
        }
    }

    return (FALSE);
}

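/* Copies the point-to-point net delays in net_delay[0..num_nets-1]
 * [1..num_sinks] onto the corresponding edges of the timing graph (the edges
 * leaving each net's driver tnode, in sink order). */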
void
load_timing_graph_net_delays(float **net_delay)
{

    int inet, ipin, inode;
    t_tedge *tedge;

    for(inet = 0; inet < num_nets; inet++)
    {
        inode = net_to_driver_tnode[inet];
        tedge = tnode[inode].out_edges;

        for(ipin = 1; ipin <= net[inet].num_sinks; ipin++)
            tedge[ipin - 1].Tdel = net_delay[inet][ipin];
    }
}

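/* Frees the timing graph and the net slack structure and resets all the
 * file-scope and global timing-graph variables so a new graph can be built. */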
void
free_timing_graph(float **net_slack)
{

    if(tedge_ch_list_head == NULL)
    {
        printf("Error in free_timing_graph: No timing graph to free.\n");
        exit(1);
    }

    free_chunk_memory(tedge_ch_list_head);
    free(tnode);
    free(tnode_descript);
    free(net_to_driver_tnode);
    free_ivec_vector(tnodes_at_level, 0, num_tnode_levels - 1);
    free(net_slack);

    tedge_ch_list_head = NULL;
    tedge_ch_bytes_avail = 0;
    tedge_ch_next_avail = NULL;

    tnode = NULL;
    tnode_descript = NULL;
    num_tnodes = 0;
    net_to_driver_tnode = NULL;
    tnodes_at_level = NULL;
    num_tnode_levels = 0;
}

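/* Writes the slack of every net sink to the file named fname, one line per
 * net. */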
void
print_net_slack(char *fname,
                float **net_slack)
{

    int inet, ipin;
    FILE *fp;

    fp = my_fopen(fname, "w");

    fprintf(fp, "Net #\tSlacks\n\n");

    for(inet = 0; inet < num_nets; inet++)
    {
        fprintf(fp, "%5d", inet);
        for(ipin = 1; ipin <= net[inet].num_sinks; ipin++)
        {
            fprintf(fp, "\t%g", net_slack[inet][ipin]);
        }
        fprintf(fp, "\n");
    }

    fclose(fp);
}

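/* Dumps the entire timing graph to the file named fname:  every tnode with
 * its type, pin / subblock / block indices and out-edges, the tnodes at each
 * level, the driver tnode of each net, and the arrival / required time of
 * every tnode. */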
void
print_timing_graph(char *fname)
{

    FILE *fp;
    int inode, iedge, ilevel, i;
    t_tedge *tedge;
    t_tnode_type itype;
    char *tnode_type_names[] = { "INPAD_SOURCE", "INPAD_OPIN",
        "OUTPAD_IPIN", "OUTPAD_SINK", "FB_IPIN", "FB_OPIN",
        "SUBBLK_IPIN", "SUBBLK_OPIN", "FF_SINK", "FF_SOURCE",
        "CONSTANT_GEN_SOURCE"
    };

    fp = my_fopen(fname, "w");

    fprintf(fp, "num_tnodes: %d\n", num_tnodes);
    fprintf(fp, "Node #\tType\t\tipin\tisubblk\tiblk\t# edges\t"
            "Edges (to_node, Tdel)\n\n");

    for(inode = 0; inode < num_tnodes; inode++)
    {
        fprintf(fp, "%d\t", inode);

        itype = tnode_descript[inode].type;
        fprintf(fp, "%-15.15s\t", tnode_type_names[itype]);

        fprintf(fp, "%d\t%d\t%d\t", tnode_descript[inode].ipin,
                tnode_descript[inode].isubblk,
                tnode_descript[inode].iblk);

        fprintf(fp, "%d\t", tnode[inode].num_edges);
        tedge = tnode[inode].out_edges;
        for(iedge = 0; iedge < tnode[inode].num_edges; iedge++)
        {
            fprintf(fp, "\t(%4d,%7.3g)", tedge[iedge].to_node,
                    tedge[iedge].Tdel);
        }
        fprintf(fp, "\n");
    }

    fprintf(fp, "\n\nnum_tnode_levels: %d\n", num_tnode_levels);

    for(ilevel = 0; ilevel < num_tnode_levels; ilevel++)
    {
        fprintf(fp, "\n\nLevel: %d Num_nodes: %d\nNodes:", ilevel,
                tnodes_at_level[ilevel].nelem);
        for(i = 0; i < tnodes_at_level[ilevel].nelem; i++)
            fprintf(fp, "\t%d", tnodes_at_level[ilevel].list[i]);
    }

    fprintf(fp, "\n");
    fprintf(fp, "\n\nNet #\tNet_to_driver_tnode\n");

    for(i = 0; i < num_nets; i++)
        fprintf(fp, "%4d\t%6d\n", i, net_to_driver_tnode[i]);

    fprintf(fp, "\n\nNode #\t\tT_arr\t\tT_req\n\n");

    for(inode = 0; inode < num_tnodes; inode++)
        fprintf(fp, "%d\t%12g\t%12g\n", inode, tnode[inode].T_arr,
                tnode[inode].T_req);

    fclose(fp);
}

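/* Performs the timing analysis.  A breadth-first forward traversal (by level)
 * computes the arrival time T_arr of every tnode; a backward traversal then
 * computes the required time T_req, using the target cycle time if one is
 * given (> 0) or the critical-path delay otherwise.  Net slacks are filled
 * in from these values and the critical-path delay is returned.  Arrival
 * times are initialised to T_CONSTANT_GENERATOR (a large negative number)
 * rather than zero so that the very negative delays on constant-generator
 * source edges propagate correctly instead of being clamped at zero. */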
float
load_net_slack(float **net_slack,
               float target_cycle_time)
{

    float T_crit, T_arr, Tdel, T_cycle, T_req;
    int inode, ilevel, num_at_level, i, num_edges, iedge, to_node;
    t_tedge *tedge;

    /* Reset all arrival times to a very negative number so constant
     * generators propagate properly, then set the level-0 (source) nodes
     * to zero. */

    for(inode = 0; inode < num_tnodes; inode++)
        tnode[inode].T_arr = T_CONSTANT_GENERATOR;

    T_crit = 0.;

    num_at_level = tnodes_at_level[0].nelem;
    for(i = 0; i < num_at_level; i++)
    {
        inode = tnodes_at_level[0].list[i];
        tnode[inode].T_arr = 0.;
    }

    /* Forward traversal:  compute arrival times level by level. */

    for(ilevel = 0; ilevel < num_tnode_levels; ilevel++)
    {
        num_at_level = tnodes_at_level[ilevel].nelem;

        for(i = 0; i < num_at_level; i++)
        {
            inode = tnodes_at_level[ilevel].list[i];
            T_arr = tnode[inode].T_arr;
            num_edges = tnode[inode].num_edges;
            tedge = tnode[inode].out_edges;
            T_crit = max(T_crit, T_arr);

            for(iedge = 0; iedge < num_edges; iedge++)
            {
                to_node = tedge[iedge].to_node;
                Tdel = tedge[iedge].Tdel;
                tnode[to_node].T_arr =
                    max(tnode[to_node].T_arr, T_arr + Tdel);
            }
        }
    }

    if(target_cycle_time > 0.)
        T_cycle = target_cycle_time;
    else
        T_cycle = T_crit;

    /* Backward traversal:  compute required times from the sinks back to
     * the sources. */

    for(ilevel = num_tnode_levels - 1; ilevel >= 0; ilevel--)
    {
        num_at_level = tnodes_at_level[ilevel].nelem;

        for(i = 0; i < num_at_level; i++)
        {
            inode = tnodes_at_level[ilevel].list[i];
            num_edges = tnode[inode].num_edges;

            if(num_edges == 0)
            {
                tnode[inode].T_req = T_cycle;
            }
            else
            {
                tedge = tnode[inode].out_edges;
                to_node = tedge[0].to_node;
                Tdel = tedge[0].Tdel;
                T_req = tnode[to_node].T_req - Tdel;

                for(iedge = 1; iedge < num_edges; iedge++)
                {
                    to_node = tedge[iedge].to_node;
                    Tdel = tedge[iedge].Tdel;
                    T_req = min(T_req, tnode[to_node].T_req - Tdel);
                }

                tnode[inode].T_req = T_req;
            }
        }
    }

    compute_net_slacks(net_slack);

    return (T_crit);
}

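/* Computes the slack of every net sink:  for each edge leaving a net's driver
 * tnode, slack = T_req(sink) - T_arr(driver) - Tdel(edge). */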
static void
compute_net_slacks(float **net_slack)
{

    int inet, iedge, inode, to_node, num_edges;
    t_tedge *tedge;
    float T_arr, Tdel, T_req;

    for(inet = 0; inet < num_nets; inet++)
    {
        inode = net_to_driver_tnode[inet];
        T_arr = tnode[inode].T_arr;
        num_edges = tnode[inode].num_edges;
        tedge = tnode[inode].out_edges;

        for(iedge = 0; iedge < num_edges; iedge++)
        {
            to_node = tedge[iedge].to_node;
            Tdel = tedge[iedge].Tdel;
            T_req = tnode[to_node].T_req;
            net_slack[inet][iedge + 1] = T_req - T_arr - Tdel;
        }
    }
}

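/* Traces the critical path, writes a description of each tnode on it to the
 * file named fname, and prints summary statistics (number of tnodes, number
 * of global and non-global nets, and the split between logic delay and net
 * delay) to the file and to stdout. */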
void
print_critical_path(char *fname,
                    t_subblock_data subblock_data)
{

    t_linked_int *critical_path_head, *critical_path_node;
    FILE *fp;
    int non_global_nets_on_crit_path, global_nets_on_crit_path;
    int tnodes_on_crit_path, inode, iblk, inet;
    t_tnode_type type;
    float total_net_delay, total_logic_delay, Tdel;

    critical_path_head = allocate_and_load_critical_path();
    critical_path_node = critical_path_head;

    fp = my_fopen(fname, "w");

    non_global_nets_on_crit_path = 0;
    global_nets_on_crit_path = 0;
    tnodes_on_crit_path = 0;
    total_net_delay = 0.;
    total_logic_delay = 0.;

    while(critical_path_node != NULL)
    {
        Tdel = print_critical_path_node(fp, critical_path_node,
                                        subblock_data);
        inode = critical_path_node->data;
        type = tnode_descript[inode].type;
        tnodes_on_crit_path++;

        if(type == INPAD_OPIN || type == FB_OPIN)
        {
            get_tnode_block_and_output_net(inode, &iblk, &inet);

            if(!net[inet].is_global)
                non_global_nets_on_crit_path++;
            else
                global_nets_on_crit_path++;

            total_net_delay += Tdel;
        }
        else
        {
            total_logic_delay += Tdel;
        }

        critical_path_node = critical_path_node->next;
    }

    fprintf(fp,
            "\nTnodes on crit. path: %d Non-global nets on crit. path: %d."
            "\n", tnodes_on_crit_path, non_global_nets_on_crit_path);
    fprintf(fp, "Global nets on crit. path: %d.\n", global_nets_on_crit_path);
    fprintf(fp, "Total logic delay: %g (s) Total net delay: %g (s)\n",
            total_logic_delay, total_net_delay);

    printf("Nets on crit. path: %d normal, %d global.\n",
           non_global_nets_on_crit_path, global_nets_on_crit_path);

    printf("Total logic delay: %g (s) Total net delay: %g (s)\n",
           total_logic_delay, total_net_delay);

    fclose(fp);
    free_int_list(&critical_path_head);
}

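/* Builds and returns a linked list of the tnodes on the critical path, found
 * by starting from the minimum-slack source node and repeatedly following
 * the minimum-slack fanout edge until a sink (a node with no out-edges) is
 * reached. */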
t_linked_int *
allocate_and_load_critical_path(void)
{

    t_linked_int *critical_path_head, *curr_crit_node, *prev_crit_node;
    int inode, iedge, to_node, num_at_level, i, crit_node, num_edges;
    float min_slack, slack;
    t_tedge *tedge;

    num_at_level = tnodes_at_level[0].nelem;
    min_slack = HUGE_FLOAT;
    crit_node = OPEN;

    for(i = 0; i < num_at_level; i++)
    {
        inode = tnodes_at_level[0].list[i];
        slack = tnode[inode].T_req - tnode[inode].T_arr;

        if(slack < min_slack)
        {
            crit_node = inode;
            min_slack = slack;
        }
    }

    critical_path_head = (t_linked_int *) my_malloc(sizeof(t_linked_int));
    critical_path_head->data = crit_node;
    prev_crit_node = critical_path_head;
    num_edges = tnode[crit_node].num_edges;

    while(num_edges != 0)
    {
        curr_crit_node = (t_linked_int *) my_malloc(sizeof(t_linked_int));
        prev_crit_node->next = curr_crit_node;
        tedge = tnode[crit_node].out_edges;
        min_slack = HUGE_FLOAT;

        for(iedge = 0; iedge < num_edges; iedge++)
        {
            to_node = tedge[iedge].to_node;
            slack = tnode[to_node].T_req - tnode[to_node].T_arr;

            if(slack < min_slack)
            {
                crit_node = to_node;
                min_slack = slack;
            }
        }

        curr_crit_node->data = crit_node;
        prev_crit_node = curr_crit_node;
        num_edges = tnode[crit_node].num_edges;
    }

    prev_crit_node->next = NULL;
    return (critical_path_head);
}

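/* Returns the block to which tnode inode belongs and, if the tnode is an
 * FB_OPIN or INPAD_OPIN (i.e. it drives a net), the index of that net;
 * otherwise *inet_ptr is set to OPEN. */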
void
get_tnode_block_and_output_net(int inode,
                               int *iblk_ptr,
                               int *inet_ptr)
{

    int inet, ipin, iblk;
    t_tnode_type tnode_type;

    iblk = tnode_descript[inode].iblk;
    tnode_type = tnode_descript[inode].type;

    if(tnode_type == FB_OPIN || tnode_type == INPAD_OPIN)
    {
        ipin = tnode_descript[inode].ipin;
        inet = block[iblk].nets[ipin];
    }
    else
    {
        inet = OPEN;
    }

    *iblk_ptr = iblk;
    *inet_ptr = inet;
}

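/* Builds the timing graph, assigns the same constant delay to every net,
 * runs the timing analysis, and prints the resulting critical-path delay
 * (and, when CREATE_ECHO_FILES is defined, several echo files).  Presumably
 * intended for quick timing estimates when real net delays are not yet
 * available. */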
void
do_constant_net_delay_timing_analysis(t_timing_inf timing_inf,
                                      t_subblock_data subblock_data,
                                      float constant_net_delay_value)
{

    struct s_linked_vptr *net_delay_chunk_list_head;
    float **net_delay, **net_slack;

    float T_crit;

    net_slack = alloc_and_load_timing_graph(timing_inf, subblock_data);
    net_delay = alloc_net_delay(&net_delay_chunk_list_head);

    load_constant_net_delay(net_delay, constant_net_delay_value);
    load_timing_graph_net_delays(net_delay);
    T_crit = load_net_slack(net_slack, 0);

    printf("\n");
    printf("\nCritical Path: %g (s)\n", T_crit);

#ifdef CREATE_ECHO_FILES
    print_critical_path("critical_path.echo", subblock_data);
    print_timing_graph("timing_graph.echo");
    print_net_slack("net_slack.echo", net_slack);
    print_net_delay(net_delay, "net_delay.echo");
#endif

    free_timing_graph(net_slack);
    free_net_delay(net_delay, &net_delay_chunk_list_head);
}