2005
Dong Lu, Yi Qiao, Peter Dinda, Fabián E. Bustamante Characterizing and Predicting TCP Throughput on the Wide Area Network Journal Article In Proc. of the 25th IEEE International Conference on Distributed Computing Systems, 2005. @article{TTWAN, title = {Characterizing and Predicting TCP Throughput on the Wide Area Network}, author = {Dong Lu and Yi Qiao and Peter Dinda and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Donglu05ICDCS.pdf}, year = {2005}, date = {2005-06-03}, journal = {In Proc. of the 25th IEEE International Conference on Distributed Computing Systems}, abstract = {DualPats exploits the strong correlation between TCP throughput and flow size, and the statistical stability of Internet path characteristics, to accurately predict the TCP throughput of large transfers using active probing. We propose additional mechanisms to explain the correlation, and then analyze why traditional TCP benchmarking fails to predict the throughput of large transfers well. We characterize stability and develop a dynamic sampling rate adjustment algorithm so that we probe a path based on its stability. Our analysis, design, and evaluation are based on a large-scale measurement study.}, keywords = {}, pubstate = {published}, tppubtype = {article} }
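As an illustration of the size-throughput correlation the abstract leans on, the sketch below fits transfer time as an affine function of flow size from two active probes and extrapolates a large transfer's throughput. This is a hedged toy, not DualPats' actual algorithm; the probe sizes, function names, and the affine model are assumptions.

```python
# Hedged sketch: assume transfer time on a path grows roughly linearly with
# flow size (the correlation the abstract exploits), fit that line from a
# couple of active probes, and extrapolate a large transfer's throughput.

def fit_transfer_time(probes):
    """Least-squares fit of t = a*size + b from (size_bytes, seconds) pairs.
    Needs at least two probes with distinct sizes."""
    n = len(probes)
    sx = sum(s for s, _ in probes)
    st = sum(t for _, t in probes)
    sxx = sum(s * s for s, _ in probes)
    sxt = sum(s * t for s, t in probes)
    a = (n * sxt - sx * st) / (n * sxx - sx * sx)
    b = (st - a * sx) / n
    return a, b

def predict_throughput(probes, transfer_bytes):
    """Extrapolated throughput (bytes/sec) for a large transfer."""
    a, b = fit_transfer_time(probes)
    return transfer_bytes / (a * transfer_bytes + b)

# Two small active probes of different sizes on the same path (made-up values).
probes = [(256_000, 0.9), (512_000, 1.4)]
print(predict_throughput(probes, 100_000_000))  # predicted B/s for a ~100 MB transfer
```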
Stefan Birrer, Fabián E. Bustamante, Dong Lu, Peter Dinda, Yi Qiao FatNemo: Multi-Source Multicast Overlay Fat-Tree Journal Article Poster in Proc. of the Second Symposium on Networked Systems Design & Implementation, 2005. @article{FatNemob, title = {FatNemo: Multi-Source Multicast Overlay Fat-Tree}, author = {Stefan Birrer and Fabián E. Bustamante and Dong Lu and Peter Dinda and Yi Qiao}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/SBirrer05NSDI.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/nsdi05poster.pdf}, year = {2005}, date = {2005-05-03}, journal = {Poster in Proc. of the Second Symposium on Networked Systems Design & Implementation}, abstract = {This poster presents the idea of emulating fat-trees in overlays for multi-source multicast applications. Fat-trees are like real trees in that their branches become thicker the closer one gets to the root, thus overcoming the "root bottleneck" of regular trees. We describe FatNemo, a novel overlay multi-source multicast protocol based on this idea, and present early experimental and analytical results showing the advantages of this approach. FatNemo organizes its members into a tree of clusters with cluster sizes increasing closer to the root. It uses bandwidth capacity to decide the highest layer in which a peer can participate, and relies on co-leaders to share the forwarding responsibility and to increase the tree's resilience to path and node failures.}, keywords = {}, pubstate = {published}, tppubtype = {article} }
Ashish Gupta, Manan Sanghi, Peter Dinda, Fabián E. Bustamante Magnolia: A novel DHT architecture for keyword-based searching Journal Article In Proc. of the Second Symposium on Networked Systems Design & Implementation, 2005. @article{Magnolia, title = {Magnolia: A novel DHT architecture for keyword-based searching}, author = {Ashish Gupta and Manan Sanghi and Peter Dinda and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Gupta05NSDIPoster.pdf}, year = {2005}, date = {2005-05-03}, journal = {In Proc. of the Second Symposium on Networked Systems Design & Implementation}, keywords = {}, pubstate = {published}, tppubtype = {article} }
Dong Lu, Yi Qiao, Peter A. Dinda, Fabián E. Bustamante Modeling and Taming Parallel TCP on the Wide Area Network Journal Article In Proc. of the 19th IEEE International Parallel and Distributed Processing Symposium, 2005. @article{MTPTWAN, title = {Modeling and Taming Parallel TCP on the Wide Area Network}, author = {Dong Lu and Yi Qiao and Peter A. Dinda and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DongLuIPDPS05.pdf}, year = {2005}, date = {2005-04-03}, journal = {In Proc. of the 19th IEEE International Parallel and Distributed Processing Symposium}, abstract = {Parallel TCP flows are broadly used in the high performance distributed computing community to enhance network throughput, particularly for large data transfers. Previous research has studied the mechanism by which parallel TCP improves aggregate throughput, but no practical mechanism exists to predict its throughput. In this work, we address how to predict parallel TCP throughput as a function of the number of flows, as well as how to predict the corresponding impact on cross traffic. To the best of our knowledge, we are the first to answer the following question on behalf of a user: what number of parallel flows will give the highest throughput with less than a p% impact on cross traffic? We term this the maximum nondisruptive throughput. We begin by studying the behavior of parallel TCP in simulation to help derive a model for predicting parallel TCP throughput and its impact on cross traffic. Combining this model with some previous findings, we derive a simple, yet effective, online advisor. We evaluate our advisor through simulation-based and wide-area experimentation.}, keywords = {}, pubstate = {published}, tppubtype = {article} }
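The "maximum nondisruptive throughput" question lends itself to a small worked example. The sketch below assumes some predictor of aggregate throughput and of cross-traffic impact as functions of the number of flows (both stand-ins here, not the paper's model) and picks the best flow count under the p% constraint.

```python
# Hypothetical advisor: among candidate flow counts, pick the one with the
# highest predicted aggregate throughput whose predicted impact on cross
# traffic stays under p%. The model functions below are invented stand-ins.

def max_nondisruptive_flows(throughput_of, impact_of, p, max_flows=16):
    """Return (n, throughput) maximizing throughput subject to impact_of(n) <= p."""
    best = None
    for n in range(1, max_flows + 1):
        if impact_of(n) <= p:
            t = throughput_of(n)
            if best is None or t > best[1]:
                best = (n, t)
    return best

# Made-up saturating throughput model and linear cross-traffic impact model.
throughput = lambda n: 10.0 * n / (1 + 0.2 * n)   # Mb/s, diminishing returns
impact = lambda n: 1.5 * (n - 1)                   # % slowdown of cross traffic
print(max_nondisruptive_flows(throughput, impact, p=5.0))  # -> (4, ~22.2)
```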
Stefan Birrer, Fabián E. Bustamante Nemo: Resilient Peer-to-Peer Multicast without the Cost Journal Article In Proc. of the 12th Annual Multimedia Computing and Networking Conference, 2005. @article{Nemoc, title = {Nemo: Resilient Peer-to-Peer Multicast without the Cost}, author = {Stefan Birrer and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/SBirrer05MMCN.pdf}, year = {2005}, date = {2005-03-03}, journal = {In Proc. of the 12th Annual Multimedia Computing and Networking Conference}, abstract = {We introduce Nemo, a novel peer-to-peer multicast protocol that achieves a high delivery ratio without sacrificing end-to-end latency or incurring additional costs. Based on two simple techniques: (1) co-leaders to minimize dependencies and (2) triggered negative acknowledgments (NACKs) to detect lost packets, Nemo's design emphasizes conceptual simplicity and minimum dependencies, thus achieving performance characteristics capable of withstanding the natural instability of its target environment. We present an extensive comparative evaluation of our protocol through simulation and wide-area experimentation. We contrast the scalability and performance of Nemo with that of three alternative protocols: Narada, Nice and Nice-PRM. Our results show that Nemo can achieve delivery ratios (up to 99.9%) similar to those of comparable protocols under high failure rates, but at a fraction of their cost in terms of duplicate packets (reductions > 90%) and control-related traffic (reductions > 20%).}, keywords = {}, pubstate = {published}, tppubtype = {article} }
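The triggered-NACK technique is easy to picture in code: a receiver tracks sequence numbers and, on detecting a gap, immediately requests the missing packets. A minimal sketch, with the message format and the send_nack callback assumed for illustration:

```python
# Minimal sketch of triggered negative acknowledgments as the abstract
# describes them: on seeing a sequence-number gap, the receiver immediately
# NACKs every missing packet instead of waiting for a timeout.

class NackReceiver:
    def __init__(self, send_nack):
        self.expected = 0          # next sequence number we expect
        self.send_nack = send_nack

    def on_packet(self, seq, payload):
        if seq > self.expected:
            # Gap detected: trigger a NACK for all missing sequence numbers.
            self.send_nack(list(range(self.expected, seq)))
        self.expected = max(self.expected, seq + 1)
        return payload

r = NackReceiver(send_nack=lambda missing: print("NACK", missing))
r.on_packet(0, b"a")
r.on_packet(3, b"d")   # prints: NACK [1, 2]
```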
Stefan Birrer, Fabián E. Bustamante Reef: Efficiently designing and evaluating overlay algorithms Technical Report Department of Computer Science, Northwestern University (NWU-CS-05-14), 2005. @techreport{Reef, title = {Reef: Efficiently designing and evaluating overlay algorithms}, author = {Stefan Birrer and Fabián E. Bustamante}, year = {2005}, date = {2005-02-03}, number = {NWU-CS-05-14}, institution = {Department of Computer Science, Northwestern University}, keywords = {}, pubstate = {published}, tppubtype = {techreport} }
David R. Choffnes, Fabián E. Bustamante Modeling Vehicular Traffic and Mobility for Vehicular Wireless Networks Technical Report Department of Computer Science, Northwestern University (NWU-CS-05-03), 2005. @techreport{MVTMVEN, title = {Modeling Vehicular Traffic and Mobility for Vehicular Wireless Networks}, author = {David R. Choffnes and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/NWU-CS-05-03-1-1.pdf}, year = {2005}, date = {2005-01-03}, number = {NWU-CS-05-03}, institution = {Department of Computer Science, Northwestern University}, abstract = {Ad-hoc wireless communication among highly dynamic, mobile nodes in an urban network is a critical capability for a wide range of important applications including automated vehicles, real-time traffic monitoring, and battleground communication. When evaluating application performance through simulation, a realistic mobility model for vehicular ad-hoc networks (VANETs) is critical for accurate results. This technical report discusses the implementation of STRAW, a new mobility model for VANETs in which nodes move according to a realistic vehicular traffic model on roads defined by real street map data. The challenge is to create a traffic model that accounts for individual vehicle motion without incurring significant overhead relative to the cost of performing the wireless network simulation. We identify essential and optional techniques for modeling vehicular motion that can be integrated into any wireless network simulator. We then detail choices we made in implementing STRAW.}, keywords = {}, pubstate = {published}, tppubtype = {techreport} }
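As a rough illustration of the kind of per-vehicle update a street-level mobility model performs, here is a one-dimensional car-following step: accelerate toward the segment speed limit without closing below a safe gap to the leader. The constants and the update rule are invented for illustration and are not STRAW's actual model.

```python
# Illustrative-only car-following step on a single road segment (1-D).
# accel, safe_gap, and the clamping rule are assumptions, not STRAW's equations.

def step(pos, speed, leader_pos, speed_limit, dt=1.0,
         accel=2.0, safe_gap=10.0):
    """Advance one vehicle by dt seconds; units are meters and m/s."""
    desired = min(speed + accel * dt, speed_limit)
    gap = leader_pos - pos - safe_gap
    speed = max(0.0, min(desired, gap / dt))  # never overrun the leader
    return pos + speed * dt, speed

pos, v = 0.0, 0.0
for _ in range(5):
    pos, v = step(pos, v, leader_pos=40.0, speed_limit=13.9)  # ~50 km/h limit
    print(round(pos, 1), round(v, 1))
```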
2004
Yi Qiao, Dong Lu, Fabián E. Bustamante, Peter Dinda Looking at the Server-Side of Peer-to-Peer Systems Journal Article In Proc. of the 7th Workshop on Languages, Compilers and Run-time Support for Scalable Systems, 2004. @article{SSP2Pb, title = {Looking at the Server-Side of Peer-to-Peer Systems}, author = {Yi Qiao and Dong Lu and Fabián E. Bustamante and Peter Dinda}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/YQiao04LCR.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/YQiao04LCR.ppt}, year = {2004}, date = {2004-11-03}, journal = {In Proc. of the 7th Workshop on Languages, Compilers and Run-time Support for Scalable Systems}, abstract = {Peer-to-peer systems have grown significantly in popularity over the last few years. An increasing number of research projects have been closely following this trend, looking at many of the paradigm's technical aspects. In the context of data-sharing services, efforts have focused on a variety of issues from object location and routing to fair sharing and peer lifespans. Overall, the majority of these projects have concentrated on either the whole P2P infrastructure or the client-side of peers. Little attention has been given to the peer's server-side, even when that side determines much of the everyday user's experience. In this paper, we make the case for looking at the server-side of peers, focusing on the problem of scheduling download requests at the server-side of P2P systems with the intent of minimizing the average response time experienced by users. We start by characterizing server workload based on extensive trace collection and analysis. We then evaluate the performance and fairness of different scheduling policies through trace-driven simulations. Our results show that average response time can be dramatically reduced by more effectively scheduling the requests on the server-side of P2P systems.}, keywords = {}, pubstate = {published}, tppubtype = {article} }
Dong Lu, Peter A. Dinda, Yi Qiao, Huanyuan Sheng, Fabián E. Bustamante Applications of SRPT Scheduling with Inaccurate Information Journal Article Poster in Proc. of the 12th IEEE/ACM International Symposium on Modeling, Analysis and Simulation of Computer and Telecommunication Systems (MASCOTS), 2004. @article{SRPT, title = {Applications of SRPT Scheduling with Inaccurate Information}, author = {Dong Lu and Peter A. Dinda and Yi Qiao and Huanyuan Sheng and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DongLuMASCOTS04.pdf}, year = {2004}, date = {2004-10-03}, journal = {Poster in Proc. of the 12th IEEE/ACM International Symposium on Modeling, Analysis and Simulation of Computer and Telecommunication Systems (MASCOTS)}, abstract = {The Shortest Remaining Processing Time (SRPT) scheduling policy was proven, in the 1960s, to yield the smallest mean response time, and it was recently shown that its performance gain over Processor Sharing (PS) usually does not come at the expense of large jobs. However, despite the many advantages of SRPT scheduling, it is not widely applied. One important reason for the sporadic application of SRPT scheduling is that accurate job size information is often unavailable. Our previous work addressed the performance and fairness issues of SRPT scheduling when job size information is inaccurate. We found that SRPT (and FSP) scheduling outperforms PS as long as there exists a (rather small) amount of correlation between the estimated job size and the actual job size. In the work we summarize here, we have developed job size estimation techniques to support the application of SRPT to web server and Peer-to-Peer server side scheduling. We have evaluated our techniques with extensive simulation studies and real world implementation and measurement.}, keywords = {}, pubstate = {published}, tppubtype = {article} }
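To make the idea concrete, here is a toy SRPT queue driven by estimated sizes; the paper's point is that such a queue beats Processor Sharing even when estimates are only loosely correlated with true sizes. The API and quantum-based service loop are assumptions:

```python
# Toy SRPT scheduler keyed on *estimated* remaining size, the setting the
# abstract studies. How estimates are produced is out of scope here.

import heapq

class SrptQueue:
    def __init__(self):
        self.heap = []   # (estimated_remaining, job_id)

    def add(self, job_id, estimated_size):
        heapq.heappush(self.heap, (estimated_size, job_id))

    def run(self, quantum):
        """Serve the job with the shortest estimated remaining size."""
        if not self.heap:
            return None
        remaining, job_id = heapq.heappop(self.heap)
        remaining -= quantum
        if remaining > 0:
            heapq.heappush(self.heap, (remaining, job_id))
        return job_id

q = SrptQueue()
q.add("big", 100)
q.add("small", 3)
print([q.run(1) for _ in range(5)])  # ['small', 'small', 'small', 'big', 'big']
```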
Stefan Birrer, Dong Lu, Fabián E. Bustamante, Yi Qiao, Peter Dinda FatNemo: Building a Resilient Multi-Source Multicast Fat-Tree Journal Article In Proc. of the Ninth International Workshop on Web Content Caching and Distribution, 2004. @article{FatNemo, title = {FatNemo: Building a Resilient Multi-Source Multicast Fat-Tree}, author = {Stefan Birrer and Dong Lu and Fabián E. Bustamante and Yi Qiao and Peter Dinda}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/SBirrer04FNB.pdf}, year = {2004}, date = {2004-09-03}, journal = {In Proc. of the Ninth International Workshop on Web Content Caching and Distribution}, abstract = {This paper proposes the idea of emulating fat-trees in overlays for multi-source multicast applications. Fat-trees are like real trees in that their branches become thicker the closer one gets to the root, thus overcoming the "root bottleneck" of regular trees. We introduce FatNemo, a novel overlay multi-source multicast protocol based on this idea. FatNemo organizes its members into a tree of clusters with cluster sizes increasing closer to the root. It uses bandwidth capacity to decide the highest layer in which a peer can participate, and relies on co-leaders to share the forwarding responsibility and to increase the tree's resilience to path and node failures. We present the design of FatNemo and show simulation-based experimental results comparing its performance with that of three alternative protocols (Narada, Nice and Nice-PRM). These initial results show that FatNemo not only minimizes the average and standard deviation of response time, but also handles end-host failures gracefully with minimal performance penalty.}, keywords = {}, pubstate = {published}, tppubtype = {article} }
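A minimal sketch of the bandwidth-based placement rule the abstract states: a peer's capacity decides the highest layer it may occupy, and clusters grow toward the root. The thresholds and cluster sizes below are invented for illustration.

```python
# Hypothetical FatNemo-style placement: bandwidth gates the highest layer a
# peer can join; cluster size grows with layer, fat-tree style.

def highest_layer(bandwidth_mbps, thresholds=(1.0, 5.0, 20.0)):
    """Layer 0 is the leaves; each threshold crossed permits one layer higher."""
    layer = 0
    for t in thresholds:
        if bandwidth_mbps >= t:
            layer += 1
    return layer

def cluster_size(layer, base=3, growth=2):
    """Clusters get larger closer to the root, thickening the 'branches'."""
    return base * (growth ** layer)

for bw in (0.5, 8.0, 50.0):
    layer = highest_layer(bw)
    print(f"{bw} Mbps -> layer {layer}, cluster size {cluster_size(layer)}")
```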
Stefan Birrer, Fabián E. Bustamante Resilient Peer-to-Peer Multicast from the Ground Up Journal Article In Proc. of the IEEE Network Computing and Applications - Workshop on Adaptive Grid Computing, 2004. @article{P2PMGUb, title = {Resilient Peer-to-Peer Multicast from the Ground Up}, author = {Stefan Birrer and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/SBirrer04RGU.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/nca04nemo.pdf}, year = {2004}, date = {2004-08-03}, journal = {In Proc. of the IEEE Network Computing and Applications - Workshop on Adaptive Grid Computing}, abstract = {One of the most important challenges of peer-to-peer multicast protocols is the ability to efficiently deal with the high degree of churn inherent to their environment. As multicast functionality is pushed to autonomous, unpredictable peers, significant performance losses can result from group membership changes and the higher failure rates of end-hosts when compared to routers. Achieving high delivery ratios without sacrificing end-to-end latencies or incurring additional costs has proven to be a challenging task. This paper introduces Nemo, a novel peer-to-peer multicast protocol that aims at achieving this elusive goal. We present an extensive comparative evaluation of our protocol through simulation and wide-area experimentation. We compare the performance of Nemo with that of three alternative protocols: Narada, Nice and Nice-PRM. Our results show that Nemo can achieve delivery ratios similar to those of comparable protocols (up to 99.98%) under different failure rates, but at a fraction of their cost in terms of duplicate packets (reductions > 85%) and control-related traffic.}, keywords = {}, pubstate = {published}, tppubtype = {article} }
Brian Cornell, Peter Dinda, Fabián E. Bustamante Wayback: A User-level Versioning File System for Linux Journal Article In Proc. of USENIX Annual Technical Conference, FREENIX Track (Best Paper Award), 2004. @article{WayBack, title = {Wayback: A User-level Versioning File System for Linux}, author = {Brian Cornell and Peter Dinda and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Cornell04VFS.pdf https://sourceforge.net/projects/wayback/}, year = {2004}, date = {2004-06-03}, journal = {In Proc. of USENIX Annual Technical Conference, FREENIX Track (Best Paper Award)}, abstract = {In a typical file system, only the current version of a file (or directory) is available. In Wayback, a user can also access any previous version, all the way back to the file's creation time. Versioning is done automatically at the write level: each write to the file creates a new version. Wayback implements versioning using an undo log structure, exploiting the massive space available on modern disks to provide its very useful functionality. Wayback is a user-level file system built on the FUSE framework that relies on an underlying file system for access to the disk. In addition to simplifying Wayback, this also allows it to extend any existing file system with versioning: after being mounted, the file system can be mounted a second time with versioning. We describe the implementation of Wayback, and evaluate its performance using several benchmarks.}, keywords = {}, pubstate = {published}, tppubtype = {article} }
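The undo-log structure can be shown in a few lines. This toy (not Wayback's FUSE implementation) records, for each write, the bytes it overwrote and the old file length, so any earlier version can be rebuilt by replaying the log backwards:

```python
# Stripped-down sketch of write-level versioning with an undo log.
# Each write appends (offset, overwritten bytes, write length, old file length);
# reconstructing version n means undoing writes n.. in reverse order.

class VersionedFile:
    def __init__(self):
        self.data = bytearray()
        self.undo = []   # (offset, old_bytes, write_len, old_file_len)

    def write(self, offset, new_bytes):
        end = offset + len(new_bytes)
        old = bytes(self.data[offset:end])          # what this write clobbers
        self.undo.append((offset, old, len(new_bytes), len(self.data)))
        if end > len(self.data):
            self.data.extend(b"\x00" * (end - len(self.data)))
        self.data[offset:end] = new_bytes

    def version(self, n):
        """Contents as of just after write n-1, by undoing later writes."""
        snap = bytearray(self.data)
        for offset, old, wlen, flen in reversed(self.undo[n:]):
            snap[offset:offset + wlen] = old   # restore overwritten bytes
            del snap[flen:]                    # restore the old file length
        return bytes(snap)

f = VersionedFile()
f.write(0, b"hello world")
f.write(6, b"wayback_fs!")
print(f.version(1))   # b'hello world' -- the last write undone
```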
Stefan Birrer, Fabián E. Bustamante, Yan Chen Can We Trust ICMP Measurements? Technical Report Department of Computer Science, Northwestern University (NWU-CS-04-48), 2004. @techreport{ICMPM, title = {Can We Trust ICMP Measurements?}, author = {Stefan Birrer and Fabián E. Bustamante and Yan Chen}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/NWU-CS-04-48.pdf}, year = {2004}, date = {2004-03-03}, number = {NWU-CS-04-48}, institution = {Department of Computer Science, Northwestern University}, abstract = {ICMP-based measurements (e.g. ping) are often criticized as unrepresentative of the applications' experienced performance, as applications are based on TCP/UDP protocols and there is a well-accepted conjecture that routers are often configured to treat ICMP differently from TCP and UDP. However, to the best of our knowledge, this assumption has not been validated. With this in mind, we conducted extensive Internet end-to-end path measurements of these three protocols, spanning over 90 sites (from both commercial and academic networks), over 6,000 paths and more than 28 million probes in PlanetLab during two weeks. Our results show that ICMP performance is a good estimator for TCP/UDP performance for the majority of the paths. However for nearly 0.5% of the paths, we found persistent RTT differences between UDP and ICMP greater than 50%, while for TCP the difference exceeds 10% for 0.27% of the paths. Thus, although ICMP-based measurements can be trusted as predictors of TCP/UDP performance, distributed systems and network researchers should be aware of some scenarios where these measurements will be heavily misleading; this paper also provides some hints that can help in identifying those situations.}, keywords = {}, pubstate = {published}, tppubtype = {techreport} }
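The per-path comparison behind these numbers can be sketched as follows; the data layout and the use of medians are assumptions, while the 50% threshold mirrors the UDP figure quoted in the abstract.

```python
# Sketch of flagging paths whose ICMP and UDP RTTs diverge persistently.
# Input shape and the median-based comparison are illustrative assumptions.

from statistics import median

def flag_divergent_paths(samples, threshold=0.50):
    """samples: {path: {"icmp": [rtts_ms], "udp": [rtts_ms]}} -> flagged paths."""
    flagged = []
    for path, s in samples.items():
        icmp, udp = median(s["icmp"]), median(s["udp"])
        if abs(udp - icmp) / icmp > threshold:
            flagged.append((path, icmp, udp))
    return flagged

data = {("a", "b"): {"icmp": [20, 21, 19], "udp": [45, 44, 47]},
        ("a", "c"): {"icmp": [30, 31, 29], "udp": [31, 30, 33]}}
print(flag_divergent_paths(data))   # only ("a", "b") exceeds the 50% threshold
```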
Yi Qiao, Dong Lu, Fabián E. Bustamante, Peter Dinda Looking at the Server-Side of Peer-to-Peer Systems Technical Report Department of Computer Science, Northwestern University (NWU-CS-04-37), 2004. @techreport{SSP2PS, title = {Looking at the Server-Side of Peer-to-Peer Systems}, author = {Yi Qiao and Dong Lu and Fabián E. Bustamante and Peter Dinda}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/NWU-CS-04-37.pdf}, year = {2004}, date = {2004-02-01}, number = {NWU-CS-04-37}, institution = {Department of Computer Science, Northwestern University}, abstract = {Peer-to-peer systems have grown significantly in popularity over the last few years. An increasing number of research projects have been closely following this trend, looking at many of the paradigm's technical aspects. In the context of data-sharing services, efforts have focused on a variety of issues from object location and routing to fair sharing and peer lifespans. Overall, the majority of these projects have concentrated on either the whole P2P infrastructure or the client-side of peers. Little attention has been given to the peer's server-side, even when that side determines much of the everyday user's experience. In this paper, we make the case for looking at the server side of peers, focusing on the problem of scheduling with the intent of minimizing the average response time experienced by users. We start by characterizing server workload based on extensive trace collection and analysis. We then evaluate the performance and fairness of different scheduling policies through trace-driven simulations. Our results show that average response time can be dramatically reduced by more effectively scheduling the requests on the server-side of P2P systems.}, keywords = {}, pubstate = {published}, tppubtype = {techreport} }
Stefan Birrer, Fabián E. Bustamante Nemo: Resilient Peer-to-Peer Multicast without the Cost Technical Report Department of Computer Science, Northwestern University (NWU-CS-04-36), 2004. @techreport{Nemo, title = {Nemo: Resilient Peer-to-Peer Multicast without the Cost}, author = {Stefan Birrer and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/NWU-CS-04-36.pdf}, year = {2004}, date = {2004-01-03}, number = {NWU-CS-04-36}, institution = {Department of Computer Science, Northwestern University}, abstract = {One of the most important challenges of peer-to-peer multicast protocols is the ability to efficiently deal with the high degree of transiency inherent to their environment. As multicast functionality is pushed to autonomous, unpredictable peers, significant performance losses can result from group membership changes and the higher failure rates of end-hosts when compared to routers. Achieving high delivery ratios without sacrificing end-to-end latencies or incurring additional costs has proven to be a challenging task. This paper introduces Nemo, a novel peer-to-peer multicast protocol that aims at achieving this elusive goal. Based on two simple techniques: (1) co-leaders to minimize dependencies and (2) triggered negative acknowledgments (NACKs) to detect lost packets, Nemo's design emphasizes conceptual simplicity and minimum dependencies, thus achieving performance characteristics capable of withstanding the natural instability of its target environment. We present an extensive comparative evaluation of our protocol through simulation and wide-area experimentation. We compare the scalability and performance of Nemo with that of three alternative protocols: Narada, Nice and Nice-PRM. Our results show that Nemo can achieve delivery ratios (up to 99.9%) similar to those of comparable protocols under high failure rates, but at a fraction of their cost in terms of duplicate packets (reductions > 90%) and control-related traffic (reductions > 20%).}, keywords = {}, pubstate = {published}, tppubtype = {techreport} }
2003
Patrick Widener, Karsten Schwan, Fabián E. Bustamante Differential Data Protection for Dynamic Distributed Applications Journal Article In Proc. of the 19th Annual Computer Security Applications Conference, 2003. @article{DDPDDA, title = {Differential Data Protection for Dynamic Distributed Applications}, author = {Patrick Widener and Karsten Schwan and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Widener03DDP.pdf}, year = {2003}, date = {2003-12-03}, journal = {In Proc. of the 19th Annual Computer Security Applications Conference}, abstract = {We present a mechanism for providing differential data protection to publish/subscribe distributed systems, such as those used in peer-to-peer computing, grid environments, and others. This mechanism, termed "security overlays," incorporates credential-based communication channel creation, subscription and extension. We describe a conceptual model of publish/subscribe services that is made concrete by our mechanism. We also present an application, Active Video Streams, whose reimplementation using security overlays allows it to react to high-level security policies specified in XML without significant performance loss or the necessity for embedding policy-specific code into the application.}, keywords = {}, pubstate = {published}, tppubtype = {article} }
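A hypothetical sketch of the security-overlay idea: channel subscription is gated on credentials, and each subscriber receives a credential-specific view of published events. The policy table and redaction rule below are invented for illustration, not the paper's XML policy mechanism.

```python
# Illustration only: credential-gated subscription with per-credential views.

POLICY = {"analyst": {"drop_fields": []},
          "guest":   {"drop_fields": ["location", "raw_video"]}}

class Channel:
    def __init__(self):
        self.subs = []   # (credential, callback)

    def subscribe(self, credential, callback):
        if credential not in POLICY:
            raise PermissionError("credential not authorized for this channel")
        self.subs.append((credential, callback))

    def publish(self, event):
        for credential, cb in self.subs:
            view = {k: v for k, v in event.items()
                    if k not in POLICY[credential]["drop_fields"]}
            cb(view)   # each subscriber sees only its credential's view

ch = Channel()
ch.subscribe("guest", lambda e: print("guest sees", e))
ch.publish({"id": 7, "location": "bldg 3", "temp": 21})  # location redacted
```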
Yi Qiao, Fabián E. Bustamante Elders Know Best: Lifespan-Based Ideas in P2P Systems Journal Article 19th Symposium on Operating Systems Principles, 2003. @article{EKBLBI, title = {Elders Know Best: Lifespan-Based Ideas in P2P Systems}, author = {Yi Qiao and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/YQiao03EKB.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/YQiao03EKB.ppt}, year = {2003}, date = {2003-10-03}, journal = {19th Symposium on Operating Systems Principles}, abstract = {The transiency of the peer population and its implications for peer-to-peer (P2P) applications are increasingly attracting the attention of the research community. As undesirable as it is unavoidable, peer transiency could negate many of the appealing features of the P2P approach. We are exploring new P2P protocols and strategies that, by considering peers' lifespan a key attribute, can greatly boost the stability, efficiency and scalability of these systems. This work-in-progress briefly discusses our approach and presents some initial results.}, keywords = {}, pubstate = {published}, tppubtype = {article} }
Fabián E. Bustamante, Yi Qiao Friendships that last: Peer lifespan and its role in P2P protocols Journal Article In Proc. of the International Workshop on Web Content Caching and Distribution, 2003. @article{PLP2PP, title = {Friendships that last: Peer lifespan and its role in P2P protocols}, author = {Fabián E. Bustamante and Yi Qiao}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bustamante03FLPL.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bustamante03FLPL-2.ppt}, year = {2003}, date = {2003-09-03}, journal = {In Proc. of the International Workshop on Web Content Caching and Distribution}, abstract = {We consider the problem of choosing who to "befriend" among a collection of known peers in distributed P2P systems. In particular, our work explores a number of P2P protocols that, by considering peers' lifespan distribution a key attribute, can yield systems with performance characteristics more resilient to the natural instability of their environments. This article presents results from our initial efforts, focusing on currently deployed decentralized P2P systems. We measure the observed lifespan of more than 500,000 peers in a popular P2P system for over a week and propose a functional form that fits the distribution well. We consider a number of P2P protocols based on this distribution, and use a trace-driven simulator to compare them against alternative protocols for decentralized and unstructured or loosely-structured P2P systems. We find that simple lifespan-based protocols can reduce the ratio of connection breakdowns and their associated costs by over 42%.}, keywords = {}, pubstate = {published}, tppubtype = {article} }
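The lifespan-based heuristic reduces, in its simplest form, to preferring peers that have already been up longest, on the premise (supported by the measured lifespan distribution) that long-lived peers tend to live longer still. A toy version, with made-up candidates:

```python
# Toy lifespan-based neighbor selection: rank known peers by observed uptime
# and befriend the oldest. Candidate tuples and k are illustrative assumptions.

def pick_neighbors(candidates, k):
    """candidates: [(peer_id, uptime_seconds)] -> k peer ids, oldest first."""
    ranked = sorted(candidates, key=lambda c: c[1], reverse=True)
    return [peer for peer, _ in ranked[:k]]

candidates = [("p1", 120), ("p2", 86_400), ("p3", 3_600), ("p4", 604_800)]
print(pick_neighbors(candidates, 2))   # ['p4', 'p2']
```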
Yi Qiao, Fabián E. Bustamante The effect of lasting friendships in P2P protocols Technical Report Department of Computer Science, Northwestern University (NWU-CS-03-23), 2003. @techreport{FP2PP, title = {The effect of lasting friendships in P2P protocols}, author = {Yi Qiao and Fabián E. Bustamante}, year = {2003}, date = {2003-02-03}, number = {NWU-CS-03-23}, institution = {Department of Computer Science, Northwestern University}, keywords = {}, pubstate = {published}, tppubtype = {techreport} }
Stefan Birrer, Fabián E. Bustamante Resilient Peer-to-Peer Multicast from the Ground Up Technical Report Department of Computer Science, Northwestern University (NWU-CS-03-22), 2003. @techreport{RP2PMGU, title = {Resilient Peer-to-Peer Multicast from the Ground Up}, author = {Stefan Birrer and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/NWU-CS-03-22.pdf}, year = {2003}, date = {2003-01-03}, number = {NWU-CS-03-22}, institution = {Department of Computer Science, Northwestern University}, abstract = {One of the most important challenges of peer-to-peer multicast protocols is the ability to efficiently deal with the high degree of churn inherent to their environment. As multicast functionality is pushed to autonomous, unpredictable peers, significant performance losses can result from group membership changes and the higher failure rates of end-hosts when compared to routers. Achieving high delivery ratios without sacrificing end-to-end latencies or incurring additional costs has proven to be a challenging task. This paper introduces Nemo, a novel peer-to-peer multicast protocol that aims at achieving this elusive goal. We present an extensive comparative evaluation of our protocol through simulation and wide-area experimentation. We compare the performance of Nemo with that of three alternative protocols: Narada, Nice and Nice-PRM. Our results show how Nemo can achieve delivery ratios similar to those of comparable protocols (up to 99.98%) under different failure rates, but at a fraction of their cost in terms of duplicate packets (reductions > 85%) and control-related traffic.}, keywords = {}, pubstate = {published}, tppubtype = {techreport} }
2002
Greg Eisenhauer, Fabián Bustamante, Karsten Schwan Native Data Representation: An Efficient Wire Format for High-Performance Computing Journal Article IEEE Transactions on Parallel and Distributed Systems, 13(12): 1234-1246, 2002. @article{NDRb, title = {Native Data Representation: An Efficient Wire Format for High-Performance Computing}, author = {Greg Eisenhauer and Fabián Bustamante and Karsten Schwan}, year = {2002}, date = {2002-12-03}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {13}, number = {12}, pages = {1234-1246}, keywords = {}, pubstate = {published}, tppubtype = {article} }
Fabián E. Bustamante, Patrick Widener, Karsten Schwan Scalable Directory Services Using Proactivity Journal Article In Proc. of Supercomputing, 2002. @article{SDSUP, title = {Scalable Directory Services Using Proactivity}, author = {Fabián E. Bustamante and Patrick Widener and Karsten Schwan}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bustamante02SDS.pdf}, year = {2002}, date = {2002-11-03}, journal = {In Proc. of Supercomputing}, abstract = {Common to computational grids and pervasive computing is the need for an expressive, efficient, and scalable directory service that provides information about objects in the environment. We argue that a directory interface that pushes information to clients about changes to objects can significantly improve scalability. This paper describes the design, implementation, and evaluation of the Proactive Directory Service (PDS). The PDS interface supports a customizable proactive mode through which clients can subscribe to be notified about changes to their objects of interest. Clients can dynamically tune the detail and granularity of these notifications through filter functions instantiated at the server or at the object's owner, and by remotely tuning the functionality of those filters. We compare PDS performance against off-the-shelf implementations of DNS and the Lightweight Directory Access Protocol. Our evaluation results confirm the expected performance advantages of this approach and demonstrate that customized notification through filter functions can reduce bandwidth utilization while improving the performance of both clients and directory servers.}, keywords = {}, pubstate = {published}, tppubtype = {article} }
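A sketch (not PDS's real API) of the proactive mode the abstract describes: clients subscribe to objects of interest with a filter function evaluated at the server, so only the changes they care about are pushed, rather than having clients poll.

```python
# Hypothetical proactive directory: subscriptions carry a server-side filter;
# matching updates are pushed to the client's notify callback.

class ProactiveDirectory:
    def __init__(self):
        self.objects = {}
        self.watchers = {}   # object name -> [(filter_fn, notify_fn)]

    def subscribe(self, name, filter_fn, notify_fn):
        self.watchers.setdefault(name, []).append((filter_fn, notify_fn))

    def update(self, name, **attrs):
        obj = self.objects.setdefault(name, {})
        obj.update(attrs)
        for filter_fn, notify_fn in self.watchers.get(name, []):
            if filter_fn(attrs):            # filtering happens at the server
                notify_fn(name, attrs)      # push only what the client wants

d = ProactiveDirectory()
d.subscribe("host42", lambda ch: "load" in ch and ch["load"] > 0.9,
            lambda n, ch: print(n, "overloaded:", ch))
d.update("host42", load=0.5)   # filtered out, nothing pushed
d.update("host42", load=0.95)  # pushed to the subscriber
```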
Fabián E. Bustamante, Christian Poellabauer, Karsten Schwan AIMS: Robustness Through Sensible Introspection Journal Article In Proc. of the 10th ACM SIGOPS European Workshop, 2002. @article{AIMS, title = {AIMS: Robustness Through Sensible Introspection}, author = {Fabián E. Bustamante and Christian Poellabauer and Karsten Schwan}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bustamante02AIMS.pdf}, year = {2002}, date = {2002-09-03}, journal = {In Proc. of the 10th ACM SIGOPS European Workshop}, abstract = {Our society increasingly relies on dependable complex computing systems. To be useful, dependable systems must also be robust when facing unpredictable changes to their operating environments. Introspection has proven to be a helpful approach in the design of dynamically adaptable computing systems. We argue that, for robustness, the introspective component itself needs to be dynamically adaptive since (i) it is effectively impossible to predict all information needed for introspection, (ii) even if we try, no introspective system will be able to manage the amount of data necessary to select the right adaptation to an overwhelming number of possible system conditions, and (iii) the right adaptation may be situation dependent as well. At Georgia Tech we are exploring the idea of dynamically adaptive introspective components for future systems. To this end, we are building AIMS, an Adaptive Introspective Management System through which monitoring probes (or agents) can be (un-)installed at runtime, their execution can be finely tuned dynamically, and the processing done on the collected data can be changed as needed.}, keywords = {}, pubstate = {published}, tppubtype = {article} }
Patrick Widener, Greg Eisenhauer, Karsten Schwan, Fabián E. Bustamante Open Metadata Formats: Efficient XML-Based Communication for High Performance Computing Journal Article Cluster Computing, 5(3): 315-324, 2002. @article{OMF, title = {Open Metadata Formats: Efficient XML-Based Communication for High Performance Computing}, author = {Patrick Widener and Greg Eisenhauer and Karsten Schwan and Fabián E. Bustamante}, year = {2002}, date = {2002-07-03}, journal = {Cluster Computing}, volume = {5}, number = {3}, pages = {315-324}, keywords = {}, pubstate = {published}, tppubtype = {article} }
2001
Fabián E. Bustamante The Active Streams Approach to Adaptive Distributed Applications And Services PhD Thesis Georgia Institute of Technology, 2001. @phdthesis{ASAADAS, title = {The Active Streams Approach to Adaptive Distributed Applications And Services}, author = {Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/astreams-thesis.pdf}, year = {2001}, date = {2001-11-20}, school = {Georgia Institute of Technology}, abstract = {The widespread deployment of inexpensive communication technologies, computational resources in the networking infrastructure, and network-capable end devices offers a rich design space for novel distributed applications and services. Exploration of this space has given rise, for instance, to the notions of grid and peer-to-peer computing. Both technologies promise to change the way we think about and use computing, by harvesting geographically distributed resources in order to create a universal source of pervasive computing power that will support new classes of applications. Despite the growing interest in these new environments and the increasing availability of the necessary hardware and network infrastructure, few actual applications are readily available and/or widely deployed. Such scarcity results from a number of technical challenges that must be addressed before the full potential of these technologies can be realized. Most of these applications, as well as the services they utilize, are expected to handle dynamically varying demand on resources and to run in large, heterogeneous, and dynamic environments, where the availability of resources cannot be guaranteed `a priori' -- all of this while providing acceptable levels of performance. To support such requirements, we believe that new services need to be customizable, applications need to be dynamically extensible, and both applications and services need to be able to adapt to variations in resources' availability and demand. The Active Streams approach, advocated in this dissertation, aims to facilitate the task of building new distributed systems with these characteristics. To this end, the approach considers the contents of the information flowing across the application and its services, it adopts a component-based model to application/service programming, and it provides for dynamic adaptation at multiple levels and points in the underlying platform. In addition, due to the complexity of building such systems, it tries to ease the programmer's task by facilitating the needed infrastructure for resource monitoring, self-monitoring and adaptation. This dissertation explores the Active Streams approach and its supporting framework in the context of these new distributed applications and services.}, keywords = {}, pubstate = {published}, tppubtype = {phdthesis} }
Fabián E. Bustamante, Patrick Widener, Karsten Schwan The Case for Proactive Directory Services Technical Report Poster in Proc. of Supercomputing, 2001. @techreport{PDS, title = {The Case for Proactive Directory Services}, author = {Fabián E. Bustamante and Patrick Widener and Karsten Schwan}, year = {2001}, date = {2001-11-10}, journal = {Poster in Proc. of Supercomputing}, volume = {SC - 2001}, institution = {Poster in Proc. of Supercomputing}, keywords = {}, pubstate = {published}, tppubtype = {techreport} } |
Fabián E. Bustamante, Greg Eisenhauer, Karsten Schwan, Patrick Widener Active Streams and the effects of stream specialization Journal Article Poster in Proc. of Tenth International Symposium on High Performance Distributed Computing, HPDC-2001, 2001. @article{ASESS, title = {Active Streams and the effects of stream specialization}, author = {Fabián E. Bustamante and Greg Eisenhauer and Karsten Schwan and Patrick Widener}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bustamante01ASE.pdf}, year = {2001}, date = {2001-08-03}, journal = {Poster in Proc. of Tenth International Symposium on High Performance Distributed Computing}, volume = {HPDC-2001}, abstract = {The explosive growth of the Internet, with the emergence of new networking technologies and the increasing number of network-capable end devices, is paving the way to a number of novel distributed applications and services. Cooperative distributed systems have become a common computing model and pervasive computing has caught the interest of academia and industry. The realization of these types of applications is complicated by the characteristics of their target environments, including their heterogeneous nature as well as the dynamically varying demands on and availability of their resources. Dynamic variations in resource usage are due to applications' data dependencies and/or users' dynamic behaviors, while the run-time variation in resource availability is a consequence of failures, resource additions or removals, and most importantly, contention for shared resources. This poster presents Active Streams, a middleware approach and its associated framework for building such novel distributed applications and services. It reports our initial results in understanding the effects of stream specialization through streamlets, demonstrating experimentally the potential improvements in latency (3-6X) and CPU utilization (up to 6X) derived from migrating streamlets "up" a stream, as well as the need for intermediate computational units.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The explosive growth of the Internet, with the emergence of new networking technologies and the increasing number of network-capable end devices, is paving the way to a number of novel distributed applications and services. Cooperative distributed systems have become a common computing model and pervasive computing has caught the interest of academia and industry. The realization of these types of applications is complicated by the characteristics of their target environments, including their heterogeneous nature as well as the dynamically varying demands on and availability of their resources. Dynamic variations in resource usage are due to applications' data dependencies and/or users' dynamic behaviors, while the run-time variation in resource availability is a consequence of failures, resource additions or removals, and most importantly, contention for shared resources. This poster presents Active Streams, a middleware approach and its associated framework for building such novel distributed applications and services. It reports our initial results in understanding the effects of stream specialization through streamlets, demonstrating experimentally the potential improvements in latency (3-6X) and CPU utilization (up to 6X) derived from migrating streamlets "up" a stream, as well as the need for intermediate computational units. |
Fabián E. Bustamante, Greg Eisenhauer, Patrick Widener, Karsten Schwan, Calton Pu Active Streams: An approach to adaptive distributed systems Journal Article In Proc. 8th Workshop on Hot Topics in Operating Systems, HotOS-VIII, 2001. @article{ASADS, title = {Active Streams: An approach to adaptive distributed systems}, author = {Fabián E. Bustamante and Greg Eisenhauer and Patrick Widener and Karsten Schwan and Calton Pu}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bustamante01ASA.pdf}, year = {2001}, date = {2001-05-03}, journal = {In Proc. 8th Workshop on Hot Topics in Operating Systems}, volume = {HotOS-VIII}, abstract = {An increasing number of distributed applications aim to provide services to users by interacting with a correspondingly growing set of data-intensive network services. Such applications, as well as the services they utilize, are generally expected to handle dynamically varying demands on resources and to run in large, heterogeneous, and dynamic environments, where the availability of resources cannot be guaranteed a priori -- all of this while providing acceptable levels of performance. To support such requirements, we believe that new services need to be customizable, applications need to be dynamically extensible, and both applications and services need to be able to adapt to variations in resource availability and demand. A comprehensive approach to building new distributed applications can facilitate this by considering the contents of the information flowing across the application and its services and by adopting a component-based model for application/service programming. It should provide for dynamic adaptation at multiple levels and points in the underlying platform; and, since the mapping of components to resources in dynamic environments is too complicated, it should relieve programmers of this task. We propose Active Streams, a middleware approach and its associated framework for building distributed applications and services that exhibit these characteristics.}, keywords = {}, pubstate = {published}, tppubtype = {article} } An increasing number of distributed applications aim to provide services to users by interacting with a correspondingly growing set of data-intensive network services. Such applications, as well as the services they utilize, are generally expected to handle dynamically varying demands on resources and to run in large, heterogeneous, and dynamic environments, where the availability of resources cannot be guaranteed a priori -- all of this while providing acceptable levels of performance. To support such requirements, we believe that new services need to be customizable, applications need to be dynamically extensible, and both applications and services need to be able to adapt to variations in resource availability and demand. A comprehensive approach to building new distributed applications can facilitate this by considering the contents of the information flowing across the application and its services and by adopting a component-based model for application/service programming. It should provide for dynamic adaptation at multiple levels and points in the underlying platform; and, since the mapping of components to resources in dynamic environments is too complicated, it should relieve programmers of this task. We propose Active Streams, a middleware approach and its associated framework for building distributed applications and services that exhibit these characteristics. |
Greg Eisenhauer, Fabián E. Bustamante, Karsten Schwan Event Services in High Performance Systems Journal Article Cluster Computing, 4(3):243-252, 2001. @article{ESHPS, title = {Event Services in High Performance Systems}, author = {Greg Eisenhauer and Fabián E. Bustamante and Karsten Schwan}, year = {2001}, date = {2001-05-03}, journal = {Cluster Computing}, volume = {4}, number = {3}, pages = {243-252}, abstract = {The Internet and the Grid are changing the face of high performance computing. Rather than tightly-coupled SPMD-style components running in a single cluster, on a parallel machine, or even on the Internet programmed in MPI, applications are evolving into sets of cooperating components scattered across diverse computational elements. These components may run on different operating systems and hardware platforms and may be written by different organizations in different languages. Complete "applications" are constructed by assembling these components in a plug-and-play fashion. This new vision for high performance computing demands features and characteristics not easily provided by traditional high-performance communications middleware. In response to these needs, we have developed ECho, a high-performance event-delivery middleware that meets the new demands of the Grid environment. ECho provides efficient binary transmission of event data with unique features that support data-type discovery and enterprise-scale application evolution. We present measurements detailing ECho's performance to show that ECho significantly outperforms other systems intended to provide this functionality and provides throughput and latency comparable to the most efficient middleware infrastructures available.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The Internet and the Grid are changing the face of high performance computing. Rather than tightly-coupled SPMD-style components running in a single cluster, on a parallel machine, or even on the Internet programmed in MPI, applications are evolving into sets of cooperating components scattered across diverse computational elements. These components may run on different operating systems and hardware platforms and may be written by different organizations in different languages. Complete "applications" are constructed by assembling these components in a plug-and-play fashion. This new vision for high performance computing demands features and characteristics not easily provided by traditional high-performance communications middleware. In response to these needs, we have developed ECho, a high-performance event-delivery middleware that meets the new demands of the Grid environment. ECho provides efficient binary transmission of event data with unique features that support data-type discovery and enterprise-scale application evolution. We present measurements detailing ECho's performance to show that ECho significantly outperforms other systems intended to provide this functionality and provides throughput and latency comparable to the most efficient middleware infrastructures available. |
Greg Eisenhauer, Fabián E. Bustamante, Karsten Schwan A Middleware Toolkit for Client-Initiated Service Specialization Journal Article ACM SIGOPS, 35 (2), 2001. @article{MTCIS, title = {A Middleware Toolkit for Client-Initiated Service Specialization}, author = {Greg Eisenhauer and Fabián E. Bustamante and Karsten Schwan}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Eisenhauer00MTC.pdf}, year = {2001}, date = {2001-04-03}, journal = {ACM SIGOPS}, volume = {35}, number = {2}, abstract = {As the Internet matures, streaming data services are taking an increasingly important place alongside traditional HTTP transactions. The need to dynamically adjust the delivery of such services to changes in available network and processing resources has spawned substantial research on application-specific methods for dynamic adaptation, including video and audio streaming applications. Such adaptation techniques are well developed, but they are also highly specialized, with the client (receiver) and server (sender) implementing well-defined protocols that exploit content-specific stream properties. This paper describes our efforts to bring the benefits of such content-aware, application-level service adaptation to all types of streaming data and to do so in a manner that is efficient and flexible. Our contribution in this domain is ECho, a high-performance event-delivery middleware system. ECho's basic functionality provides efficient binary transmission of event data with unique features that support dynamic data-type discovery and service evolution. ECho's contribution to data stream adaptation is in the mechanisms it provides for its clients to customize their data flows through type-safe dynamic server extension.}, keywords = {}, pubstate = {published}, tppubtype = {article} } As the Internet matures, streaming data services are taking an increasingly important place alongside traditional HTTP transactions. The need to dynamically adjust the delivery of such services to changes in available network and processing resources has spawned substantial research on application-specific methods for dynamic adaptation, including video and audio streaming applications. Such adaptation techniques are well developed, but they are also highly specialized, with the client (receiver) and server (sender) implementing well-defined protocols that exploit content-specific stream properties. This paper describes our efforts to bring the benefits of such content-aware, application-level service adaptation to all types of streaming data and to do so in a manner that is efficient and flexible. Our contribution in this domain is ECho, a high-performance event-delivery middleware system. ECho's basic functionality provides efficient binary transmission of event data with unique features that support dynamic data-type discovery and service evolution. ECho's contribution to data stream adaptation is in the mechanisms it provides for its clients to customize their data flows through type-safe dynamic server extension. |
Greg Eisenhauer, Fabián E. Bustamante, Karsten Schwan Native Data Representation: An Efficient Wire Format for High Performance Computing Technical Report College of Computing, Georgia Institute of Technology (GIT-CC-01-18), 2001. @techreport{NDR, title = {Native Data Representation: An Efficient Wire Format for High Performance Computing}, author = {Greg Eisenhauer and Fabián E. Bustamante and Karsten Schwan}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/GIT-CC-01-18.pdf}, year = {2001}, date = {2001-01-03}, number = {GIT-CC-01-18}, institution = {College of Computing, Georgia Institute of Technology}, abstract = {New trends in high-performance software development such as tool- and component-based approaches have increased the need for flexible and high-performance communication systems. High-performance computing applications are being integrated with a variety of software tools to allow on-line remote data visualization, enable real-time interaction with remote sensors and instruments, or provide novel environments for human collaboration. There has also been a growing interest among high-performance researchers in component-based approaches, in an attempt to facilitate software evolution and promote software reuse. When trying to reap the well-known benefits of these approaches, the question of what communications infrastructure should be used to link the various components arises. In this context, flexibility and high performance seem to be incompatible goals. Traditional HPC-style communication libraries, such as MPI, offer good performance, but are not intended for loosely-coupled systems. Object- and metadata-based approaches like XML offer the needed plug-and-play flexibility, but with significantly lower performance. We observe that the flexibility and baseline performance of data exchange systems are strongly determined by their wire formats, or by how they represent data for transmission in heterogeneous environments. Upon examining the performance implications of using a number of different wire formats, we propose an alternative approach for flexible high-performance data exchange, Native Data Representation, and evaluate its current implementation in the Portable Binary I/O library.}, keywords = {}, pubstate = {published}, tppubtype = {techreport} } New trends in high-performance software development such as tool- and component-based approaches have increased the need for flexible and high-performance communication systems. High-performance computing applications are being integrated with a variety of software tools to allow on-line remote data visualization, enable real-time interaction with remote sensors and instruments, or provide novel environments for human collaboration. There has also been a growing interest among high-performance researchers in component-based approaches, in an attempt to facilitate software evolution and promote software reuse. When trying to reap the well-known benefits of these approaches, the question of what communications infrastructure should be used to link the various components arises. In this context, flexibility and high performance seem to be incompatible goals. Traditional HPC-style communication libraries, such as MPI, offer good performance, but are not intended for loosely-coupled systems. Object- and metadata-based approaches like XML offer the needed plug-and-play flexibility, but with significantly lower performance. 
We observe that the flexibility and baseline performance of data exchange systems are strongly determined by their wire formats, or by how they represent data for transmission in heterogeneous environments. Upon examining the performance implications of using a number of different wire formats, we propose an alternative approach for flexible high-performance data exchange, Native Data Representation, and evaluate its current implementation in the Portable Binary I/O library. |
2000 |
Fabián E. Bustamante, Greg Eisenhauer, Karsten Schwan, Patrick Widener Efficient Wire Formats for High Performance Computing Journal Article In Proc. of Supercomputing (SC), 2000. @article{EWFHPC, title = {Efficient Wire Formats for High Performance Computing}, author = {Fabián E. Bustamante and Greg Eisenhauer and Karsten Schwan and Patrick Widener}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bustamante00EWF.pdf}, year = {2000}, date = {2000-11-03}, journal = {In Proc. of Supercomputing (SC)}, abstract = {High performance computing is being increasingly utilized in non-traditional circumstances where it must interoperate with other applications. For example, online visualization is being used to monitor the progress of applications, and real-world sensors are used as inputs to simulations. Whenever these situations arise, there is a question of what communications infrastructure should be used to link the different components. Traditional HPC-style communications systems such as MPI offer relatively high performance, but are poorly suited for developing these less tightly-coupled cooperating applications. Object-based systems and meta-data formats like XML offer substantial plug-and-play flexibility, but with substantially lower performance. We observe that the flexibility and baseline performance of all these systems are strongly determined by their "wire format", or how they represent data for transmission in a heterogeneous environment. We examine the performance implications of different wire formats and present an alternative with significant advantages in terms of both performance and flexibility.}, keywords = {}, pubstate = {published}, tppubtype = {article} } High performance computing is being increasingly utilized in non-traditional circumstances where it must interoperate with other applications. For example, online visualization is being used to monitor the progress of applications, and real-world sensors are used as inputs to simulations. Whenever these situations arise, there is a question of what communications infrastructure should be used to link the different components. Traditional HPC-style communications systems such as MPI offer relatively high performance, but are poorly suited for developing these less tightly-coupled cooperating applications. Object-based systems and meta-data formats like XML offer substantial plug-and-play flexibility, but with substantially lower performance. We observe that the flexibility and baseline performance of all these systems are strongly determined by their "wire format", or how they represent data for transmission in a heterogeneous environment. We examine the performance implications of different wire formats and present an alternative with significant advantages in terms of both performance and flexibility. |
Greg Eisenhauer, Fabián E. Bustamante, Karsten Schwan Event Services for High Performance Computing Journal Article In Proc. of Ninth International Symposium on High Performance Distributed Computing, HPDC-2000 , 2000. @article{ESHPC, title = {Event Services for High Performance Computing}, author = {Greg Eisenhauer and Fabián E. Bustamante and Karsten Schwan}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Eisenhauer00ESH.pdf}, year = {2000}, date = {2000-08-03}, journal = {In Proc. of Ninth International Symposium on High Performance Distributed Computing}, volume = {HPDC-2000}, abstract = {The Internet and the Grid are changing the face of high performance computing. Rather than tightly-coupled SPMD-style components running in a single cluster, on a parallel machine, or even on the Internet programmed in MPI, applications are evolving into sets of collaborating elements scattered across diverse computational elements. These collaborating components may run on different operating systems and hardware platforms and may be written by different organizations in different languages. Complete "applications" are constructed by assembling these components in a plug-and-play fashion. This new vision for high performance computing demands features and characteristics not easily provided by traditional high-performance communications middleware. In response to these needs, we have developed ECho, a high-performance event-delivery middleware that meets the new demands of the Grid environment. ECho provides efficient binary transmission of event data with unique features that support data-type discovery and enterprise-scale application evolution. We present measurements detailing ECho's performance to show that ECho significantly outperforms other systems intended to provide this functionality and provides throughput and latency comparable to the most efficient middleware infrastructures available.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The Internet and the Grid are changing the face of high performance computing. Rather than tightly-coupled SPMD-style components running in a single cluster, on a parallel machine, or even on the Internet programmed in MPI, applications are evolving into sets of collaborating elements scattered across diverse computational elements. These collaborating components may run on different operating systems and hardware platforms and may be written by different organizations in different languages. Complete "applications" are constructed by assembling these components in a plug-and-play fashion. This new vision for high performance computing demands features and characteristics not easily provided by traditional high-performance communications middleware. In response to these needs, we have developed ECho, a high-performance event-delivery middleware that meets the new demands of the Grid environment. ECho provides efficient binary transmission of event data with unique features that support data-type discovery and enterprise-scale application evolution. We present measurements detailing ECho's performance to show that ECho significantly outperforms other systems intended to provide this functionality and provides throughput and latency comparable to the most efficient middleware infrastructures available. |
Fabián E. Bustamante Pacioli: A Framework for Model Construction Technical Report Storage Systems Program, Computer Systems Laboratory, Hewlett-Packard Laboratory, 2000. @techreport{Pacioli, title = {Pacioli: A Framework for Model Construction}, author = {Fabián E. Bustamante}, year = {2000}, date = {2000-02-03}, institution = {Storage Systems Program, Computer Systems Laboratory, Hewlett-Packard Laboratory}, keywords = {}, pubstate = {published}, tppubtype = {techreport} } |
1999 |
Fabián E. Bustamante, Karsten Schwan Active I/O Streams for Heterogeneous High Performance Computing Conference Proc. of Parallel Computing (ParCo), 1999. @conference{AcitiveIOHHPC, title = {Active I/O Streams for Heterogeneous High Performance Computing}, author = {Fabián E. Bustamante and Karsten Schwan}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bustamante99AIOS.pdf}, year = {1999}, date = {1999-03-03}, publisher = {Proc. of Parallel Computing (ParCo)}, abstract = {We are concerned with the attainment of high performance in I/O on distributed, heterogeneous hardware. Our approach is to combine a program's data retrieval and storage actions with operations executed on the resulting active I/O streams. Performance improvements are attained by exploitation of information about these operations and by runtime changes to their behavior and placement. In this fashion, active I/O can adjust to static system properties derived from the heterogeneous nature of distributed CPU, storage, and network devices, and it can respond to dynamic changes in system conditions, thereby reducing the total bandwidth needs and/or the end-to-end latencies of I/O actions. Our prototype of an active I/O system, called Adios, implements I/O as a directed network comprised of streams originating at sources, destined for sinks, and routed through a number of intermediate vertices that act on the data units traversing the stream. Adaptive resource allocation methods based on this model are under development, with the goal of improving I/O performance of complex parallel programs running in shared heterogeneous computing environments.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } We are concerned with the attainment of high performance in I/O on distributed, heterogeneous hardware. Our approach is to combine a program's data retrieval and storage actions with operations executed on the resulting active I/O streams. Performance improvements are attained by exploitation of information about these operations and by runtime changes to their behavior and placement. In this fashion, active I/O can adjust to static system properties derived from the heterogeneous nature of distributed CPU, storage, and network devices, and it can respond to dynamic changes in system conditions, thereby reducing the total bandwidth needs and/or the end-to-end latencies of I/O actions. Our prototype of an active I/O system, called Adios, implements I/O as a directed network comprised of streams originating at sources, destined for sinks, and routed through a number of intermediate vertices that act on the data units traversing the stream. Adaptive resource allocation methods based on this model are under development, with the goal of improving I/O performance of complex parallel programs running in shared heterogeneous computing environments. |
1998 |
Asmara Afework, Michael Beynon, Fabián E. Bustamante, Angelo DeMarzo, Renato Ferreira, Robert Miller, Mark Silberman, Joel Saltz, Alan Sussman, Hubert Tsang Digital Dynamic Telepathology - the Virtual Microscope Conference Proc. of the 1998 AMIA Annual Fall Symposium, 1998. @conference{DDT-TVM, title = {Digital Dynamic Telepathology - the Virtual Microscope}, author = {Asmara Afework and Michael Beynon and Fabián E. Bustamante and Angelo DeMarzo and Renato Ferreira and Robert Miller and Mark Silberman and Joel Saltz and Alan Sussman and Hubert Tsang}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bustamante98DDT.pdf}, year = {1998}, date = {1998-08-03}, publisher = {Proc. of the 1998 AMIA Annual Fall Symposium}, abstract = {The Virtual Microscope is being designed as an integrated computer hardware and software system that generates a highly realistic digital simulation of analog, mechanical light microscopy. We present our work over the past year in meeting the challenges in building such a system. The enhancements we made are discussed, as well as the planned future improvements. Performance results are provided that show that the system scales well, so that many clients can be adequately serviced by an appropriately configured data server.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } The Virtual Microscope is being designed as an integrated computer hardware and software system that generates a highly realistic digital simulation of analog, mechanical light microscopy. We present our work over the past year in meeting the challenges in building such a system. The enhancements we made are discussed, as well as the planned future improvements. Performance results are provided that show that the system scales well, so that many clients can be adequately serviced by an appropriately configured data server. |
1997 |
Mustaque Ahamad, Raja Das, Karsten Schwan, Sumeer Bhola, Fabián E. Bustamante, Greg Eisenhauer, Jeremy Heiner, Vijaykumar Krishnaswamy, Todd Rose, Beth Schroeder, Dong Zhou Agent and Object Technologies for High-end Collaborative Applications Conference Proc. of the 1997 Dartmouth Workshop on Transportable Agents, 1997. @conference{High-endcollaborative, title = {Agent and Object Technologies for High-end Collaborative Applications}, author = {Mustaque Ahamad and Raja Das and Karsten Schwan and Sumeer Bhola and Fabián E. Bustamante and Greg Eisenhauer and Jeremy Heiner and Vijaykumar Krishnaswamy and Todd Rose and Beth Schroeder and Dong Zhou}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Ahamad97AOT.pdf}, year = {1997}, date = {1997-02-14}, publisher = {Proc. of the 1997 Dartmouth Workshop on Transportable Agents}, abstract = {Complex distributed collaborative applications have rich computational and communication needs that cannot easily be met by the currently available web-based software infrastructure. In this position paper, we claim that to address the needs of such highly demanding applications, it is necessary to develop an integrated framework that both supports high performance executions via distributed objects and makes use of agent-based computations to address dynamic application behavior, mobility, and security needs. Specifically, we claim that based on application needs and resource availability, it should be possible for an application to switch at runtime between the remote invocation and evaluation mechanisms of the object and agent technologies being employed. To support such dynamically configurable applications, we identify several issues that arise for the required integrated object-agent system. These include: (1) system support for agent and object executions and (2) the efficient execution of agents and high performance object implementations using performance techniques like caching, replication, and fragmentation of the state being accessed and manipulated. We are currently developing a system supporting high-end collaborative applications.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Complex distributed collaborative applications have rich computational and communication needs that cannot easily be met by the currently available web-based software infrastructure. In this position paper, we claim that to address the needs of such highly demanding applications, it is necessary to develop an integrated framework that both supports high performance executions via distributed objects and makes use of agent-based computations to address dynamic application behavior, mobility, and security needs. Specifically, we claim that based on application needs and resource availability, it should be possible for an application to switch at runtime between the remote invocation and evaluation mechanisms of the object and agent technologies being employed. To support such dynamically configurable applications, we identify several issues that arise for the required integrated object-agent system. These include: (1) system support for agent and object executions and (2) the efficient execution of agents and high performance object implementations using performance techniques like caching, replication, and fragmentation of the state being accessed and manipulated. We are currently developing a system supporting high-end collaborative applications. |
Fabián E. Bustamante, Richard M. Fujimoto An Empirical Comparison of Time Warp and the NPSI Elastic Time Protocol Technical Report College of Computing, Georgia Institute of Technology (GIT-CC-97-13), 1997. @techreport{Emerical-Comparison, title = {An Empirical Comparison of Time Warp and the NPSI Elastic Time Protocol}, author = {Fabián E. Bustamante and Richard M. Fujimoto}, year = {1997}, date = {1997-02-06}, number = {GIT-CC-97-13}, institution = {College of Computing, Georgia Institute of Technology}, keywords = {}, pubstate = {published}, tppubtype = {techreport} } |