2013
Mario A. Sánchez, John S. Otto, Zachary S. Bischof, David R. Choffnes, Fabián E. Bustamante, Balachander Krishnamurthy, Walter Willinger. Dasu: A measurement experimentation platform at the Internet's edge. Technical Report NWU-EECS-13-09, Department of Computer Science, Northwestern University, 2013.
PDF: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/NU-EECS-13-09.pdf
Abstract: Poor visibility into the network hampers progress in a number of important research areas, from network troubleshooting to Internet topology and performance mapping. This persistent, well-known problem has motivated numerous proposals to build or extend existing platforms by recruiting larger, more diverse sets of vantage points. However, capturing the edge of the network remains an elusive goal. We argue that at its root the problem is one of incentives. Today's measurement platforms build on the assumption that the goals of experimenters and those hosting the platform are the same. As much of the Internet's growth occurs in residential broadband networks, this assumption no longer holds. We present Dasu, a measurement experimentation platform built on an alternative model that explicitly aligns the objectives of the experimenters with those of the users hosting the platform. Dasu is designed to support both network measurement experimentation and broadband characterization. In this paper, we discuss some of the challenges we faced building a platform for the Internet's edge, describe our current design and implementation, and illustrate the unique perspective our current deployment brings to Internet measurement. Dasu has been publicly available since July 2010 and is currently in use by over 95,000 users with a heterogeneous set of connections spread across 1,802 networks and 151 countries.
John S. Otto, Fabián E. Bustamante. The hidden locality in swarms. In Proc. of IEEE P2P, 2013.
PDF: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/P2P2013Otto.pdf
Slides: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/P2P2013Otto-slides.pptx
Abstract: People use P2P systems such as BitTorrent to share an unprecedented variety and amount of content with others around the world. The random connection pattern used by BitTorrent has been shown to result in reduced performance for users and costly cross-ISP traffic. Although several client-side systems have been proposed to improve the locality of BitTorrent traffic, their effectiveness is limited by the availability of local peers. We show that sufficient locality is present in swarms -- if one looks at the right time. We find that 50% of ISPs have at least five local peers online during the ISP's peak hour, typically in the evening, compared to only 20% of ISPs during the median hour. To better discover these local peers, we show how to increase the overall peer discovery rate by over two orders of magnitude using client-side techniques: leveraging additional trackers, requesting more peers per sample, and sampling more frequently. We propose an approach to predict the future availability of local peers based on observed diurnal patterns, which enables peers to apply these techniques selectively and minimize undue load on trackers.
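The three techniques compound multiplicatively, which is how a two-orders-of-magnitude gain is plausible. A back-of-the-envelope sketch in Python (every parameter value below is an illustrative assumption, not a measurement from the paper):

```python
# Back-of-the-envelope estimate of the combined peer-discovery speed-up.
# All values below are illustrative assumptions, not numbers from the paper.

baseline = {"trackers": 1, "peers_per_sample": 50, "samples_per_hour": 2}
tuned    = {"trackers": 4, "peers_per_sample": 200, "samples_per_hour": 12}

def discovery_rate(cfg):
    # peers discovered per hour ~ trackers x peers per announce x announces/hour
    return cfg["trackers"] * cfg["peers_per_sample"] * cfg["samples_per_hour"]

print(f"speed-up: {discovery_rate(tuned) / discovery_rate(baseline):.0f}x")  # 96x
```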
Zachary S. Bischof, Mario A. Sánchez, John S. Otto, John P. Rula, Fabián E. Bustamante. Characterizing Broadband Services with Dasu. Demonstration at USENIX NSDI, 2013.
PDF: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/nsdi13-broadband-demo.pdf
Abstract: We present the broadband characterization functionality of Dasu, showcase its user interface, and include side-by-side comparisons of competing broadband services. This poster complements Sánchez et al. (appearing in NSDI) and its related demo submission; both focus on the design and implementation of Dasu as an experimental platform. As mentioned in the NSDI work, Dasu partially relies on service characterization as an incentive for adoption. This side of Dasu is a prototype implementation of our crowdsourcing-based, end-system approach to broadband characterization. By leveraging monitoring information from local hosts and home routers, our approach can attain scalability, continuity, and an end-user perspective while avoiding the potential pitfalls of similar models.
Mario A. Sánchez, John S. Otto, Zachary S. Bischof, David R. Choffnes, Fabián E. Bustamante, Walter Willinger. Experiments at the Internet's Edge with Dasu. Demonstration at USENIX NSDI, 2013.
PDF: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/nsdi13-dasu-experiment-demo.pdf
Abstract: Dasu is an extensible measurement experimentation platform for the Internet's edge. Dasu is composed of a distributed collection of clients, hosted by participating end hosts, and a core set of services for managing and coordinating experimentation. Dasu supports and builds on broadband characterization as an incentive for adoption to capture the network and service diversity of the commercial Internet. This demo presents Dasu in action, focusing on its experiment delegation mechanism and showing how it enables third-party experimentation while maintaining security and accountability.
Mario A. Sánchez, John S. Otto, Zachary S. Bischof, David R. Choffnes, Fabián E. Bustamante, Balachander Krishnamurthy, Walter Willinger. Dasu: Pushing Experiments to the Internet's Edge. In Proc. of the USENIX Symposium on Networked Systems Design and Implementation (NSDI), 2013.
PDF: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/dasu-measurement.pdf
Slides: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Sanchez2013NSDISlides.pdf
Abstract: We present Dasu, a measurement experimentation platform for the Internet's edge. Dasu supports both controlled network experimentation and broadband characterization, building on public interest in the latter to gain the adoption necessary for the former. We discuss some of the challenges we faced building a platform for the Internet's edge, describe our current design and implementation, and illustrate the unique perspective it brings to Internet measurement. Dasu has been publicly available since July 2010 and has been installed by over 90,000 users with a heterogeneous set of connections spread across 1,802 networks and 147 countries.
Mario A. Sánchez, John S. Otto, Zachary S. Bischof, Fabián E. Bustamante. Trying Broadband Characterization at Home. In Proc. of the Passive and Active Measurement Conference (PAM), 2013.
PDF: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/pam-upnp.pdf
Slides: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Slides.pdf
Abstract: In recent years, the quantity and diversity of Internet-enabled consumer devices in the home have increased significantly. These trends complicate device usability and home resource management and have implications for crowdsourced approaches to broadband characterization. The UPnP protocol has emerged as an open standard for device and service discovery that simplifies device usability and resource management in home networks. In this work, we leverage UPnP to understand the dynamics of home device usage, at both a macro and a micro level, and to sketch an effective approach to broadband characterization that runs behind the last meter. Using UPnP measurements collected from over 13K end users, we show that while home networks can be quite complex, the number of devices that actively and regularly connect to the Internet is limited. Furthermore, we find a high correlation between the number of UPnP-enabled devices in home networks and the presence of UPnP-enabled gateways, and show how this can be leveraged for effective broadband characterization.
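The UPnP discovery step such measurements rely on is standard SSDP: multicast an M-SEARCH request and collect the unicast replies. A minimal sketch of that generic step (not Dasu's actual collector):

```python
import socket

# SSDP discovery: multicast an M-SEARCH probe and collect unicast replies.
SSDP_ADDR, SSDP_PORT = "239.255.255.250", 1900
MSEARCH = "\r\n".join([
    "M-SEARCH * HTTP/1.1",
    f"HOST: {SSDP_ADDR}:{SSDP_PORT}",
    'MAN: "ssdp:discover"',
    "MX: 2",                    # devices reply within MX seconds
    "ST: upnp:rootdevice",      # search target: any UPnP root device
    "", "",
])

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(3.0)
sock.sendto(MSEARCH.encode("ascii"), (SSDP_ADDR, SSDP_PORT))

devices = set()
try:
    while True:
        data, addr = sock.recvfrom(4096)
        devices.add(addr[0])    # responders include UPnP-enabled gateways
        print(addr[0], data.decode(errors="replace").splitlines()[0])
except socket.timeout:
    pass
print(f"{len(devices)} UPnP device(s) responded")
```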
2012
John S. Otto, Mario A. Sánchez, John P. Rula, Fabián E. Bustamante. Content delivery and the natural evolution of DNS - Remote DNS Trends, Performance Issues and Alternative Solutions. In Proc. of IMC, 2012.
PDF: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/OttoIMC2012.pdf
Slides: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Otto2012IMCSlides.pptx
Abstract: Content Delivery Networks (CDNs) rely on the Domain Name System (DNS) for replica server selection. DNS-based server selection builds on the assumption that, in the absence of information about the client's actual network location, the location of a client's DNS resolver provides a good approximation. The recent growth of remote DNS services breaks this assumption and can negatively impact clients' web performance. In this paper, we assess the end-to-end impact of using remote DNS services on CDN performance and present the first evaluation of an industry-proposed solution to the problem. We find that remote DNS usage can indeed significantly impact clients' web performance and that the proposed solution, if available, can effectively address the problem for most clients. Considering the performance cost of remote DNS usage and the limited adoption base of the industry-proposed solution, we present and evaluate an alternative approach, Direct Resolution, to readily obtain comparable performance improvements without requiring CDN or DNS participation.
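The abstract names Direct Resolution without its mechanics; the idea, as described, is that the client queries an authoritative nameserver itself so that replica selection sees the client's own address rather than a remote resolver's. A simplified sketch under that reading, using the dnspython package (the zone argument and single-zone assumption are simplifications; a real client must follow CNAME chains and locate zone cuts):

```python
import dns.message
import dns.query
import dns.rdatatype
import dns.resolver

def direct_resolve(hostname, zone):
    """Sketch of Direct Resolution: query the zone's authoritative server
    directly, so the CDN's DNS sees this client's address rather than a
    remote recursive resolver's. `zone` is assumed known here; a real
    client must follow CNAME chains and discover the zone cut itself."""
    ns_name = dns.resolver.resolve(zone, "NS")[0].target     # one authoritative NS
    ns_ip = dns.resolver.resolve(ns_name, "A")[0].address
    reply = dns.query.udp(dns.message.make_query(hostname, "A"), ns_ip, timeout=2)
    return [rr.address for rrset in reply.answer
            if rrset.rdtype == dns.rdatatype.A for rr in rrset]
```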
Zachary S. Bischof, John S. Otto, Fabián E. Bustamante. Up, Down and Around the Stack: ISP Characterization from Network Intensive Applications. In Proc. of ACM SIGCOMM Workshop on Measurements Up the STack (W-MUST), 2012.
PDF: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/ZBischof12WMUST.pdf
Abstract: Broadband characterization has recently attracted much attention from the research community and the general public. Given the important business and policy implications of residential Internet service characterization, recent years have brought a variety of approaches to profiling Internet services, ranging from Web-based platforms to dedicated infrastructure inside home networks. We have previously argued that network-intensive applications provide an almost ideal vantage point for broadband characterization at sufficient scale, nearly continuously and from end users. While we have shown that the approach is indeed effective at service characterization and can enable performance comparisons between service providers and geographic regions, a key unanswered question is how well the performance characteristics captured by these systems can predict the overall user experience with different applications. In this paper, using BitTorrent as an example host application, we present initial results that demonstrate how to obtain estimates of bandwidth and latency of a network connection by leveraging passive monitoring and limited active measurements from network-intensive applications. We then analyze user-experienced web performance under a variety of network conditions and show how estimates from a network-intensive application can serve as good web performance predictors.
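A toy version of the passive piece of this idea: a network-intensive host application occasionally saturates the link, so a high percentile of its per-interval throughput lower-bounds link capacity. The paper's estimators are more careful than this sketch, and the percentile choice is an illustrative assumption:

```python
def capacity_estimate(bytes_per_interval, interval_s=1.0, pct=0.95):
    """Toy passive capacity estimator: the application's busiest intervals
    bound achievable throughput from below. Assumes the host application
    (e.g., BitTorrent) saturates the link at least occasionally."""
    rates = sorted(8 * b / interval_s for b in bytes_per_interval)  # bits/s
    return rates[min(int(pct * len(rates)), len(rates) - 1)]
```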
John S. Otto, Mario A. Sánchez, John P. Rula, Ted Stein, Fabián E. Bustamante. namehelp: intelligent client-side DNS resolution. In ACM SIGCOMM CCR Special Issue, 42(4), 2012.
PDF: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/OttoSigcommPoster2012.pdf
Abstract: The Domain Name System (DNS) is a fundamental component of today's Internet. Recent years have seen radical changes to DNS, with increased usage of remote DNS and public DNS services such as OpenDNS. Given the close relationship between DNS and Content Delivery Networks (CDNs), and the pervasive use of CDNs by many popular applications including web browsing and real-time entertainment services, it is important to understand the impact of remote and public DNS services on users' overall experience on the Web. This work presents a tool, namehelp, which comparatively evaluates DNS services in terms of the web performance they provide, and implements an end-host solution to address the performance impact of remote DNS on CDNs. The demonstration shows the functionality of namehelp, with live results on its performance improvements.
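A toy version of the comparative-evaluation idea, timing the same lookups against the system resolver and two public DNS services (dnspython; the resolver list and workload are illustrative, and namehelp additionally measures the web performance delivered by the answers, which this sketch does not):

```python
import time
import dns.resolver

# Illustrative comparison in the spirit of namehelp: time identical lookups
# against the system resolver and public DNS services.
CANDIDATES = {"system": None, "Google": "8.8.8.8", "OpenDNS": "208.67.222.222"}
NAMES = ["www.example.com", "www.wikipedia.org"]   # illustrative workload

for label, ip in CANDIDATES.items():
    res = dns.resolver.Resolver()          # no cache by default: each query is live
    if ip:
        res.nameservers = [ip]
    t0 = time.perf_counter()
    for name in NAMES:
        res.resolve(name, "A")
    print(f"{label:8s} {1000 * (time.perf_counter() - t0) / len(NAMES):6.1f} ms/lookup")
```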
John Rula, Fabián E. Bustamante. Crowd (Soft) Control: Moving Beyond the Opportunistic. In Proc. of the Thirteenth Workshop on Mobile Computing Systems and Applications (HotMobile), 2012.
PDF: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/JRula12HotMobile.pdf
Slides: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/JRula12HotMobileSlides.pdf
Abstract: A number of novel wireless networked services, ranging from participatory sensing to social networking, leverage the increasing capabilities of mobile devices and the movement of the individuals carrying them. The effectiveness of many of these systems fundamentally depends on coverage and on the particular mobility patterns of the participants. Given the strong spatial and temporal regularity of human mobility, the needed coverage can typically only be attained through a large participant base. In this paper we explore an alternative approach to attaining coverage without scale -- (soft) controlling the movement of participants. We present Crowd Soft Control (CSC), an approach to exert limited control over the temporal and spatial movements of mobile users by leveraging the built-in incentives of location-based gaming and social applications. By pairing network services with these location-based apps, CSC allows researchers to use an application's incentives (e.g., game objectives) to control the movement of participating users, increasing the effectiveness and efficiency of the associated network service. After outlining the case for Crowd Soft Control, we present an initial prototype of our ideas and discuss potential benefits and costs in the context of two case studies.
2011
Zachary S. Bischof, John S. Otto, Fabián E. Bustamante. Distributed Systems and Natural Disasters -- BitTorrent as a Global Witness. In Proc. of CoNEXT Special Workshop on the Internet and Disasters (SWID), 2011.
PDF: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/ZBischof11SWID.pdf
Slides: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/ZBischof11SWID_Slides.pdf
Abstract: Peer-to-peer (P2P) systems represent some of the largest distributed systems in today's Internet. Among P2P systems, BitTorrent is the most popular, potentially accounting for 20-50% of P2P file-sharing traffic. In this paper, we argue that this popularity can be leveraged to monitor the impact of natural disasters and political unrest on the Internet. We focus our analysis on the 2011 Tohoku earthquake and tsunami and use a view from BitTorrent to show that it is possible to identify the specific regions and network links where Internet usage and connectivity were most affected.
Zachary S. Bischof, John S. Otto, Mario A. Sánchez, John P. Rula, David R. Choffnes, Fabián E. Bustamante. Crowdsourcing ISP Characterization to the Network Edge. In Proc. of ACM SIGCOMM Workshop on Measurements Up the STack (W-MUST), 2011.
PDF: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/ZBischof11WMUST.pdf
Slides: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/ZBischof11WMUST_Slides.pdf
Abstract: Evaluating and characterizing Internet Service Providers (ISPs) is critical to subscribers shopping for alternative ISPs, companies providing reliable Internet services, and governments surveying the coverage of broadband services to their citizens. Ideally, ISP characterization should be done at scale, continuously, and from end users. While there has been significant progress toward this end, current approaches exhibit apparently unavoidable tradeoffs between coverage, continuous monitoring, and capturing user-perceived performance. In this paper, we argue that network-intensive applications running on end systems avoid these tradeoffs, thereby offering an ideal platform for ISP characterization. Based on data collected from 500,000 peer-to-peer BitTorrent users across 3,150 networks, together with reported results from the U.K. Ofcom/SamKnows studies, we show the feasibility of this approach for characterizing the service that subscribers can expect from a particular ISP. We discuss remaining research challenges and design requirements for a solution that enables efficient and accurate ISP characterization at Internet scale.
John S. Otto, Mario A. Sánchez, David R. Choffnes, Fabián E. Bustamante, Georgos Siganos. On Blind Mice and the Elephant -- Understanding the Network Impact of a Large Distributed System. In Proc. of ACM SIGCOMM, 2011.
PDF: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/JOtto11SIGCOMM.pdf
Slides: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/OttoSigcomm2011.pptx
Abstract: A thorough understanding of the network impact of emerging large-scale distributed systems -- where traffic flows and what it costs -- must encompass users' behavior, the traffic they generate and the topology over which that traffic flows. In the case of BitTorrent, however, previous studies have been limited by narrow perspectives that restrict such analysis. This paper presents a comprehensive view of BitTorrent, using data from a representative set of 500,000 users sampled over a two-year period, located in 169 countries and 3,150 networks. This unique perspective captures unseen trends and reveals several unexpected features of the largest peer-to-peer system. For instance, over the past year total BitTorrent traffic has increased by 12%, driven by 25% increases in per-peer hourly download volume despite a 10% decrease in the average number of online peers. We also observe stronger diurnal usage patterns and, surprisingly given the bandwidth-intensive nature of the application, a close alignment between these patterns and overall traffic. Considering the aggregated traffic across access links, this has potential implications for BitTorrent-associated costs to Internet Service Providers (ISPs). Using data from a transit ISP, we find a disproportionately large impact under a commonly used burstable (95th-percentile) billing model. Finally, when examining BitTorrent traffic's paths, we find that for over half its users, most network traffic never reaches large transit networks, but is instead carried by small transit ISPs. This raises questions about the effectiveness of most in-network monitoring systems at capturing trends in peer-to-peer traffic and further motivates our approach.
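The burstable-billing observation is easy to make concrete: under the 95th-percentile rule, the provider sorts the (typically 5-minute) traffic samples for the billing period, discards the top 5%, and bills the next-highest rate, so synchronized diurnal peaks drive the bill even when average usage is low. A sketch with illustrative numbers:

```python
import math

def burstable_bill(samples_mbps, price_per_mbps):
    """Burstable (95th-percentile) billing: sort the traffic samples,
    discard the top 5%, and charge for the next-highest rate.
    Illustrative sketch; real contracts vary in sampling and rounding."""
    ordered = sorted(samples_mbps)
    idx = math.ceil(0.95 * len(ordered)) - 1
    return ordered[idx] * price_per_mbps

# An evening-heavy day: 240 off-peak and 48 peak 5-minute samples.
# The bill tracks the peak rate (90 Mbps), not the ~32 Mbps average.
day = [20] * 240 + [90] * 48
print(burstable_bill(day, price_per_mbps=5.0))   # 450.0
```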
Mario A. Sánchez, John S. Otto, Zachary S. Bischof, Fabián E. Bustamante. Dasu - ISP Characterization from the Edge: A BitTorrent Implementation. Demo in Proc. of ACM SIGCOMM, 2011.
PDF: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/MSanchez11SIGCOMM.pdf
Abstract: Evaluating and characterizing access ISPs is critical to consumers shopping for alternative services and governments surveying the availability of broadband services to their citizens. We present Dasu, a service for crowdsourcing ISP characterization to the edge of the network. Dasu is implemented as an extension to a popular BitTorrent client and has been available since July 2010. While the prototype uses BitTorrent as its host application, its design is agnostic to the particular host application. The demo showcases our current implementation using both a prerecorded execution trace and a live run.
2010
Gareth Bennett, Eoin A. King, Jan Curn, Vinny Cahill, Fabián E. Bustamante, Henry J. Rice. Environmental Noise Mapping Using Measurements in Transit. In Proc. of the International Conference on Noise and Vibration Engineering (ISMA), 2010.
PDF: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/ISMA10-Bennett.pdf
Abstract: Due to the ever-increasing level of environmental noise that the EU population is exposed to, member states are directed to disseminate community noise exposure levels to the public in accordance with EU Directive 2002/49/EC. Environmental noise maps are used for this purpose and as a means to avoid, prevent, or reduce the harmful effects of exposure to environmental noise. There is no common standard to which these maps are generated in the EU, and the maps are in most cases inaccurate due to poorly informed predictive models. This paper develops a novel environmental noise monitoring methodology that allows accurate road-noise measurements to replace erroneous source-model approximations in the generation of noise maps. The approach proposes acquiring sound levels and position coordinates from instrumented vehicles such as bicycles or cars, or from pedestrians equipped with a smartphone. The accumulation of large amounts of data over time yields extremely high spatial and temporal resolution, resulting in accurate measurements of environmental noise.
David R. Choffnes, Fabián E. Bustamante, Zihui Ge. Crowdsourcing Service-Level Network Event Detection. In Proc. of ACM SIGCOMM, 2010.
PDF: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes10SIGCOMM.pdf
Abstract: The user experience for networked applications is becoming a key benchmark for customers and network providers. Perceived user experience is largely determined by the frequency, duration, and severity of network events that impact a service. While today's networks implement sophisticated infrastructure that issues alarms for most failures, there remains a class of silent outages (e.g., caused by configuration errors) that are not detected. Further, existing alarms provide little information to help operators understand the impact of network events on services. Attempts to address this through infrastructure that monitors end-to-end performance for customers have been hampered by the cost of deployment and by the volume of data generated by these solutions. We present an alternative approach that pushes monitoring to applications on end systems and uses their collective view to detect network events and their impact on services -- an approach we call Crowdsourcing Event Monitoring (CEM). This paper presents a general framework for CEM systems and demonstrates its effectiveness for a P2P application using a large dataset gathered from BitTorrent users and confirmed network events from two ISPs. We discuss how we designed and deployed a prototype CEM implementation as an extension to BitTorrent. This system performs online service-level network event detection through passive monitoring and correlation of performance in end users' applications.
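A toy sketch of the corroboration idea behind CEM: local, passive detections are cheap but noisy, so an event is declared only when multiple hosts in the same network flag the same time window. The report format and threshold below are illustrative assumptions, not CEM's actual detection logic:

```python
from collections import defaultdict

def detect_events(reports, min_hosts=3):
    """Toy corroboration step in the spirit of CEM: each host passively
    flags local performance drops as (network, time_bucket, host_id);
    an event is declared only when enough distinct hosts in the same
    network flag the same time bucket."""
    witnesses = defaultdict(set)
    for network, bucket, host in reports:
        witnesses[(network, bucket)].add(host)
    return [key for key, hosts in witnesses.items() if len(hosts) >= min_hosts]

reports = [("AS7922", 101, h) for h in ("a", "b", "c")] + [("AS7922", 102, "a")]
print(detect_events(reports))   # [('AS7922', 101)] -- the lone report is ignored
```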
David R. Choffnes, Jordi Duch, Dean Malmgren, Roger Guimera, Fabián E. Bustamante, Luis Amaral. Strange Bedfellows: Communities in BitTorrent. In Proc. of the 9th International Workshop on Peer-to-Peer Systems (IPTPS), 2010.
PDF: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes10IPTPS.pdf
Abstract: While P2P systems benefit from large numbers of interconnected nodes, each of these connections provides an opportunity for eavesdropping. Using only the connection patterns gathered from 10,000 BitTorrent (BT) users during a one-month period, we determine whether randomized connection patterns give rise to communities of users. Even though connections in BT require not only shared interest in content, but also concurrent sessions, we find that strong communities naturally form -- users inside a typical community are 5 to 25 times more likely to connect to each other than to users outside. These strong communities enable guilt by association, where the behavior of an entire community of users can be inferred by monitoring one of its members. Our study shows that through a single observation point, an attacker trying to identify such communities can uncover 50% of the network within a distance of two hops. Finally, we propose and evaluate a practical solution that mitigates this threat.
David R. Choffnes, Fabián E. Bustamante. Pitfalls for Testbed Evaluations of Internet Systems. In ACM SIGCOMM CCR, 2010.
PDF: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes10CCR.pdf
Abstract: Today's open platforms for network measurement and distributed system research, which we collectively refer to as testbeds in this article, provide opportunities for controllable experimentation and evaluation of systems at the scale of hundreds or thousands of hosts. In this article, we identify several issues with extending results from such platforms to Internet-wide perspectives. Specifically, we try to quantify the level of inaccuracy and incompleteness of testbed results when applied in the context of a large-scale peer-to-peer (P2P) system. Based on our results, we emphasize the importance of measurements in the appropriate environment when evaluating Internet-scale systems.
David R. Choffnes, Mario A. Sánchez, Fabián E. Bustamante. Network positioning from the edge: An empirical study of the effectiveness of network positioning in P2P systems. In Proc. of IEEE INFOCOM, 2010.
PDF: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes10Infocom.pdf
Final version: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes10InfocomFinal.pdf
Abstract: Network positioning systems provide an important service to large-scale P2P systems, potentially enabling clients to achieve higher performance, reduce cross-ISP traffic, and improve the robustness of the system to failures. Because traces representative of this environment are generally unavailable, and there is no platform suited for experimentation at the appropriate scale, network positioning systems have been commonly implemented and evaluated in simulation and on research testbeds. The performance of network positioning remains an open question for large deployments at the edges of the network. This paper evaluates how four key classes of network positioning systems fare when deployed at scale and measured in the P2P systems where they are used. Using 2 billion network measurements gathered from more than 43,000 IP addresses probing over 8 million other IPs worldwide, we show that network positioning exhibits noticeably worse performance than previously reported in studies conducted on research testbeds. To explain this result, we identify several key properties of this environment that call into question fundamental assumptions driving network positioning research.
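For readers unfamiliar with the area: Vivaldi (Dabek et al.) is the best-known decentralized coordinate algorithm of the kind such systems build on. A compact sketch of one Vivaldi update step, in its plain Euclidean form without the height vector (the constants are the usual illustrative choices, not parameters from this paper):

```python
import math

def vivaldi_update(xi, ei, xj, ej, rtt, cc=0.25, ce=0.25):
    """One Vivaldi step: move node i's coordinate xi toward or away from
    xj so that predicted distance matches the measured RTT; ei/ej are
    the nodes' running error estimates."""
    dist = math.dist(xi, xj) or 1e-9            # coincident nodes would need
    w = ei / (ei + ej)                          # a random direction in practice
    es = abs(dist - rtt) / rtt                  # relative error of this sample
    ei_new = es * ce * w + ei * (1 - ce * w)    # moving average of local error
    delta = cc * w                              # adaptive timestep
    unit = [(a - b) / dist for a, b in zip(xi, xj)]
    xi_new = [a + delta * (rtt - dist) * u for a, u in zip(xi, unit)]
    return xi_new, ei_new

# Predicted distance 10 but measured RTT 20: node i is pushed away from j.
print(vivaldi_update([0.0, 0.0], 1.0, [10.0, 0.0], 1.0, rtt=20.0))
```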
David R. Choffnes, Fabián E. Bustamante. Taming the Torrent. In USENIX, 2010.
PDF: http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes10login.pdf
Guohan Lu, Yan Chen, Stefan Birrer, Fabian E. Bustamante, Xing Li POPI: A User-level Tool for Inferring Router Packet Forwarding Priority Journal Article In IEEE/ACM Transactions on Networking (ToN), 2010. @article{POPI, title = {POPI: A User-level Tool for Inferring Router Packet Forwarding Priority}, author = {Guohan Lu and Yan Chen and Stefan Birrer and Fabian E. Bustamante and Xing Li}, url = {https://ieeexplore.ieee.org/document/5233840}, year = {2010}, date = {2010-02-03}, journal = {In IEEE/ACM Transactions on Networking (ToN)}, abstract = {Packet forwarding prioritization (PFP) in routers is one of the mechanisms commonly available to network operators. PFP can have a significant impact on the accuracy of network measurements, the performance of applications and the effectiveness of network troubleshooting procedures. Despite its potential impacts, no information on PFP settings is readily available to end users. In this paper, we present an end-to-end approach for PFP inference and its associated tool, POPI. This is the first attempt to infer router packet forwarding priority through end-to-end measurement. POPI enables users to discover such network policies through measurements of packet losses of different packet types. We evaluated our approach via statistical analysis, simulation and wide-area experimentation in PlanetLab. We employed POPI to analyze 156 paths among 162 PlanetLab sites. POPI flagged 15 paths with multiple priorities, 13 of which were further validated through hop-by-hop loss rate measurements. In addition, we surveyed all related network operators and received responses for about half of them, all confirming our inferences. We also compared POPI with inference mechanisms based on other metrics, such as packet reordering (out-of-order, or OOO). OOO is unable to find many priority paths, such as those implemented via traffic policing. Interestingly, however, we found that OOO can detect mechanisms that induce delay differences among packet types, such as slow processing paths in the router and port-based load sharing. }, keywords = {}, pubstate = {published}, tppubtype = {article} } Packet forwarding prioritization (PFP) in routers is one of the mechanisms commonly available to network operators. PFP can have a significant impact on the accuracy of network measurements, the performance of applications and the effectiveness of network troubleshooting procedures. Despite its potential impacts, no information on PFP settings is readily available to end users. In this paper, we present an end-to-end approach for PFP inference and its associated tool, POPI. This is the first attempt to infer router packet forwarding priority through end-to-end measurement. POPI enables users to discover such network policies through measurements of packet losses of different packet types. We evaluated our approach via statistical analysis, simulation and wide-area experimentation in PlanetLab. We employed POPI to analyze 156 paths among 162 PlanetLab sites. POPI flagged 15 paths with multiple priorities, 13 of which were further validated through hop-by-hop loss rate measurements. In addition, we surveyed all related network operators and received responses for about half of them, all confirming our inferences. We also compared POPI with inference mechanisms based on other metrics, such as packet reordering (out-of-order, or OOO). OOO is unable to find many priority paths, such as those implemented via traffic policing. Interestingly, however, we found that OOO can detect mechanisms that induce delay differences among packet types, such as slow processing paths in the router and port-based load sharing. |
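To make the mechanism concrete: POPI's core signal is that different packet types on the same path show statistically different loss rates. The sketch below is an illustrative stand-in (a simple two-proportion z-test over per-type loss counts), not the rank-based classification the paper actually uses; all names and thresholds are ours.

```python
import math

def loss_rate_differs(sent_a, lost_a, sent_b, lost_b, z_crit=2.58):
    """Two-proportion z-test: do packet types A and B see different loss rates?

    A significant difference on an end-to-end path is the kind of evidence of
    forwarding prioritization POPI looks for. Simplified sketch only; the
    actual tool rank-classifies loss rates across many packet types.
    """
    p_a, p_b = lost_a / sent_a, lost_b / sent_b
    p_pool = (lost_a + lost_b) / (sent_a + sent_b)
    se = math.sqrt(p_pool * (1 - p_pool) * (1 / sent_a + 1 / sent_b))
    if se == 0:
        return False
    return abs(p_a - p_b) / se > z_crit

# Example: 10,000 probes per type; type B (e.g., a policed port) loses far more.
print(loss_rate_differs(10000, 120, 10000, 480))  # True -> flag path for validation
```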
2009 |
Kai Chen, David R. Choffnes, Rahul Potharaju, Yan Chen, Fabián E. Bustamante Where the Sidewalk Ends: Extending the Internet AS Graph Using Traceroutes From P2P Users Journal Article In Proc. of CoNEXT, 2009. @article{WSEEI, title = {Where the Sidewalk Ends: Extending the Internet AS Graph Using Traceroutes From P2P Users}, author = {Kai Chen and David R. Choffnes and Rahul Potharaju and Yan Chen and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/KChen09Conext.pdf}, year = {2009}, date = {2009-12-06}, journal = {In Proc. of CoNEXT}, abstract = {An accurate Internet topology graph is important in many areas of networking, from deciding ISP business relationships to diagnosing network anomalies. Most Internet mapping efforts have derived the network structure, at the level of interconnected autonomous systems (ASes), from a limited number of either BGP- or traceroute-based data sources. While techniques for charting the topology continue to improve, the growth of the number of vantage points is significantly outpaced by the rapid growth of the Internet. In this paper, we argue that a promising approach to revealing the hidden areas of the Internet topology is through active measurement from an observation platform that scales with the growing Internet. By leveraging measurements performed by an extension to a popular P2P system, we show that this approach indeed exposes significant new topological information. Based on traceroute measurements from more than 992,000 IPs in over 3,700 ASes distributed across the Internet hierarchy, our proposed heuristics identify 23,914 new AS links not visible in the publicly-available BGP data -- 12.86% more customer-provider links and 40.99% more peering links than previously reported. We validate our heuristics using data from a tier-1 ISP and show that they correctly filter out all false links introduced by public IP-to-AS mapping. We have made the identified set of links and their inferred relationships publicly available.}, keywords = {}, pubstate = {published}, tppubtype = {article} } An accurate Internet topology graph is important in many areas of networking, from deciding ISP business relationships to diagnosing network anomalies. Most Internet mapping efforts have derived the network structure, at the level of interconnected autonomous systems (ASes), from a limited number of either BGP- or traceroute-based data sources. While techniques for charting the topology continue to improve, the growth of the number of vantage points is significantly outpaced by the rapid growth of the Internet. In this paper, we argue that a promising approach to revealing the hidden areas of the Internet topology is through active measurement from an observation platform that scales with the growing Internet. By leveraging measurements performed by an extension to a popular P2P system, we show that this approach indeed exposes significant new topological information. Based on traceroute measurements from more than 992,000 IPs in over 3,700 ASes distributed across the Internet hierarchy, our proposed heuristics identify 23,914 new AS links not visible in the publicly-available BGP data -- 12.86% more customer-provider links and 40.99% more peering links than previously reported. We validate our heuristics using data from a tier-1 ISP and show that they correctly filter out all false links introduced by public IP-to-AS mapping. We have made the identified set of links and their inferred relationships publicly available. |
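The first step of this approach is mechanical enough to sketch: map each traceroute hop to an AS and treat adjacent, distinct ASes as candidate links, which the paper's heuristics then filter (for instance, discarding links introduced by bad IP-to-AS mappings). A toy version, with a hand-built prefix map standing in for one derived from BGP routing tables; a careful pipeline would also invalidate links spanning unmapped hops rather than silently skipping them, as this sketch does.

```python
import ipaddress

# Toy longest-prefix IP-to-AS map; in practice this is built from BGP dumps.
PREFIX_TO_AS = {
    ipaddress.ip_network("192.0.2.0/24"): 64500,
    ipaddress.ip_network("198.51.100.0/24"): 64501,
    ipaddress.ip_network("203.0.113.0/24"): 64502,
}

def ip_to_as(ip):
    """Longest-prefix match of an IP address to an origin AS (None if unmapped)."""
    addr = ipaddress.ip_address(ip)
    matches = [n for n in PREFIX_TO_AS if addr in n]
    return PREFIX_TO_AS[max(matches, key=lambda n: n.prefixlen)] if matches else None

def candidate_as_links(traceroute_ips):
    """Collapse an IP-level path to AS hops and emit adjacent AS pairs."""
    as_path, links = [], set()
    for hop in traceroute_ips:
        asn = ip_to_as(hop)
        if asn is not None and (not as_path or as_path[-1] != asn):
            as_path.append(asn)
    for a, b in zip(as_path, as_path[1:]):
        links.add((min(a, b), max(a, b)))
    return links

print(candidate_as_links(["192.0.2.7", "192.0.2.9", "198.51.100.3", "203.0.113.1"]))
```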
Ao-Jan Su, David R. Choffnes, Aleksandar Kuzmanovic, Fabián E. Bustamante Drafting Behind Akamai: Inferring Network Conditions Based on CDN Redirections Journal Article In IEEE/ACM Transactions on Networking (ToN), 17 (6), 2009. @article{DBAc, title = {Drafting Behind Akamai: Inferring Network Conditions Based on CDN Redirections}, author = {Ao-Jan Su and David R. Choffnes and Aleksandar Kuzmanovic and Fabián E. Bustamante}, url = {https://ieeexplore.ieee.org/document/5238553}, year = {2009}, date = {2009-12-03}, journal = {In IEEE/ACM Transactions on Networking (ToN)}, volume = {17}, number = {6}, abstract = {To enhance Web browsing experiences, content distribution networks (CDNs) move Web content "closer" to clients by caching copies of Web objects on thousands of servers worldwide. Additionally, to minimize client download times, such systems perform extensive network and server measurements and use them to redirect clients to different servers over short time scales. In this paper, we explore techniques for inferring and exploiting network measurements performed by the largest CDN, Akamai; our objective is to locate and utilize quality Internet paths without performing extensive path probing or monitoring. Our contributions are threefold. First, we conduct a broad measurement study of Akamai's CDN. We probe Akamai's network from 140 PlanetLab (PL) vantage points for two months. We find that Akamai redirection times, while slightly higher than advertised, are sufficiently low to be useful for network control. Second, we empirically show that Akamai redirections overwhelmingly correlate with network latencies on the paths between clients and the Akamai servers. Finally, we illustrate how large-scale overlay networks can exploit Akamai redirections to identify the best detouring nodes for one-hop source routing. Our research shows that in more than 50% of investigated scenarios, it is better to route through the nodes "recommended" by Akamai than to use the direct paths. Because this is not the case for the rest of the scenarios, we develop low-overhead pruning algorithms that avoid Akamai-driven paths when they are not beneficial. Because these Akamai nodes are part of a closed system, we provide a method for mapping Akamai-recommended paths to those in a generic overlay and demonstrate that these one-hop paths indeed outperform direct ones. }, keywords = {}, pubstate = {published}, tppubtype = {article} } To enhance Web browsing experiences, content distribution networks (CDNs) move Web content "closer" to clients by caching copies of Web objects on thousands of servers worldwide. Additionally, to minimize client download times, such systems perform extensive network and server measurements and use them to redirect clients to different servers over short time scales. In this paper, we explore techniques for inferring and exploiting network measurements performed by the largest CDN, Akamai; our objective is to locate and utilize quality Internet paths without performing extensive path probing or monitoring. Our contributions are threefold. First, we conduct a broad measurement study of Akamai's CDN. We probe Akamai's network from 140 PlanetLab (PL) vantage points for two months. We find that Akamai redirection times, while slightly higher than advertised, are sufficiently low to be useful for network control. Second, we empirically show that Akamai redirections overwhelmingly correlate with network latencies on the paths between clients and the Akamai servers. 
Finally, we illustrate how large-scale overlay networks can exploit Akamai redirections to identify the best detouring nodes for one-hop source routing. Our research shows that in more than 50% of investigated scenarios, it is better to route through the nodes "recommended" by Akamai than to use the direct paths. Because this is not the case for the rest of the scenarios, we develop low-overhead pruning algorithms that avoid Akamai-driven paths when they are not beneficial. Because these Akamai nodes are part of a closed system, we provide a method for mapping Akamai-recommended paths to those in a generic overlay and demonstrate that these one-hop paths indeed outperform direct ones. |
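The measurement-reuse idea can be illustrated in a few lines: repeatedly resolve a CDN-accelerated hostname and treat the replica servers it returns as fresh, low-latency hints from the CDN's own measurements. This sketch covers only the probing step, with a placeholder hostname; real detour selection (mapping overlay nodes to replicas, pruning unhelpful paths) is far more involved.

```python
import socket
import time
from collections import Counter

def sample_redirections(hostname, samples=5, interval=20):
    """Poll DNS for a CDN-served hostname and count the replica IPs returned.

    Frequently returned replicas are the CDN's current low-latency choices
    from this vantage point -- the 'hint' reused for detour selection.
    """
    seen = Counter()
    for _ in range(samples):
        infos = socket.getaddrinfo(hostname, 80, proto=socket.IPPROTO_TCP)
        seen.update({info[4][0] for info in infos})  # sockaddr -> IP string
        time.sleep(interval)
    return seen

# Placeholder: substitute any CDN-accelerated name you are authorized to query.
# ranking = sample_redirections("www.example-cdn-customer.com").most_common()
```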
John S. Otto, Fabián E. Bustamante Distributed or Centralized Traffic Advisory Systems -- The Application's Take Journal Article In Proc. of IEEE SECON, 2009. @article{DCTAS, title = {Distributed or Centralized Traffic Advisory Systems -- The Application's Take}, author = {John S. Otto and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/JOtto09SECON.pdf}, year = {2009}, date = {2009-06-06}, journal = {In Proc. of IEEE SECON}, abstract = {We consider the problem of data dissemination in vehicular networks. Our main goal is to compare the application-level performance of fully distributed and centralized data dissemination approaches in the context of traffic advisory systems. Vehicular networks are emerging as a new distributed system environment with myriad promising applications. Wirelessly-connected, GPS-equipped vehicles can be used, for instance, as probes for traffic advisory or pavement condition information services with significant improvements in cost, coverage and accuracy. There is an ongoing discussion on the pros and cons of alternative approaches to data distribution for these applications. Proposed centralized, or infrastructure-based, models rely on road-side equipment to upload information to a central location for later use. Distributed approaches take advantage of the direct exchanges between participating vehicles to achieve higher scalability at the potential cost of data consistency. While distributed solutions can significantly reduce infrastructures' deployment and maintenance costs, it is unclear what the impact of "imprecise" information is on an application or what level of adoption is needed for this model to be effective. This paper investigates the inherent trade-offs in the adoption of distributed or centralized approaches to a traffic advisory service, a commonly proposed application. We based our analysis on a measurement study of signal propagation in urban settings and extensive simulation-based experimentation in the Chicago road network.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We consider the problem of data dissemination in vehicular networks. Our main goal is to compare the application-level performance of fully distributed and centralized data dissemination approaches in the context of traffic advisory systems. Vehicular networks are emerging as a new distributed system environment with myriad promising applications. Wirelessly-connected, GPS-equipped vehicles can be used, for instance, as probes for traffic advisory or pavement condition information services with significant improvements in cost, coverage and accuracy. There is an ongoing discussion on the pros and cons of alternative approaches to data distribution for these applications. Proposed centralized, or infrastructure-based, models rely on road-side equipment to upload information to a central location for later use. Distributed approaches take advantage of the direct exchanges between participating vehicles to achieve higher scalability at the potential cost of data consistency. While distributed solutions can significantly reduce infrastructures' deployment and maintenance costs, it is unclear what the impact of "imprecise" information is on an application or what level of adoption is needed for this model to be effective. This paper investigates the inherent trade-offs in the adoption of distributed or centralized approaches to a traffic advisory service, a commonly proposed application. 
We based our analysis on a measurement study of signal propagation in urban settings and extensive simulation-based experimentation in the Chicago road network. |
John S. Otto, Fabián E. Bustamante, Randall A. Berry Down the Block and Around the Corner -- The Impact of Radio Propagation on Inter-vehicle Wireless Communication Journal Article In Proc. of IEEE International Conference on Distributed Computing Systems (ICDCS), 2009. @article{DBAC, title = {Down the Block and Around the Corner -- The Impact of Radio Propagation on Inter-vehicle Wireless Communication}, author = {John S. Otto and Fabián E. Bustamante and Randall A. Berry}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/JOtto09ICDCS.pdf}, year = {2009}, date = {2009-06-03}, journal = {In Proc. of IEEE International Conference on Distributed Computing Systems (ICDCS)}, abstract = {Vehicular networks are emerging as a new distributed system environment with myriad possible applications. Most studies on vehicular networks are carried out via simulation, given the logistical and economical problems with large-scale deployments. This paper investigates the impact of realistic radio propagation settings on the evaluation of VANET-based systems. Using a set of instrumented cars, we collected IEEE 802.11b signal propagation measurements between vehicles in a variety of urban and suburban environments. We found that signal propagation between vehicles varies in different settings, especially between line-of-sight ("down the block") and non line-of-sight ("around the corner") communication in the same setting. Using a probabilistic shadowing model, we evaluate the impact of different parameter settings on the performance of an epidemic data dissemination protocol and discuss the implications of our findings. We also suggest a variation of a basic signal propagation model that incorporates additional realism without sacrificing scalability by taking advantage of environmental information, including node locations and street information.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Vehicular networks are emerging as a new distributed system environment with myriad possible applications. Most studies on vehicular networks are carried out via simulation, given the logistical and economical problems with large-scale deployments. This paper investigates the impact of realistic radio propagation settings on the evaluation of VANET-based systems. Using a set of instrumented cars, we collected IEEE 802.11b signal propagation measurements between vehicles in a variety of urban and suburban environments. We found that signal propagation between vehicles varies in different settings, especially between line-of-sight ("down the block") and non line-of-sight ("around the corner") communication in the same setting. Using a probabilistic shadowing model, we evaluate the impact of different parameter settings on the performance of an epidemic data dissemination protocol and discuss the implications of our findings. We also suggest a variation of a basic signal propagation model that incorporates additional realism without sacrificing scalability by taking advantage of environmental information, including node locations and street information. |
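The probabilistic shadowing model the abstract refers to is the standard log-distance path-loss model with a zero-mean Gaussian shadowing term (in dB). A small sketch with illustrative parameter values; the paper fits such parameters per setting (line-of-sight "down the block" vs. non-line-of-sight "around the corner"), so nothing below should be read as the measured values.

```python
import math
import random

def received_power_dbm(tx_dbm, d, d0=1.0, pl_d0=40.0, n=2.8, sigma=6.0):
    """Log-distance path loss with log-normal shadowing:
    Pr(d) = Pt - PL(d0) - 10*n*log10(d/d0) + X, with X ~ N(0, sigma) in dB.
    The exponent n and deviation sigma here are illustrative placeholders.
    """
    shadowing = random.gauss(0.0, sigma)
    return tx_dbm - pl_d0 - 10.0 * n * math.log10(d / d0) + shadowing

def delivery_prob(d, trials=10000, tx_dbm=15.0, rx_threshold_dbm=-85.0):
    """Monte Carlo estimate of reception probability at distance d (meters)."""
    ok = sum(received_power_dbm(tx_dbm, d) >= rx_threshold_dbm for _ in range(trials))
    return ok / trials

for d in (50, 100, 200):
    print(d, "m:", delivery_prob(d))
```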
David R. Choffnes, Fabián E. Bustamante On the Effectiveness of Measurement Reuse for Performance-Based Detouring Journal Article In Proc. of IEEE INFOCOM, 2009. @article{EMRPBD, title = {On the Effectiveness of Measurement Reuse for Performance-Based Detouring}, author = {David R. Choffnes and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes09Infocom.pdf}, year = {2009}, date = {2009-04-03}, journal = {In Proc. of IEEE INFOCOM}, abstract = {For both technological and economic reasons, the default path between two end systems in the wide-area Internet can be suboptimal. This observation has motivated a number of systems that attempt to improve reliability and performance by routing over one or more hops in an overlay. Most of the proposed solutions, however, fall at an extreme in the cost-performance trade-off. While some provide near-optimal performance with an unscalable measurement overhead, others avoid measurement when selecting routes around network failures but make no attempt to optimize performance. This paper presents an experimental evaluation of an alternative approach to scalable, performance detouring based on the strategic reuse of measurements from other large-scale distributed systems, namely content distribution networks (CDN). By relying on CDN redirections as hints on network conditions, higher performance paths are readily found with little overhead and no active network measurement. We report results from a study of more than 13,700 paths between 170 widely-distributed hosts over a three-week period, showing the advantages of this approach. We demonstrate the practicality of our approach by implementing an FTP suite that uses our publicly available SideStep library to take advantage of these improved Internet routes.}, keywords = {}, pubstate = {published}, tppubtype = {article} } For both technological and economic reasons, the default path between two end systems in the wide-area Internet can be suboptimal. This observation has motivated a number of systems that attempt to improve reliability and performance by routing over one or more hops in an overlay. Most of the proposed solutions, however, fall at an extreme in the cost-performance trade-off. While some provide near-optimal performance with an unscalable measurement overhead, others avoid measurement when selecting routes around network failures but make no attempt to optimize performance. This paper presents an experimental evaluation of an alternative approach to scalable, performance detouring based on the strategic reuse of measurements from other large-scale distributed systems, namely content distribution networks (CDN). By relying on CDN redirections as hints on network conditions, higher performance paths are readily found with little overhead and no active network measurement. We report results from a study of more than 13,700 paths between 170 widely-distributed hosts over a three-week period, showing the advantages of this approach. We demonstrate the practicality of our approach by implementing an FTP suite that uses our publicly available SideStep library to take advantage of these improved Internet routes. |
2008 |
Yi Qiao, Dong Lu, Fabián E. Bustamante, Peter Dinda, Stefan Birrer Improving Peer-to-Peer Performance Through Server-Side Scheduling Journal Article In ACM Transactions on Computer Systems (TOCS), 26 (4), 2008. @article{P2PPSSS, title = { Improving Peer-to-Peer Performance Through Server-Side Scheduling}, author = {Yi Qiao and Dong Lu and Fabián E. Bustamante and Peter Dinda and Stefan Birrer}, url = {https://dl.acm.org/citation.cfm?id=1455260}, year = {2008}, date = {2008-12-03}, journal = {In ACM Transactions on Computer Systems (TOCS)}, volume = {26}, number = {4}, abstract = {We show how to significantly improve the mean response time seen by both uploaders and downloaders in peer-to-peer data-sharing systems. Our work is motivated by the observation that response times are largely determined by the performance of the peers serving the requested objects, that is, by the peers in their capacity as servers. With this in mind, we take a close look at this server side of peers, characterizing its workload by collecting and examining an extensive set of traces. Using trace-driven simulation, we demonstrate the promise and potential problems with scheduling policies based on shortest-remaining-processing-time (SRPT), the algorithm known to be optimal for minimizing mean response time. The key challenge to using SRPT in this context is determining request service times. In addressing this challenge, we introduce two new estimators that enable predictive SRPT scheduling policies that closely approach the performance of ideal SRPT. We evaluate our approach through extensive single-server and system-level simulation coupled with real Internet deployment and experimentation.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We show how to significantly improve the mean response time seen by both uploaders and downloaders in peer-to-peer data-sharing systems. Our work is motivated by the observation that response times are largely determined by the performance of the peers serving the requested objects, that is, by the peers in their capacity as servers. With this in mind, we take a close look at this server side of peers, characterizing its workload by collecting and examining an extensive set of traces. Using trace-driven simulation, we demonstrate the promise and potential problems with scheduling policies based on shortest-remaining-processing-time (SRPT), the algorithm known to be optimal for minimizing mean response time. The key challenge to using SRPT in this context is determining request service times. In addressing this challenge, we introduce two new estimators that enable predictive SRPT scheduling policies that closely approach the performance of ideal SRPT. We evaluate our approach through extensive single-server and system-level simulation coupled with real Internet deployment and experimentation. |
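The scheduling policy itself is compact: at each decision point, serve the request with the smallest estimated remaining service time, which is exactly where the paper's service-time estimators come in. A minimal sketch under that framing; the estimator below (remaining bytes over an assumed rate) is a trivial stand-in for the paper's predictive estimators, and all names are ours.

```python
import heapq
import itertools

class SRPTQueue:
    """Serve the request with the smallest estimated remaining service time."""

    def __init__(self):
        self._heap = []
        self._tie = itertools.count()  # FIFO tiebreak among equal estimates

    def submit(self, request_id, remaining_bytes, est_rate_bps):
        # The crux in practice is this estimate; a real system would plug in
        # a learned predictor of service time rather than a fixed rate.
        est_remaining = remaining_bytes / est_rate_bps
        heapq.heappush(self._heap, (est_remaining, next(self._tie), request_id))

    def next_to_serve(self):
        return heapq.heappop(self._heap)[2] if self._heap else None

q = SRPTQueue()
q.submit("big-file", 50_000_000, 1_000_000)   # ~50 s estimated
q.submit("small-file", 200_000, 1_000_000)    # ~0.2 s estimated
print(q.next_to_serve())  # -> 'small-file', which is what minimizes mean response time
```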
David R. Choffnes, Fabián E. Bustamante Taming the Torrent: A practical approach to reducing cross-ISP traffic in P2P systems Journal Article In Proc. of ACM SIGCOMM, 2008. @article{TTP2P, title = {Taming the Torrent: A practical approach to reducing cross-ISP traffic in P2P systems}, author = {David R. Choffnes and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes08Sigcomm.pdf}, year = {2008}, date = {2008-08-03}, journal = {In Proc. of ACM SIGCOMM}, abstract = {Peer-to-peer (P2P) systems, which provide a variety of popular services, such as file sharing, video streaming and voice-over-IP, contribute a significant portion of today's Internet traffic. By building overlay networks that are oblivious to the underlying Internet topology and routing, these systems have become one of the greatest traffic-engineering challenges for Internet Service Providers (ISPs) and the source of costly data traffic flows. In an attempt to reduce these operational costs, ISPs have tried to shape, block or otherwise limit P2P traffic, much to the chagrin of their subscribers, who consistently find ways to eschew these controls or simply switch providers. In this paper, we present the design, deployment and evaluation of an approach to reducing this costly cross-ISP traffic without sacrificing system performance. Our approach recycles network views gathered at low cost from content distribution networks to drive biased neighbor selection without any path monitoring or probing. Using results collected from a deployment in BitTorrent with over 120,000 users in nearly 3,000 networks, we show that our lightweight approach significantly reduces cross-ISP traffic and over 33% of the time it selects peers along paths that are within a single autonomous system (AS). Further, we find that our system locates peers along paths that have two orders of magnitude lower latency and 30% lower loss rates than those picked at random, and that these high-quality paths can lead to significant improvements in transfer rates. In challenged settings where peers are overloaded in terms of available bandwidth, our approach provides 31% average download-rate improvement; in environments with large available bandwidth, it increases download rates by 207% on average (and improves median rates by 883%). DATA SET: As we state in the paper, data used for this study will be made available upon request to edgescope@aqua-lab.org. For privacy reasons, the data is provided at an AS-level granularity. Note that you will have to agree to these terms before we grant access to the data. Also note that the dataset consists of 10s of GB of compressed data, so plan accordingly. }, keywords = {}, pubstate = {published}, tppubtype = {article} } Peer-to-peer (P2P) systems, which provide a variety of popular services, such as file sharing, video streaming and voice-over-IP, contribute a significant portion of today's Internet traffic. By building overlay networks that are oblivious to the underlying Internet topology and routing, these systems have become one of the greatest traffic-engineering challenges for Internet Service Providers (ISPs) and the source of costly data traffic flows. In an attempt to reduce these operational costs, ISPs have tried to shape, block or otherwise limit P2P traffic, much to the chagrin of their subscribers, who consistently find ways to eschew these controls or simply switch providers. 
In this paper, we present the design, deployment and evaluation of an approach to reducing this costly cross-ISP traffic without sacrificing system performance. Our approach recycles network views gathered at low cost from content distribution networks to drive biased neighbor selection without any path monitoring or probing. Using results collected from a deployment in BitTorrent with over 120,000 users in nearly 3,000 networks, we show that our lightweight approach significantly reduces cross-ISP traffic and over 33% of the time it selects peers along paths that are within a single autonomous system (AS). Further, we find that our system locates peers along paths that have two orders of magnitude lower latency and 30% lower loss rates than those picked at random, and that these high-quality paths can lead to significant improvements in transfer rates. In challenged settings where peers are overloaded in terms of available bandwidth, our approach provides 31% average download-rate improvement; in environments with large available bandwidth, it increases download rates by 207% on average (and improves median rates by 883%). DATA SET: As we state in the paper, data used for this study will be made available upon request to edgescope@aqua-lab.org. For privacy reasons, the data is provided at an AS-level granularity. Note that you will have to agree to these terms before we grant access to the data. Also note that the dataset consists of 10s of GB of compressed data, so plan accordingly. |
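The neighbor-selection rule lends itself to a short sketch: each peer records how often the CDN redirects it to each replica server, and favors BitTorrent peers whose redirection profiles resemble its own, since overlapping redirections suggest network proximity. A toy version using cosine similarity over replica-frequency maps; the threshold and map contents are illustrative, not values from the deployment.

```python
import math

def cosine(a, b):
    """Cosine similarity of two replica-frequency maps (replica -> fraction)."""
    dot = sum(a[k] * b.get(k, 0.0) for k in a)
    na = math.sqrt(sum(v * v for v in a.values()))
    nb = math.sqrt(sum(v * v for v in b.values()))
    return dot / (na * nb) if na and nb else 0.0

def prefer_peer(my_map, peer_map, threshold=0.15):
    """Bias neighbor selection toward peers the CDN sends to similar replicas."""
    return cosine(my_map, peer_map) >= threshold

mine = {"replica-a": 0.7, "replica-b": 0.3}
near = {"replica-a": 0.6, "replica-c": 0.4}
far = {"replica-x": 1.0}
print(prefer_peer(mine, near), prefer_peer(mine, far))  # True False
```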
Ao-Jan Su, David R. Choffnes, Fabián E. Bustamante, Aleksandar Kuzmanovic Relative Network Positioning via CDN Redirections Journal Article In Proc. of the International Conference on Distributed Computing Systems (ICDCS), 2008. @article{RNPCDNR, title = {Relative Network Positioning via CDN Redirections}, author = {Ao-Jan Su and David R. Choffnes and Fabián E. Bustamante and Aleksandar Kuzmanovic}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/AJSu08CRP.pdf}, year = {2008}, date = {2008-06-06}, journal = { In Proc. of the International Conference on Distributed Computing Systems (ICDCS)}, abstract = {Many large-scale distributed systems can benefit from a service that allows them to select among alternative nodes based on their relative network positions. A variety of approaches propose new measurement infrastructures that attempt to scale this service to large numbers of nodes by reducing the amount of direct measurements to end hosts. In this paper, we introduce a new approach to relative network positioning that eliminates direct probing by leveraging pre-existing infrastructure. Specifically, we exploit the dynamic association of nodes with replica servers from large content distribution networks (CDNs) to determine relative position information -- we call this approach CDN-based Relative network Positioning (CRP). We demonstrate how CRP can support two common examples of location information used by distributed applications: server selection and dynamic node clustering. After describing CRP in detail, we present results from an extensive wide-area evaluation that demonstrates its effectiveness.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Many large-scale distributed systems can benefit from a service that allows them to select among alternative nodes based on their relative network positions. A variety of approaches propose new measurement infrastructures that attempt to scale this service to large numbers of nodes by reducing the amount of direct measurements to end hosts. In this paper, we introduce a new approach to relative network positioning that eliminates direct probing by leveraging pre-existing infrastructure. Specifically, we exploit the dynamic association of nodes with replica servers from large content distribution networks (CDNs) to determine relative position information -- we call this approach CDN-based Relative network Positioning (CRP). We demonstrate how CRP can support two common examples of location information used by distributed applications: server selection and dynamic node clustering. After describing CRP in detail, we present results from an extensive wide-area evaluation that demonstrates its effectiveness. |
Fabián E. Bustamante, Yi Qiao Designing Less-structured P2P Systems for the Expected High Churn Journal Article In IEEE/ACM Transactions on Networking, (ToN), 16 (3), 2008. @article{DesigningLess, title = {Designing Less-structured P2P Systems for the Expected High Churn}, author = {Fabián E. Bustamante and Yi Qiao}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/BustamanteTON07.pdf}, year = {2008}, date = {2008-06-03}, journal = {In IEEE/ACM Transactions on Networking, (ToN)}, volume = {16}, number = {3}, abstract = {We address the problem of highly transient populations in unstructured and loosely-structured peer-to-peer systems. We propose a number of illustrative query-related strategies and organizational protocols that, by taking into consideration the expected session times of peers (their lifespans), yield systems with performance characteristics more resilient to the natural instability of their environments. We first demonstrate the benefits of lifespan-based organizational protocols in terms of end-application performance and in the context of dynamic and heterogeneous Internet environments. We do this using a number of currently adopted and proposed query-related strategies, including methods for query distribution, caching and replication. We then show, through trace-driven simulation and wide-area experimentation, the performance advantages of lifespan-based, query-related strategies when layered over currently employed and lifespan-based organizational protocols. While merely illustrative, the evaluated strategies and protocols clearly demonstrate the advantages of considering peers' session time in designing widely-deployed peer-to-peer systems.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We address the problem of highly transient populations in unstructured and loosely-structured peer-to-peer systems. We propose a number of illustrative query-related strategies and organizational protocols that, by taking into consideration the expected session times of peers (their lifespans), yield systems with performance characteristics more resilient to the natural instability of their environments. We first demonstrate the benefits of lifespan-based organizational protocols in terms of end-application performance and in the context of dynamic and heterogeneous Internet environments. We do this using a number of currently adopted and proposed query-related strategies, including methods for query distribution, caching and replication. We then show, through trace-driven simulation and wide-area experimentation, the performance advantages of lifespan-based, query-related strategies when layered over currently employed and lifespan-based organizational protocols. While merely illustrative, the evaluated strategies and protocols clearly demonstrate the advantages of considering peers' session time in designing widely-deployed peer-to-peer systems. |
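The organizational intuition is easy to state in code: when session times are heavy-tailed, a peer's expected remaining uptime grows with its observed age, so preferring the longest-lived peers as neighbors yields more stable overlays. A toy selection rule under that assumption; the class, field, and function names are ours, not the paper's.

```python
import time

class Peer:
    def __init__(self, addr, joined_at):
        self.addr = addr
        self.joined_at = joined_at

    def age(self, now):
        """Observed session time so far, in seconds."""
        return now - self.joined_at

def pick_neighbors(candidates, k, now=None):
    """Prefer the eldest peers: under heavy-tailed lifespans, peers that have
    already stayed long are the least likely to leave soon."""
    now = time.time() if now is None else now
    return sorted(candidates, key=lambda p: p.age(now), reverse=True)[:k]

now = 1_000_000.0
peers = [Peer("a", now - 30), Peer("b", now - 86_400), Peer("c", now - 3_600)]
print([p.addr for p in pick_neighbors(peers, 2, now)])  # ['b', 'c']
```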
2007 |
Stefan Birrer, Fabián E. Bustamante A Comparison of Resilient Overlay Multicast Approaches Journal Article In IEEE Journal on Selected Areas in Communications (JSAC) -- Special Issue on Advances in Peer-to-Peer Streaming Systems, 25 (9), 2007. @article{CROMA, title = {A Comparison of Resilient Overlay Multicast Approaches}, author = {Stefan Birrer and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/SBirrerJSAC07.pdf}, year = {2007}, date = {2007-12-03}, journal = {In IEEE Journal on Selected Areas in Communications (JSAC) -- Special Issue on Advances in Peer-to-Peer Streaming Systems}, volume = {25}, number = {9}, abstract = {Overlay-based multicast has been proposed as a key alternative for large-scale group communication. There is ample motivation for such an approach, as it delivers the scalability advantages of multicast while avoiding the deployment issues of a network-level solution. As multicast functionality is pushed to autonomous, unpredictable end systems, however, significant performance loss can result from their higher degree of transiency when compared to routers. Consequently, a number of techniques have recently been proposed to improve overlays' resilience by exploiting path diversity and minimizing node dependencies. Delivering high application performance at relatively low costs and under a high degree of transiency has proven to be a difficult task. Each of the proposed resilient techniques comes with a different trade-off in terms of delivery ratio, end-to-end latency and additional network traffic. In this paper, we review some of these approaches and evaluate their effectiveness by contrasting the performance and associated cost of representative protocols through simulation and wide area experimentation.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Overlay-based multicast has been proposed as a key alternative for large-scale group communication. There is ample motivation for such an approach, as it delivers the scalability advantages of multicast while avoiding the deployment issues of a network-level solution. As multicast functionality is pushed to autonomous, unpredictable end systems, however, significant performance loss can result from their higher degree of transiency when compared to routers. Consequently, a number of techniques have recently been proposed to improve overlays' resilience by exploiting path diversity and minimizing node dependencies. Delivering high application performance at relatively low costs and under a high degree of transiency has proven to be a difficult task. Each of the proposed resilient techniques comes with a different trade-off in terms of delivery ratio, end-to-end latency and additional network traffic. In this paper, we review some of these approaches and evaluate their effectiveness by contrasting the performance and associated cost of representative protocols through simulation and wide area experimentation. |
Jack Lange, Peter Dinda, Fabián E. Bustamante Vortex: Enabling Cooperative Selective Wormholing for Network Security Systems Journal Article In Proc. of 10th International Symposium on Recent Advances in Intrusion Detection , 2007. @article{Vortex, title = {Vortex: Enabling Cooperative Selective Wormholing for Network Security Systems}, author = {Jack Lange and Peter Dinda and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/JLange07VRTX.pdf}, year = {2007}, date = {2007-09-03}, journal = {In Proc. of 10th International Symposium on Recent Advances in Intrusion Detection }, abstract = {We present a novel approach to remote traffic aggregation for Network Intrusion Detection Systems (NIDS) called Cooperative Selective Wormholing (CSW). Our approach works by selectively aggregating traffic bound for unused network ports on a volunteer’s commodity PC. CSW could enable NIDS operators to cheaply and efficiently monitor large distributed portions of the Internet, something they are currently incapable of. Based on a study of several hundred hosts in a university network, we posit that there is sufficient heterogeneity in hosts’ network service configurations to achieve a high degree of network coverage by re-using unused port space on client machines. We demonstrate Vortex, a proof-of-concept CSW implementation that runs on a wide range of commodity PCs (Unix and Windows). Our experiments show that Vortex can selectively aggregate traffic to a virtual machine backend, effectively allowing two machines to share the same IP address transparently. We close with a discussion of the basic requirements for a large-scale CSW deployment.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We present a novel approach to remote traffic aggregation for Network Intrusion Detection Systems (NIDS) called Cooperative Selective Wormholing (CSW). Our approach works by selectively aggregating traffic bound for unused network ports on a volunteer’s commodity PC. CSW could enable NIDS operators to cheaply and efficiently monitor large distributed portions of the Internet, something they are currently incapable of. Based on a study of several hundred hosts in a university network, we posit that there is sufficient heterogeneity in hosts’ network service configurations to achieve a high degree of network coverage by re-using unused port space on client machines. We demonstrate Vortex, a proof-of-concept CSW implementation that runs on a wide range of commodity PCs (Unix and Windows). Our experiments show that Vortex can selectively aggregate traffic to a virtual machine backend, effectively allowing two machines to share the same IP address transparently. We close with a discussion of the basic requirements for a large-scale CSW deployment. |
David R. Choffnes, Fabián E. Bustamante Exploiting Emergent Behavior for Inter-Vehicle Communication Journal Article In Proc. of 2nd International Workshop on Hot Topics in Autonomic Computing, 2007. @article{EEBIVC, title = { Exploiting Emergent Behavior for Inter-Vehicle Communication}, author = {David R. Choffnes and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes07EEB.pdf}, year = {2007}, date = {2007-06-03}, journal = {In Proc. of 2nd International Workshop on Hot Topics in Autonomic Computing}, abstract = {We introduce Virtual Ferry Networking (VFN), a novel approach to data dissemination services on mobile ad-hoc networks. VFN exploits the emergent patterns of vehicles’ mobility to buffer and carry messages when immediately forwarding those messages would fail. Instead of depending on a fixed, small set of vehicles and paths for ferrying messages, VFN allows any vehicle moving along part of a virtual route to become a possible carrier for messages. VFN helps address many of the challenges with supporting distributed applications in challenging ad-hoc vehicular networks with rapidly changing topologies, fast-moving vehicles and signal-weakening obstructions such as bridges and buildings. We discuss the challenges with implementing VFN and present evaluation results from an early prototype.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We introduce Virtual Ferry Networking (VFN), a novel approach to data dissemination services on mobile ad-hoc networks. VFN exploits the emergent patterns of vehicles’ mobility to buffer and carry messages when immediately forwarding those messages would fail. Instead of depending on a fixed, small set of vehicles and paths for ferrying messages, VFN allows any vehicle moving along part of a virtual route to become a possible carrier for messages. VFN helps address many of the challenges with supporting distributed applications in challenging ad-hoc vehicular networks with rapidly changing topologies, fast-moving vehicles and signal-weakening obstructions such as bridges and buildings. We discuss the challenges with implementing VFN and present evaluation results from an early prototype. |
Guohan Lu, Yan Chen, Stefan Birrer, Fabián E. Bustamante, Chin Yin Cheung, Xing Li End-to-end Inference of Router Packet Forwarding Priority Journal Article In Proc. of IEEE INFOCOM, 2007. @article{E2EIRPFP, title = {End-to-end Inference of Router Packet Forwarding Priority}, author = {Guohan Lu and Yan Chen and Stefan Birrer and Fabián E. Bustamante and Chin Yin Cheung and Xing Li}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/GLu07POPI.pdf}, year = {2007}, date = {2007-05-03}, journal = { In Proc. of IEEE INFOCOM}, abstract = {Packet forwarding prioritization (PFP) in routers is one of the mechanisms commonly available to network administrators. PFP can have a significant impact on the performance of applications, the accuracy of measurement tools’ results and the effectiveness of network troubleshooting procedures. Despite their potential impact, no information on PFP settings is readily available to end users. In this paper, we present an end-to-end approach for packet forwarding priority inference and its associated tool, POPI. This is the first attempt to infer router packet-forwarding priority through end-to-end measurement. Our POPI tool enables users to discover such network policies through the monitoring and rank classification of loss rates for different packet types. We validated our approach via statistical analysis, simulation, and wide-area experimentation in PlanetLab. As part of our wide-area experiments, we employed POPI to analyze 156 random paths across 162 PlanetLab nodes. We discovered 15 paths flagged with multiple priorities, 13 of which were further validated through hop-by-hop loss rates measurements. In addition, we surveyed all related network operators and received responses for about half of them confirming our inferences. }, keywords = {}, pubstate = {published}, tppubtype = {article} } Packet forwarding prioritization (PFP) in routers is one of the mechanisms commonly available to network administrators. PFP can have a significant impact on the performance of applications, the accuracy of measurement tools’ results and the effectiveness of network troubleshooting procedures. Despite their potential impact, no information on PFP settings is readily available to end users. In this paper, we present an end-to-end approach for packet forwarding priority inference and its associated tool, POPI. This is the first attempt to infer router packet-forwarding priority through end-to-end measurement. Our POPI tool enables users to discover such network policies through the monitoring and rank classification of loss rates for different packet types. We validated our approach via statistical analysis, simulation, and wide-area experimentation in PlanetLab. As part of our wide-area experiments, we employed POPI to analyze 156 random paths across 162 PlanetLab nodes. We discovered 15 paths flagged with multiple priorities, 13 of which were further validated through hop-by-hop loss rates measurements. In addition, we surveyed all related network operators and received responses for about half of them confirming our inferences. |
2006 |
Stefan Birrer, Fabián E. Bustamante Resilience in Overlay Multicast Protocols Journal Article In Proc. of the 14th IEEE/ACM International Symposium on Modeling, Analysis, and Simulation of Computer and Telecommunication Systems, 2006. @article{ROMP, title = {Resilience in Overlay Multicast Protocols}, author = {Stefan Birrer and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/SBirrer06ROMP.pdf}, year = {2006}, date = {2006-10-03}, journal = {In Proc. of the 14th IEEE/ACM International Symposium on Modeling, Analysis, and Simulation of Computer and Telecommunication Systems}, abstract = {One of the most important challenges of self-organized, overlay systems for large-scale group communication lies in these systems' ability to handle the high degree of transiency inherent to their environment. While a number of resilient protocols and techniques have been recently proposed, achieving high delivery ratios without sacrificing end-to-end latencies or incurring significant additional costs has proven to be a difficult task. In this paper we review some of these approaches and experimentally evaluate their effectiveness by contrasting their performance and associated cost through simulation and wide-area experimentation.}, keywords = {}, pubstate = {published}, tppubtype = {article} } One of the most important challenges of self-organized, overlay systems for large-scale group communication lies in these systems' ability to handle the high degree of transiency inherent to their environment. While a number of resilient protocols and techniques have been recently proposed, achieving high delivery ratios without sacrificing end-to-end latencies or incurring significant additional costs has proven to be a difficult task. In this paper we review some of these approaches and experimentally evaluate their effectiveness by contrasting their performance and associated cost through simulation and wide-area experimentation. |
Ao-Jan Su, David R. Choffnes, Aleksandar Kuzmanovic, Fabián E. Bustamante Drafting Behind Akamai (Travelocity-Based Detouring) Journal Article In Proc. of ACM SIGCOMM 2006, 2006. @article{DBA, title = { Drafting Behind Akamai (Travelocity-Based Detouring)}, author = {Ao-Jan Su and David R. Choffnes and Aleksandar Kuzmanovic and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/Ajsu06DBA.pdf http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/Ajsu06DBA.ppt}, year = {2006}, date = {2006-09-03}, journal = { In Proc. of ACM SIGCOMM 2006}, abstract = {To enhance web browsing experiences, content distribution networks (CDNs) move web content closer to clients by caching copies of web objects on thousands of servers worldwide. Additionally, to minimize client download times, such systems perform extensive network and server measurements, and use them to redirect clients to different servers over short time scales. In this paper, we explore techniques for inferring and exploiting network measurements performed by the largest CDN, Akamai; our objective is to locate and utilize quality Internet paths without performing extensive path probing or monitoring. Our contributions are threefold. First, we conduct a broad measurement study of Akamai's CDN. We probe Akamai's network from 140 PlanetLab vantage points for two months. We find that Akamai redirection times, while slightly higher than advertised, are sufficiently low to be useful for network control. Second, we empirically show that Akamai redirections overwhelmingly correlate with network latencies on the paths between clients and the Akamai servers. Finally, we illustrate how large-scale overlay networks can exploit Akamai redirections to identify the best detouring nodes for one-hop source routing. Our research shows that in more than 50% of investigated scenarios, it is better to route through the nodes recommended by Akamai than to use the direct paths. Because this is not the case for the rest of the scenarios, we develop low-overhead pruning algorithms that avoid Akamai-driven paths when they are not beneficial.}, keywords = {}, pubstate = {published}, tppubtype = {article} } To enhance web browsing experiences, content distribution networks (CDNs) move web content closer to clients by caching copies of web objects on thousands of servers worldwide. Additionally, to minimize client download times, such systems perform extensive network and server measurements, and use them to redirect clients to different servers over short time scales. In this paper, we explore techniques for inferring and exploiting network measurements performed by the largest CDN, Akamai; our objective is to locate and utilize quality Internet paths without performing extensive path probing or monitoring. Our contributions are threefold. First, we conduct a broad measurement study of Akamai's CDN. We probe Akamai's network from 140 PlanetLab vantage points for two months. We find that Akamai redirection times, while slightly higher than advertised, are sufficiently low to be useful for network control. Second, we empirically show that Akamai redirections overwhelmingly correlate with network latencies on the paths between clients and the Akamai servers. Finally, we illustrate how large-scale overlay networks can exploit Akamai redirections to identify the best detouring nodes for one-hop source routing. 
Our research shows that in more than 50% of investigated scenarios, it is better to route through the nodes recommended by Akamai than to use the direct paths. Because this is not the case for the rest of the scenarios, we develop low-overhead pruning algorithms that avoid Akamai-driven paths when they are not beneficial. |
Yi Qiao, Fabián E. Bustamante Structured and Unstructured Overlays Under the Microscope - A Measurement-based View of Two P2P Systems That People Use Journal Article In Proc. of the 2006 USENIX Annual Technical Conference, 2006. @article{SUOUM, title = {Structured and Unstructured Overlays Under the Microscope - A Measurement-based View of Two P2P Systems That People Use}, author = {Yi Qiao and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/YQiao06SUO.pdf}, year = {2006}, date = {2006-06-03}, journal = {In Proc. of the 2006 USENIX Annual Technical Conference}, abstract = {Existing peer-to-peer systems rely on overlay network protocols for object storage and retrieval and message routing. These overlay protocols can be broadly classified as structured and unstructured -- structured overlays impose constraints on the network topology for efficient object discovery, while unstructured overlays organize nodes in a random graph topology that is arguably more resilient to peer population transiency. There is an ongoing discussion on the pros and cons of both approaches. This paper contributes to the discussion a multiple-site, measurement-based study of two operational and widely-deployed file-sharing systems. The two protocols are evaluated in terms of resilience, message overhead, and query performance. We validate our findings and further extend our conclusions through detailed analysis and simulation experiments.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Existing peer-to-peer systems rely on overlay network protocols for object storage and retrieval and message routing. These overlay protocols can be broadly classified as structured and unstructured -- structured overlays impose constraints on the network topology for efficient object discovery, while unstructured overlays organize nodes in a random graph topology that is arguably more resilient to peer population transiency. There is an ongoing discussion on the pros and cons of both approaches. This paper contributes to the discussion a multiple-site, measurement-based study of two operational and widely-deployed file-sharing systems. The two protocols are evaluated in terms of resilience, message overhead, and query performance. We validate our findings and further extend our conclusions through detailed analysis and simulation experiments. |
Greg Eisenhauer, Fabián Bustamante, Karsten Schwan Publish-subscribe for high-performance computing Journal Article IEEE Internet Computing -- Special Issue on Asynchronous Middleware and Services, 10(1): 8-25, 2006. @article{PSHPC, title = {Publish-subscribe for high-performance computing}, author = {Greg Eisenhauer and Fabián Bustamante and Karsten Schwan}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Eisenhauer06PSHP.pdf}, year = {2006}, date = {2006-01-03}, journal = {IEEE Internet Computing -- Special Issue on Asynchronous Middleware and Services}, volume = {10(1): 8-25}, abstract = {High-performance computing could significantly benefit from publish-subscribe communication, but current systems don't deliver the kind of performance required by applications in that domain. In response, the authors developed Echo, a high-performance event-delivery middleware designed to scale to the data rates typically found in grid environments. This article provides an overview of Echo, the infrastructure on which it's built, and the techniques used to implement it.}, keywords = {}, pubstate = {published}, tppubtype = {article} } High-performance computing could significantly benefit from publish-subscribe communication, but current systems don't deliver the kind of performance required by applications in that domain. In response, the authors developed Echo, a high-performance event-delivery middleware designed to scale to the data rates typically found in grid environments. This article provides an overview of Echo, the infrastructure on which it's built, and the techniques used to implement it. |
2005 |
David R. Choffnes, Fabián E. Bustamante An Integrated Mobility and Traffic Model for Vehicular Wireless Networks Journal Article In Proc. of the 2nd ACM International Workshop on Vehicular Ad Hoc Networks, 2005. @article{IMTMVMN, title = {An Integrated Mobility and Traffic Model for Vehicular Wireless Networks}, author = {David R. Choffnes and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes05vanet.pdf }, year = {2005}, date = {2005-12-03}, journal = {In Proc. of the 2nd ACM International Workshop on Vehicular Ad Hoc Networks}, abstract = {Ad-hoc wireless communication among highly dynamic, mobile nodes in an urban network is a critical capability for a wide range of important applications including automated vehicles, real-time traffic monitoring and vehicular safety applications. When evaluating application performance in simulation, a realistic mobility model for vehicular ad-hoc networks (VANETs) is critical for accurate results. This paper analyzes ad-hoc wireless network performance in a vehicular network in which nodes move according to a simplified vehicular traffic model on roads defined by real map data. We show that when nodes move according to our street mobility model, STRAW, network performance is significantly different from that of the commonly used random waypoint model. We also demonstrate that protocol performance varies with the type of urban environment. Finally, we use these results to argue for the development of integrated vehicular and network traffic simulators to evaluate vehicular ad-hoc network applications, particularly when the information passed through the network affects node mobility. }, keywords = {}, pubstate = {published}, tppubtype = {article} } Ad-hoc wireless communication among highly dynamic, mobile nodes in an urban network is a critical capability for a wide range of important applications including automated vehicles, real-time traffic monitoring and vehicular safety applications. When evaluating application performance in simulation, a realistic mobility model for vehicular ad-hoc networks (VANETs) is critical for accurate results. This paper analyzes ad-hoc wireless network performance in a vehicular network in which nodes move according to a simplified vehicular traffic model on roads defined by real map data. We show that when nodes move according to our street mobility model, STRAW, network performance is significantly different from that of the commonly used random waypoint model. We also demonstrate that protocol performance varies with the type of urban environment. Finally, we use these results to argue for the development of integrated vehicular and network traffic simulators to evaluate vehicular ad-hoc network applications, particularly when the information passed through the network affects node mobility. |
Stefan Birrer, Fabián E. Bustamante The Feasibility of DHT-based Streaming Multicast Journal Article In Proc. of the 13th IEEE/ACM International Symposium on Modeling, Analysis, and Simulation of Computer and Telecommunication Systems, 2005. @article{FDHTSM, title = {The Feasibility of DHT-based Streaming Multicast}, author = {Stefan Birrer and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/SBirrer05FDSM.pdf}, year = {2005}, date = {2005-11-03}, journal = { In Proc. of the 13th IEEE/ACM International Symposium on Modeling, Analysis, and Simulation of Computer and Telecommunication Systems}, abstract = {We explore the feasibility of streaming applications over DHT-based substrates. In particular, we focus our study on the implications of bandwidth heterogeneity and transiency, both characteristic of these systems' target environment. Our discussion is grounded on an initial evaluation of SplitStream, a representative DHT-based cooperative multicast system.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We explore the feasibility of streaming applications over DHT-based substrates. In particular, we focus our study on the implications of bandwidth heterogeneity and transiency, both characteristic of these systems' target environment. Our discussion is grounded on an initial evaluation of SplitStream, a representative DHT-based cooperative multicast system. |
Yi Qiao, Fabián E. Bustamante Elders Know Best - Handling Churn in Less Structured P2P Systems Journal Article In Proc. of the Fifth IEEE International Conference on Peer-to-Peer Computing, 2005. @article{EKBb, title = {Elders Know Best - Handling Churn in Less Structured P2P Systems}, author = {Yi Qiao and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/YQiao05EKB.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/YQiao05EKB.pps}, year = {2005}, date = {2005-10-03}, journal = {In Proc. of the Fifth IEEE International Conference on Peer-to-Peer Computing}, abstract = {We address the problem of highly transient populations in unstructured and loosely-structured peer-to-peer systems. We propose a number of illustrative query-related strategies and organizational protocols that, by taking into consideration the expected session times of peers (their lifespans), yield systems with performance characteristics more resilient to the natural instability of their environments. We first demonstrate the benefits of lifespan-based organizational protocols in terms of end-application performance and in the context of dynamic and heterogeneous Internet environments. We do this using a number of currently adopted and proposed query-related strategies, including methods for query distribution, caching and replication. We then show, through trace-driven simulation and wide-area experimentation, the performance advantages of lifespan-based, query-related strategies when layered over currently employed and lifespan-based organizational protocols. While merely illustrative, the evaluated strategies and protocols clearly demonstrate the advantages of considering peers' session time in designing widely-deployed peer-to-peer systems.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We address the problem of highly transient populations in unstructured and loosely-structured peer-to-peer systems. We propose a number of illustrative query-related strategies and organizational protocols that, by taking into consideration the expected session times of peers (their lifespans), yield systems with performance characteristics more resilient to the natural instability of their environments. We first demonstrate the benefits of lifespan-based organizational protocols in terms of end-application performance and in the context of dynamic and heterogeneous Internet environments. We do this using a number of currently adopted and proposed query-related strategies, including methods for query distribution, caching and replication. We then show, through trace-driven simulation and wide-area experimentation, the performance advantages of lifespan-based, query-related strategies when layered over currently employed and lifespan-based organizational protocols. While merely illustrative, the evaluated strategies and protocols clearly demonstrate the advantages of considering peers' session time in designing widely-deployed peer-to-peer systems. |
Stefan Birrer, Fabián E. Bustamante Magellan: Performance-based, Cooperative Multicast Journal Article In Proc. of the Tenth International Workshop on Web Content Caching and Distribution, 2005. @article{Magellan, title = {Magellan: Performance-based, Cooperative Multicast}, author = {Stefan Birrer and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/SBirrer05MPCM.pdf}, year = {2005}, date = {2005-09-03}, journal = {In Proc. of the Tenth International Workshop on Web Content Caching and Distribution}, abstract = {Among the proposed overlay multicast protocols, tree-based systems have proven to be highly scalable and efficient in terms of physical link stress and end-to-end latency. Conventional tree-based protocols, however, distribute the forwarding load unevenly among the participating peers. An effective approach for addressing this problem is to stripe the multicast content across a forest of disjoint trees, evenly sharing the forwarding responsibility among participants. DHTs seem to be naturally well suited for the task, as they are able to leverage the inherent properties of their routing model in building such a forest. In heterogeneous environments, though, DHT-based schemes for tree (and forest) construction may yield deep, unbalanced structures with potentially large delivery latencies. This paper introduces Magellan, a new overlay multicast protocol we have built to explore the tradeoff between fairness and performance in these environments. Magellan builds a data-distribution forest out of multiple performance-centric, balanced trees. It assigns every peer in the system a primary tree with priority over the peer's resources. The peers' spare resources are then made available to secondary trees. In this manner, Magellan achieves fairness, ensuring that every participating peer contributes resources to the system. By employing a balanced distribution tree with O(log N)-bounded, end-to-end hop-distance, Magellan also provides a high delivery ratio with comparably low latency. Preliminary simulation results show the advantage of this approach.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Among the proposed overlay multicast protocols, tree-based systems have proven to be highly scalable and efficient in terms of physical link stress and end-to-end latency. Conventional tree-based protocols, however, distribute the forwarding load unevenly among the participating peers. An effective approach for addressing this problem is to stripe the multicast content across a forest of disjoint trees, evenly sharing the forwarding responsibility among participants. DHTs seem to be naturally well suited for the task, as they are able to leverage the inherent properties of their routing model in building such a forest. In heterogeneous environments, though, DHT-based schemes for tree (and forest) construction may yield deep, unbalanced structures with potentially large delivery latencies. This paper introduces Magellan, a new overlay multicast protocol we have built to explore the tradeoff between fairness and performance in these environments. Magellan builds a data-distribution forest out of multiple performance-centric, balanced trees. It assigns every peer in the system a primary tree with priority over the peer's resources. The peers' spare resources are then made available to secondary trees. In this manner, Magellan achieves fairness, ensuring that every participating peer contributes resources to the system. By employing a balanced distribution tree with O(log N)-bounded, end-to-end hop-distance, Magellan also provides a high delivery ratio with comparably low latency. Preliminary simulation results show the advantage of this approach. |
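Magellan's fairness mechanism is the primary/secondary tree split described above. A small sketch of one plausible way to spread primary membership evenly across the forest, by hashing peer IDs; the hashing rule is our illustrative assumption, not the paper's construction:

```python
import hashlib

def primary_tree(peer_id, k):
    """Map each peer to exactly one of k trees as its primary tree,
    where its full forwarding capacity is committed; only spare upload
    capacity is then offered to the remaining k-1 (secondary) trees."""
    digest = hashlib.sha1(peer_id.encode()).digest()
    return int.from_bytes(digest[:4], "big") % k

if __name__ == "__main__":
    from collections import Counter
    peers = [f"peer-{i}" for i in range(1000)]
    print(Counter(primary_tree(p, 4) for p in peers))  # roughly 250 peers per tree
```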
Ashish Gupta, Peter Dinda, Fabián E. Bustamante Distributed Popularity Indices Journal Article Poster in Proc. of ACM SIGCOMM, 2005. @article{DPI, title = {Distributed Popularity Indices}, author = {Ashish Gupta and Peter Dinda and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/AGupta05DPI.pdf}, year = {2005}, date = {2005-08-03}, journal = {Poster in Proc. of ACM SIGCOMM}, keywords = {}, pubstate = {published}, tppubtype = {article} } |
Dong Lu, Yi Qiao, Peter Dinda, Fabián E. Bustamante Characterizing and Predicting TCP Throughput on the Wide Area Network Journal Article In Proc. of the 25th IEEE International Conference on Distributed Computing Systems, 2005. @article{TTWAN, title = {Characterizing and Predicting TCP Throughput on the Wide Area Network}, author = {Dong Lu and Yi Qiao and Peter Dinda and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Donglu05ICDCS.pdf}, year = {2005}, date = {2005-06-03}, journal = {In Proc. of the 25th IEEE International Conference on Distributed Computing Systems}, abstract = {DualPats exploits the strong correlation between TCP throughput and flow size, and the statistical stability of Internet path characteristics to accurately predict the TCP throughput of large transfers using active probing. We propose additional mechanisms to explain the correlation, and then analyze why traditional TCP benchmarking fails to predict the throughput of large transfers well. We characterize stability and develop a dynamic sampling rate adjustment algorithm so that we probe a path based on its stability. Our analysis, design, and evaluation are based on a large-scale measurement study.}, keywords = {}, pubstate = {published}, tppubtype = {article} } DualPats exploits the strong correlation between TCP throughput and flow size, and the statistical stability of Internet path characteristics to accurately predict the TCP throughput of large transfers using active probing. We propose additional mechanisms to explain the correlation, and then analyze why traditional TCP benchmarking fails to predict the throughput of large transfers well. We characterize stability and develop a dynamic sampling rate adjustment algorithm so that we probe a path based on its stability. Our analysis, design, and evaluation are based on a large-scale measurement study. |
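The correlation DualPats exploits suggests a simple predictor: if transfer time grows roughly linearly with flow size on a stable path, two probe transfers pin down the line. A hedged sketch of that idea; the probe sizes and least-squares helper are illustrative assumptions, not the paper's implementation:

```python
def fit_linear(samples):
    """Ordinary least squares for time = a*size + b over (size, time) probes."""
    n = len(samples)
    sx = sum(s for s, _ in samples)
    sy = sum(t for _, t in samples)
    sxx = sum(s * s for s, _ in samples)
    sxy = sum(s * t for s, t in samples)
    a = (n * sxy - sx * sy) / (n * sxx - sx * sx)
    b = (sy - a * sx) / n
    return a, b

def predict_throughput(samples, size):
    """Predicted bytes/sec for a transfer of `size` bytes on this path."""
    a, b = fit_linear(samples)
    return size / (a * size + b)

if __name__ == "__main__":
    probes = [(1e6, 1.2), (4e6, 2.4)]       # (bytes, seconds) from two active probes
    print(predict_throughput(probes, 1e8))  # ~2.45e6 B/s predicted for a 100 MB transfer
```

The dynamic sampling-rate piece would then re-probe a path more often the less stable its fitted coefficients prove to be.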
Stefan Birrer, Fabián E. Bustamante, Dong Lu, Peter Dinda, Yi Qiao FatNemo: Multi-Source Multicast Overlay Fat-Tree Journal Article Poster in Proc. of the Second Symposium on Networked Systems Design & Implementation, 2005. @article{FatNemob, title = {FatNemo: Multi-Source Multicast Overlay Fat-Tree}, author = {Stefan Birrer and Fabián E. Bustamante and Dong Lu and Peter Dinda and Yi Qiao}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/SBirrer05NSDI.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/nsdi05poster.pdf}, year = {2005}, date = {2005-05-03}, journal = {Poster in Proc. of the Second Symposium on Networked Systems Design & Implementation}, abstract = {This poster presents the idea of emulating fat-trees in overlays for multi-source multicast applications. Fat-trees are like real trees in that their branches become thicker the closer one gets to the root, thus overcoming the "root bottleneck" of regular trees. We describe FatNemo, a novel overlay multi-source multicast protocol based on this idea, and present early experimental and analytical results showing the advantages of this approach. FatNemo organizes its members into a tree of clusters with cluster sizes increasing closer to the root. It uses bandwidth capacity to decide the highest layer in which a peer can participate, and relies on co-leaders to share the forwarding responsibility and to increase the tree's resilience to path and node failures.}, keywords = {}, pubstate = {published}, tppubtype = {article} } This poster presents the idea of emulating fat-trees in overlays for multi-source multicast applications. Fat-trees are like real trees in that their branches become thicker the closer one gets to the root, thus overcoming the "root bottleneck" of regular trees. We describe FatNemo, a novel overlay multi-source multicast protocol based on this idea, and present early experimental and analytical results showing the advantages of this approach. FatNemo organizes its members into a tree of clusters with cluster sizes increasing closer to the root. It uses bandwidth capacity to decide the highest layer in which a peer can participate, and relies on co-leaders to share the forwarding responsibility and to increase the tree's resilience to path and node failures. |
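FatNemo's placement rule, bandwidth deciding the highest layer a peer can occupy, admits a compact sketch; the thresholds below are illustrative stand-ins for whatever capacity each layer's cluster sizes actually demand:

```python
def highest_layer(upload_bw, layer_min_bw):
    """Return the highest layer this peer may join. Layers closer to the
    root aggregate more of the multicast traffic, so layer_min_bw is an
    increasing sequence of capacity requirements, index 0 = leaf layer."""
    layer = 0
    for i, need in enumerate(layer_min_bw):
        if upload_bw >= need:
            layer = i
    return layer

if __name__ == "__main__":
    tiers = [0.5, 2.0, 8.0, 32.0]      # Mbps, made-up thresholds per layer
    print(highest_layer(10.0, tiers))  # -> 2: enough capacity for layer 2, not 3
```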
Ashish Gupta, Manan Sanghi, Peter Dinda, Fabián E. Bustamante Magnolia: A novel DHT architecture for keyword-based searching Journal Article In Proc. of the Second Symposium on Networked Systems Design & Implementation, 2005. @article{Magnolia, title = {Magnolia: A novel DHT architecture for keyword-based searching}, author = {Ashish Gupta and Manan Sanghi and Peter Dinda and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Gupta05NSDIPoster.pdf}, year = {2005}, date = {2005-05-03}, journal = {In Proc. of the Second Symposium on Networked Systems Design & Implementation}, keywords = {}, pubstate = {published}, tppubtype = {article} } |
Dong Lu, Yi Qiao, Peter A. Dinda, Fabián E. Bustamante Modeling and Taming Parallel TCP on the Wide Area Network Journal Article In Proc. of the 19th IEEE International Parallel and Distributed Processing Symposium, 2005. @article{MTPTWAN, title = {Modeling and Taming Parallel TCP on the Wide Area Network}, author = {Dong Lu and Yi Qiao and Peter A. Dinda and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DongLuIPDPS05.pdf}, year = {2005}, date = {2005-04-03}, journal = {In Proc. of the 19th IEEE International Parallel and Distributed Processing Symposium}, abstract = {Parallel TCP flows are broadly used in the high performance distributed computing community to enhance network throughput, particularly for large data transfers. Previous research has studied the mechanism by which parallel TCP improves aggregate throughput, but there doesn't exist any practical mechanism to predict its throughput. In this work, we address how to predict parallel TCP throughput as a function of the number of flows, as well as how to predict the corresponding impact on cross traffic. To the best of our knowledge, we are the first to answer the following question on behalf of a user: what number of parallel flows will give the highest throughput with less than a p% impact on cross traffic? We term this the maximum nondisruptive throughput. We begin by studying the behavior of parallel TCP in simulation to help derive a model for predicting parallel TCP throughput and its impact on cross traffic. Combining this model with some previous findings we derive a simple, yet effective, online advisor. We evaluate our advisor through simulation-based and wide-area experimentation.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Parallel TCP flows are broadly used in the high performance distributed computing community to enhance network throughput, particularly for large data transfers. Previous research has studied the mechanism by which parallel TCP improves aggregate throughput, but there doesn't exist any practical mechanism to predict its throughput. In this work, we address how to predict parallel TCP throughput as a function of the number of flows, as well as how to predict the corresponding impact on cross traffic. To the best of our knowledge, we are the first to answer the following question on behalf of a user: what number of parallel flows will give the highest throughput with less than a p% impact on cross traffic? We term this the maximum nondisruptive throughput. We begin by studying the behavior of parallel TCP in simulation to help derive a model for predicting parallel TCP throughput and its impact on cross traffic. Combining this model with some previous findings we derive a simple, yet effective, online advisor. We evaluate our advisor through simulation-based and wide-area experimentation. |
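The "maximum nondisruptive throughput" question has a direct formulation once the two predictions exist. A minimal sketch, assuming calibrated models throughput(n) and impact(n) are already available; both curves below are made up for illustration:

```python
def max_nondisruptive_flows(throughput, impact, p, n_max=16):
    """Pick the flow count giving the highest predicted aggregate
    throughput whose predicted impact on cross traffic stays below p%.
    throughput(n) and impact(n) are models calibrated from probing."""
    best = None
    for n in range(1, n_max + 1):
        if impact(n) < p and (best is None or throughput(n) > throughput(best)):
            best = n
    return best

if __name__ == "__main__":
    tput = lambda n: 100 * n / (n + 2)   # Mbps; made-up saturating curve
    impact = lambda n: 1.5 * (n - 1)     # % cross-traffic loss; made-up
    print(max_nondisruptive_flows(tput, impact, p=5.0))  # -> 4 parallel flows
```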
Stefan Birrer, Fabián E. Bustamante Nemo: Resilient Peer-to-Peer Multicast without the Cost Journal Article In Proc. of the 12th Annual Multimedia Computing and Networking Conference, 2005. @article{Nemoc, title = {Nemo: Resilient Peer-to-Peer Multicast without the Cost}, author = {Stefan Birrer and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/SBirrer05MMCN.pdf}, year = {2005}, date = {2005-03-03}, journal = {In Proc. of the 12th Annual Multimedia Computing and Networking Conference}, abstract = {We introduce Nemo, a novel peer-to-peer multicast protocol that achieves a high delivery ratio without sacrificing end-to-end latency or incurring additional costs. Based on two simple techniques: (1) co-leaders to minimize dependencies, and (2) triggered negative acknowledgments (NACKs) to detect lost packets, Nemo's design emphasizes conceptual simplicity and minimum dependencies, thus achieving performance characteristics capable of withstanding the natural instability of its target environment. We present an extensive comparative evaluation of our protocol through simulation and wide-area experimentation. We contrast the scalability and performance of Nemo with that of three alternative protocols: Narada, Nice and Nice-PRM. Our results show that Nemo can achieve delivery ratios (up to 99.9%) similar to those of comparable protocols under high failure rates, but at a fraction of their cost in terms of duplicate packets (reductions > 90%) and control-related traffic (reductions > 20%).}, keywords = {}, pubstate = {published}, tppubtype = {article} } We introduce Nemo, a novel peer-to-peer multicast protocol that achieves a high delivery ratio without sacrificing end-to-end latency or incurring additional costs. Based on two simple techniques: (1) co-leaders to minimize dependencies, and (2) triggered negative acknowledgments (NACKs) to detect lost packets, Nemo's design emphasizes conceptual simplicity and minimum dependencies, thus achieving performance characteristics capable of withstanding the natural instability of its target environment. We present an extensive comparative evaluation of our protocol through simulation and wide-area experimentation. We contrast the scalability and performance of Nemo with that of three alternative protocols: Narada, Nice and Nice-PRM. Our results show that Nemo can achieve delivery ratios (up to 99.9%) similar to those of comparable protocols under high failure rates, but at a fraction of their cost in terms of duplicate packets (reductions > 90%) and control-related traffic (reductions > 20%). |
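Of Nemo's two techniques, the triggered-NACK side is easy to sketch: a receiver NACKs the moment a sequence gap appears, rather than waiting on sender timeouts. The handler below is our illustration, not Nemo's wire protocol; co-leaders would be the alternate parents such NACKs are sent to:

```python
def on_packet(seq, expected, send_nack):
    """Triggered-NACK loss detection: a gap in sequence numbers
    immediately NACKs the missing range instead of waiting for a
    sender-side timeout. Returns the next expected sequence number."""
    if seq > expected:
        for missing in range(expected, seq):
            send_nack(missing)
    return max(expected, seq + 1)

if __name__ == "__main__":
    nacked = []
    expected = 0
    for seq in [0, 1, 4, 5, 3]:  # packets 2 and 3 go missing; 3 arrives late
        expected = on_packet(seq, expected, nacked.append)
    print(nacked)  # [2, 3] -- retransmission requested once per gap
```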
Stefan Birrer, Fabián E. Bustamante Reef: Efficiently designing and evaluating overlay algorithms Technical Report Department of Computer Science, Northwestern University (NWU-CS-05-14), 2005. @techreport{Reef, title = {Reef: Efficiently designing and evaluating overlay algorithms}, author = {Stefan Birrer and Fabián E. Bustamante}, year = {2005}, date = {2005-02-03}, number = {NWU-CS-05-14}, institution = {Department of Computer Science, Northwestern University}, keywords = {}, pubstate = {published}, tppubtype = {techreport} } |
David R. Choffnes, Fabián E. Bustamante Modeling Vehicular Traffic and Mobility for Vehicular Wireless Networks Technical Report Department of Computer Science, Northwestern University (NWU-CS-05-03), 2005. @techreport{MVTMVEN, title = {Modeling Vehicular Traffic and Mobility for Vehicular Wireless Networks}, author = {David R. Choffnes and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/NWU-CS-05-03-1-1.pdf}, year = {2005}, date = {2005-01-03}, number = {NWU-CS-05-03}, institution = {Department of Computer Science, Northwestern University}, abstract = {Ad-hoc wireless communication among highly dynamic, mobile nodes in an urban network is a critical capability for a wide range of important applications including automated vehicles, real-time traffic monitoring, and battleground communication. When evaluating application performance through simulation, a realistic mobility model for vehicular ad-hoc networks (VANETs) is critical for accurate results. This technical report discusses the implementation of STRAW, a new mobility model for VANETs in which nodes move according to a realistic vehicular traffic model on roads defined by real street map data. The challenge is to create a traffic model that accounts for individual vehicle motion without incurring significant overhead relative to the cost of performing the wireless network simulation. We identify essential and optional techniques for modeling vehicular motion that can be integrated into any wireless network simulator. We then detail choices we made in implementing STRAW.}, keywords = {}, pubstate = {published}, tppubtype = {techreport} } Ad-hoc wireless communication among highly dynamic, mobile nodes in an urban network is a critical capability for a wide range of important applications including automated vehicles, real-time traffic monitoring, and battleground communication. When evaluating application performance through simulation, a realistic mobility model for vehicular ad-hoc networks (VANETs) is critical for accurate results. This technical report discusses the implementation of STRAW, a new mobility model for VANETs in which nodes move according to a realistic vehicular traffic model on roads defined by real street map data. The challenge is to create a traffic model that accounts for individual vehicle motion without incurring significant overhead relative to the cost of performing the wireless network simulation. We identify essential and optional techniques for modeling vehicular motion that can be integrated into any wireless network simulator. We then detail choices we made in implementing STRAW. |
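STRAW-style intra-segment motion reduces to a car-following update applied per vehicle per simulation tick. A hedged sketch of such an update; the acceleration constant and minimum gap are illustrative values, not STRAW's calibrated parameters:

```python
def advance(position, speed, leader_position, speed_limit, dt,
            accel=2.0, min_gap=5.0):
    """One car-following step: accelerate toward the segment's speed
    limit, but never close within min_gap (meters) of the vehicle ahead.
    Returns the vehicle's new position and effective speed."""
    speed = min(speed + accel * dt, speed_limit)
    headroom = leader_position - position - min_gap
    step = min(speed * dt, max(headroom, 0.0))
    return position + step, step / dt

if __name__ == "__main__":
    pos, v = 0.0, 0.0
    for _ in range(10):
        pos, v = advance(pos, v, leader_position=40.0, speed_limit=13.9, dt=1.0)
    print(round(pos, 1), round(v, 1))  # -> 35.0 0.0: stopped min_gap behind the leader
```

Running one such update per vehicle per tick is what keeps the mobility model's overhead small relative to the wireless simulation itself.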