@inproceedings{carisimo:lhl, title = {A hop away from everywhere: A view of the intercontinental long-haul infrastructure}, author = {Esteban Carisimo and Caleb Wang and Mia Weaver and Fabián E. Bustamante and Paul Barford}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2023/10/carisimo-lhl-24.pdf}, year = {2024}, date = {2024-06-10}, booktitle = {Proc. of ACM SIGMETRICS}, abstract = {We present a longitudinal study of intercontinental long-haul links (LHLs) – links with latencies significantly higher than those of all other links in a traceroute path. Our study is motivated by the recognition of these LHLs as a network-layer manifestation of critical transoceanic undersea cables. We present a methodology and associated processing system for identifying long-haul links in traceroute measurements. We apply this system to a large corpus of traceroute data and report on multiple aspects of long-haul connectivity, including country-level prevalence, routers as international gateways, preferred long-haul destinations, and the evolution of these characteristics over a 7-year period. We identify 85,620 layer-3 links (out of 2.7M links in a large traceroute dataset) that satisfy our definition of intercontinental long-haul links, with many of them terminating in a relatively small number of nodes. An analysis of connected components shows a clearly dominant component with a relative size that remains stable despite significant growth of the long-haul infrastructure.}, keywords = {Internet measurement, SCN}, pubstate = {forthcoming}, tppubtype = {inproceedings} } @inproceedings{bischof:sigcomm23, title = {Destination Unreachable: Characterizing Internet Outages and Shutdowns}, author = {Zachary Bischof and Kennedy Pitcher and Esteban Carisimo and Amanda Meng and Rafael Nunes and Ramakrishna Padmanabhan and Margaret E. Roberts and Alex C. Snoeren and Alberto Dainotti}, year = {2023}, date = {2023-09-11}, booktitle = {Proc. of ACM SIGCOMM}, abstract = {In this paper, we provide the first comprehensive longitudinal analysis of government-ordered Internet shutdowns and spontaneous outages (i.e., disruptions not ordered by the government). We describe the available tools, data sources and methods to identify and analyze Internet shutdowns. We then merge manually curated datasets on known government-ordered shutdowns and large-scale Internet outages, further augmenting them with data on real-world events, macroeconomic and sociopolitical indicators, and network operator statistics. Our analysis confirms previous findings on the economic and political profiles of countries with government-ordered shutdowns. Extending this analysis, we find that countries with national-scale spontaneous outages often have profiles similar to countries with shutdowns, differing from countries that experience neither. However, we find that government-ordered shutdowns are many times more likely to occur on days of mobilization, coinciding with elections, protests, and coups. Our study also characterizes the temporal properties of Internet shutdowns and finds that they differ significantly from spontaneous outages in terms of duration, recurrence interval, and start times.}, keywords = {Internet measurement}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{global:sigmetrics23, title = {Each at its own pace: Third-party Dependency and Centralization Around the World}, author = {Rashna Kumar and Sana Asif and Elise Lee and Fabián E.
Bustamante}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2023/02/RKumar-SIGMETRICS23.pdf}, year = {2023}, date = {2023-06-19}, booktitle = {Proc. of ACM SIGMETRICS}, keywords = {Internet centralization, Internet measurement}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{as2orgplus:PAM23, title = {as2org+: Enriching AS-to-Organization Mappings with PeeringDB}, author = {Augusto Arturi and Esteban Carisimo and Fabián E. Bustamante}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2023/02/AArturi-PAM23.pdf}, year = {2023}, date = {2023-03-21}, booktitle = {Proc. of the Passive and Active Measurement Conference (PAM)}, keywords = {Measurement tools}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{sergi:gmobile, title = {Global Mobile Network Aggregators: Taxonomy, Roaming Performance and Optimization}, author = {Sergi Alcalá-Marín and Aravindh Raman and Weili Wu and Andra Lutu and Marcelo Bagnulo and Ozgu Alay and Fabián E. Bustamante}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2022/05/SAlcala-Marin-Mobisys22.pdf}, year = {2022}, date = {2022-06-27}, booktitle = {Proc. of ACM International Conference on Mobile Systems, Applications, and Services (MobiSys)}, abstract = {A new model of global virtual Mobile Network Operator (MNO) – the Mobile Network Aggregator (MNA) – has recently been gaining significant traction. MNAs provide mobile communications services to their customers by leveraging multiple MNOs and connecting through the one that best matches their customers’ needs at any point in time (and space). MNAs naturally provide optimized global coverage by connecting through local MNOs across the different geographic regions where they provide service. In this paper, we dissect the operations of three MNAs, namely, Google Fi, Twilio, and Truphone. We perform measurements using the three selected MNAs to assess their performance for three major applications, namely, DNS, web browsing, and video streaming. We benchmark their performance by comparing it to that of a traditional MNO. We find that even though MNAs incur some delay penalty compared to the service accessed through the local MNOs in the geographic area where the user is roaming, they can significantly improve performance compared to the traditional roaming model of MNOs (e.g., home-routed roaming). Finally, in order to fully quantify the potential benefits that can be realized using the MNA model, we perform a set of emulations by deploying both control and user plane functions of open-source 5G implementations in different AWS locations, and measure the potential gains.}, keywords = {Internet measurement, mobile}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{varvello:batterylabb, title = {BatteryLab: A Collaborative Platform for Power Monitoring}, author = {Matteo Varvello and Kleomenis Katevas and Mihai Plesa and Hamed Haddadi and Fabián E. Bustamante and Ben Livshits}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2022/03/Varvello2022_Chapter_BatteryLabACollaborativePlatfo.pdf}, year = {2022}, date = {2022-03-28}, booktitle = {Proc. of the Passive and Active Measurement Conference (PAM)}, abstract = {Advances in cloud computing have simplified the way that both software development and testing are performed. This is not true for battery testing, for which state-of-the-art test-beds simply consist of one phone attached to a power meter.
These test-beds have limited resources and access, and are overall hard to maintain; for these reasons, they often sit idle with no experiments to run. In this paper, we propose to share existing battery test-beds and transform them into vantage points of BatteryLab, a power monitoring platform offering heterogeneous devices and testing conditions. We have achieved this vision with a combination of hardware and software that allows us to augment existing battery test-beds with remote capabilities. BatteryLab currently counts three vantage points, one in Europe and two in the US, hosting three Android devices and one iPhone 7. We benchmark BatteryLab with respect to the accuracy of its battery readings, system performance, and platform heterogeneity. Next, we demonstrate how measurements can be run atop BatteryLab by developing the “Web Power Monitor” (WPM), a tool which can measure website power consumption at scale. We released WPM and used it to report on the energy consumption of Alexa’s top 1,000 websites across 3 locations and 4 devices (both Android and iOS).}, keywords = {measurement}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{carisimo:jitterbug, title = {Jitterbug: A new framework for jitter-based congestion inference}, author = {Esteban Carisimo and Ricky K. P. Mok and David D. Clark and kc claffy}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2023/02/ECarisimo-PAM22.pdf}, year = {2022}, date = {2022-03-28}, booktitle = {Proc. of the Passive and Active Measurement Conference (PAM)}, abstract = {We investigate a novel approach to the use of jitter to infer network congestion using data collected by probes in access networks. We discovered a set of features in jitter and jitter dispersion —a jitter-derived time series we define in this paper— that are characteristic of periods of congestion. We leverage these concepts to create a jitter-based congestion inference framework that we call Jitterbug. We apply Jitterbug’s capabilities to a wide range of traffic scenarios and discover that Jitterbug can correctly identify both recurrent and one-off congestion events. We validate Jitterbug inferences against state-of-the-art autocorrelation-based inferences of recurrent congestion. We find that the two approaches have strong congruity in their inferences, but Jitterbug holds promise for detecting one-off as well as recurrent congestion. We identify several future directions for this research, including leveraging ML/AI techniques to optimize the performance and accuracy of this approach in operational settings.}, keywords = {congestion}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{carisimo:cti, title = {Quantifying Nations' Exposure to Traffic Observation and Selective Tampering}, author = {Alexander Gamero-Garrido and Esteban Carisimo and Shuai Hao and Bradley Huffaker and Alex C. Snoeren and Alberto Dainotti}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2023/02/AGameroG-PAM22.pdf https://github.com/estcarisimo/state-owned-ases}, year = {2022}, date = {2022-03-28}, booktitle = {Proc. of the Passive and Active Measurement Conference (PAM)}, abstract = {Almost all popular Internet services are hosted in a select set of countries, forcing other nations to rely on international connectivity to access them.
We identify nations where traffic towards a large portion of the country is serviced by a small number of Autonomous Systems and, therefore, may be exposed to observation or selective tampering by these ASes. We introduce the Country-level Transit Influence (CTI) metric to quantify the significance of a given AS on the international transit service of a particular country. By studying the CTI values for the top ASes in each country, we find that 34 nations have transit ecosystems that render them particularly exposed, where a single AS is privy to traffic destined to over 40% of their IP addresses. In the nations where we are able to validate our findings with in-country operators, our top-five ASes are 90% accurate on average. In the countries we examine, CTI reveals that two classes of networks frequently play a particularly prominent role: submarine cable operators and state-owned ASes.}, keywords = {bgp}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{bjun:hotmobile22, title = {Reining in Mobile Web Performance with Document and Permission Policies}, author = {Byungjin Jun and Fabián E. Bustamante and Ben Greenstein and Ian Clelland}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2022/03/bjun-hotmobile22.pdf}, year = {2022}, date = {2022-03-09}, booktitle = {Proc. of International Workshop on Mobile Computing Systems and Applications (HotMobile)}, abstract = {The quality of the mobile web experience remains poor, partially as a result of complex websites and design choices that worsen performance, particularly for users on suboptimal networks or with low-end devices. Prior proposed solutions have seen limited adoption due to the demand they place on developers and content providers, and the infrastructure needed to support them. We argue that Document and Permissions Policies – ongoing efforts to enforce good practices on web design – may offer the basis for a readily available and easily adoptable solution, as they encode key best practices for web development. In this paper, as a first step, we evaluate the potential performance cost of violating these well-understood best practices and how common such violations are in today’s web. Our analysis shows, for example, that controlling for the unsized-media policy, something applicable to 70% of the top Alexa websites, can indeed significantly reduce Cumulative Layout Shift, a core metric for evaluating web performance.}, keywords = {Edge Experimentation, Internet measurement, mobile, Web}, pubstate = {published}, tppubtype = {inproceedings} } @article{claffy:wombir21, title = {Workshop on Overcoming Measurement Barriers to Internet Research (WOMBIR 2021) final report}, author = {kc claffy and David D. Clark and John S. Heidemann and Fabián E. Bustamante and Mattijs Jonker and Aaron Schulman and Ellen Zegura}, url = {https://dl.acm.org/doi/10.1145/3477482.3477489}, year = {2021}, date = {2021-12-11}, journal = {SIGCOMM Comput. Commun. Rev.}, volume = {51}, number = {3}, pages = {33-40}, abstract = {In January and April 2021 we held the Workshop on Overcoming Measurement Barriers to Internet Research (WOMBIR) with the goal of understanding challenges in network and security data set collection and sharing. Most workshop attendees provided white papers describing their perspectives, and many participated in short talks and discussions in two virtual workshops over five days. That discussion produced consensus around several points.
First, many aspects of the Internet are characterized by decreasing visibility of important network properties, which is in tension with the Internet’s role as critical infrastructure. We discussed three specific research areas that illustrate this tension: security, Internet access, and mobile networking. We discussed visibility challenges at all layers of the networking stack, and the challenge of gathering data and validating inferences. Important data sets require longitudinal (long-term, ongoing) data collection and sharing, support for which is more challenging for Internet research than for other fields. We discussed why a combination of technical and policy methods is necessary to safeguard privacy when using or sharing measurement data. Workshop participants proposed several opportunities to accelerate progress, some of which require coordination across government, industry, and academia.}, keywords = {Internet measurement}, pubstate = {published}, tppubtype = {article} } @inproceedings{carisimo:stateowned:imc21, title = {Identifying ASes of State-Owned Internet Operators}, author = {Esteban Carisimo and Alexander Gamero-Garrido and Alex C. Snoeren and Alberto Dainotti}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2021/10/ECarisimo-IMC21.pdf https://github.com/estcarisimo/state-owned-ases}, year = {2021}, date = {2021-11-02}, booktitle = {Proc. of the ACM Internet Measurement Conference (IMC)}, abstract = {In this paper we present and apply a methodology to accurately identify state-owned Internet operators worldwide and their Autonomous System Numbers (ASNs). Obtaining an accurate dataset of ASNs of state-owned Internet operators enables studies where state ownership is an important dimension, including research related to Internet censorship and surveillance, cyber-warfare and international relations, ICT development and the digital divide, critical infrastructure protection, and public policy. Our approach is based on a multi-stage, in-depth manual analysis of datasets that are highly diverse in nature. We find that each of these datasets contributes in different ways to the classification process, and we identify limitations and shortcomings of these data sources. We obtain the first data set of this type, make it available to the research community together with several lessons we learned in the process, and perform a preliminary analysis based on our data. We find that 53% (i.e., 123) of the world’s countries are majority owners of Internet operators, highlighting that this is a widespread phenomenon. We also find and document the existence of subsidiaries of state-owned operators in foreign countries, an aspect that touches every continent and particularly affects Africa. We hope that this work and the associated data set will inspire and enable a broad set of Internet measurement studies and interdisciplinary research.}, keywords = {Internet measurement}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{sasif:witnesses:imc21, title = {Networked Systems as Witnesses - Association Between Content Demand, Human Mobility and an Infection Spread}, author = {Sana Asif and Byungjin Jun and Fabián E. Bustamante and John P. Rula}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2021/10/SAsif-IMC21-3.pdf}, year = {2021}, date = {2021-11-02}, booktitle = {Proc.
of ACM Internet Measurement Conference (IMC)}, abstract = {While non-pharmaceutical interventions (NPIs) such as stay-at-home, shelter-in-place, and school closures are considered the most effective ways to limit the spread of infectious diseases, their use is generally controversial given the political, ethical, and socioeconomic issues they raise. Part of the challenge is the non-obvious link between the level of compliance with such measures and their effectiveness. In this paper, we argue that users' demand on networked services can serve as a proxy for the social distancing behavior of communities, offering a new approach to evaluate these measures' effectiveness. We leverage the vantage point of one of the largest worldwide CDNs, together with publicly available datasets of mobile users' behavior, to examine the relationship between changes in user demand on the CDN and different interventions, including stay-at-home/shelter-in-place, mask mandates, and school closures. As networked systems become integral parts of our everyday lives, they can act as witnesses of our individual and collective actions. Our study illustrates the potential value of this new role.}, keywords = {COVID-19, Internet measurement}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{jun:webtune, title = {WebTune: A Distributed Platform for Web Performance Measurements}, author = {Byungjin Jun and Matteo Varvello and Yasir Zaki and Fabián E. Bustamante}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2021/07/WebTune-4.pdf}, year = {2021}, date = {2021-09-14}, booktitle = {Proc. of the Network Traffic Measurement and Analysis Conference (TMA)}, abstract = {Web performance researchers have to regularly choose between synthetic and in-the-wild experiments. On the one hand, synthetic tests are useful to isolate \textit{what} needs to be measured, but they lack the realism of real networks, websites, and server-specific configurations. Even enumerating all these conditions can be challenging, and no existing tool or testbed currently allows for this. In this paper, as in life, we argue that \textit{unity makes strength}: by sharing part of their experimental resources, researchers can naturally build their desired realistic conditions without compromising on the flexibility of synthetic tests. We take a step toward realizing this vision with WebTune, a distributed platform for web measurements. At a high level, WebTune seamlessly integrates with popular web measurement tools like Lighthouse and Puppeteer, exposing to an experimenter fine-grained control over real networks and servers, as one would expect in synthetic tests. Under the hood, the tool serves ``Webtuned'' versions of websites, which are \textit{cloned} and distributed to a testing network built on resources donated by the community. We evaluate WebTune with respect to its cloning \textit{accuracy} and the \textit{complexity} of network conditions to be reproduced. Further, we demonstrate its functioning via a 5-node deployment.}, keywords = {mobile, Web, Web QoE TCP HTTP}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{lutu:sigcomm21, title = {Insights from Operating an IP Exchange Provider}, author = {Andra Lutu and Diego Perino and Marcelo Bagnulo and Fabián E. Bustamante}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2021/07/sigcomm2021-1.pdf}, year = {2021}, date = {2021-08-23}, booktitle = {Proc.
of ACM Conference on Applications, Technologies, Architectures, and Protocols for Computer Communication (SIGCOMM)}, abstract = {IP Exchange Providers (IPX-Ps) offer their customers (e.g., mobile or IoT service providers) global data roaming and support for a variety of emerging services. They peer with other IPX-Ps and form the IPX network, which interconnects 800 MNOs worldwide, offering their customers access to mobile services in any other country. Despite the importance of IPX-Ps, little is known about their operations and performance. In this paper, we shed light on these opaque providers by analyzing a large IPX-P with more than 100 PoPs in 40+ countries, and a particularly strong presence in the Americas and Europe. Specifically, we characterize the traffic and performance of the main infrastructures of the IPX-P (i.e., 2-3-4G signaling and GTP tunneling), and draw implications for its operation as well as for the IPX-P’s customers. Our analysis is based on statistics we collected during two time periods (i.e., prior to and during the COVID-19 pandemic) and includes insights on the main services the platform supports (i.e., IoT and data roaming), its traffic breakdown and geographical/temporal distribution, and its communication performance (e.g., tunnel setup time, RTTs). Our results constitute a step towards advancing the understanding of IPX-Ps at their core, and provide guidelines for their operations and customer satisfaction.}, keywords = {mobile IPX roaming}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{rkumar:dns:sigcomm21poster, title = {Decentralization, privacy and performance for DNS}, author = {Rashna Kumar and Fabián E. Bustamante}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2021/10/RKumar-DNS-Poster.pdf https://src.acm.org/winners/2022}, year = {2021}, date = {2021-08-23}, booktitle = {Proc. of ACM Conference on Applications, Technologies, Architectures, and Protocols for Computer Communication (SIGCOMM) - Poster - Winner SIGCOMM ACM SRC Competition}, abstract = {The Domain Name System (DNS) is both a key determinant of users’ quality of experience (QoE) and privy to their tastes, preferences, and even the devices they own. Growing concern about user privacy and QoE has brought about a number of alternative DNS techniques and services, from public DNS to encrypted and oblivious DNS. Today, a user choosing among these services and their few providers is forced to prioritize -- knowingly or not -- among web performance, privacy, reliability, and the potential for a centralized market and its consequences. We present Ónoma, a DNS resolver that addresses concerns about DNS centralization without sacrificing privacy or QoE by sharding requests across alternative DNS services, placing these services in competition with each other, and pushing resolution to the network edge. Our preliminary evaluation shows the potential benefits of this approach across locales, with different DNS services, content providers, and content distribution networks.}, keywords = {DNS, Internet centralization}, pubstate = {published}, tppubtype = {inproceedings} } @techreport{jnewman:mbz, title = {Back in control -- An extensible middle-box on your phone}, author = {James Newman and Abbas Razaghpanah and Narseo Vallina-Rodriguez and Fabián E.
Bustamante and Mark Allman and Diego Perino and Alessandro Finamore}, url = {https://arxiv.org/abs/2012.07695 https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2021/03/JNewman-arXiv20.pdf}, year = {2020}, date = {2020-12-14}, number = {arXiv:2012.07695}, institution = {arXiv}, abstract = {The closed design of mobile devices -- with their increased security and consistent user interfaces -- is in large part responsible for their becoming the dominant platform for accessing the Internet. These benefits, however, are not without a cost. The operation of mobile devices and their apps is not easy to understand by either users or operators. We argue for recovering transparency and control on mobile devices through an extensible platform that can intercept and modify traffic before leaving the device or, on arrival, before it reaches the operating system. Conceptually, this is the same view of the traffic that a traditional middlebox would have at the far end of the first link in the network path. We call this platform ``middlebox zero'' or MBZ. By being on-board, MBZ also leverages local context as it processes the traffic and complements the network-wide view of standard middleboxes. We discuss the challenges of the MBZ approach, sketch a working design, and illustrate its potential with some concrete examples.}, keywords = {measurement, middlebox zero, mobile}, pubstate = {published}, tppubtype = {techreport} } @inproceedings{nagarwal:delaytcp, title = {Mind the Delay: The Adverse Effects of Delay-Based TCP on HTTP}, author = {Neil Agarwal and Matteo Varvello and Andrius Aucinas and Fabián E. Bustamante and Ravi Netravali}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2021/03/NAgarwal-CoNEXT20.pdf}, year = {2020}, date = {2020-12-01}, booktitle = {Proc. of ACM International Conference on emerging Networking EXperiments and Technologies (CoNEXT)}, abstract = {The last three decades have seen much evolution in web and network protocols: amongst them, a transition from HTTP/1.1 to HTTP/2 and a shift from loss-based to delay-based TCP congestion control algorithms. This paper argues that these two trends are at odds with one another, ultimately hurting web performance. Using a controlled synthetic study, we show how delay-based congestion control protocols (e.g., BBR and CUBIC + Hybrid Slow Start) result in the underestimation of the available congestion window in mobile networks, and how that dramatically hampers the effectiveness of HTTP/2. To quantify the impact of these findings on the current web, we evolve the web performance toolbox in two ways. First, we develop Igor, a client-side TCP congestion control detection tool that can differentiate between loss-based and delay-based algorithms by focusing on their behavior during slow start. Second, we develop a Chromium patch that allows fine-grained control over the HTTP version to be used per domain. Using these new web performance tools, we analyze over 300 real websites and find that 67% of sites relying solely on delay-based congestion control algorithms have better performance with HTTP/1.1.}, keywords = {Web QoE TCP HTTP}, pubstate = {published}, tppubtype = {inproceedings} } @article{lutu:ccr20, title = {A first look at the IP eXchange Ecosystem}, author = {Andra Lutu and Byungjin Jun and Fabián E.
Bustamante and Diego Perino and Marcelo Bagnulo and Carlos Gamboa Bontje}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2020/10/ccr20-2.pdf}, year = {2020}, date = {2020-10-31}, journal = {ACM SIGCOMM Computer Communication Review (CCR)}, volume = {50}, number = {4}, abstract = {The IPX Network interconnects about 800 Mobile Network Operators (MNOs) worldwide and a range of other service providers (such as cloud and content providers). It forms the core that enables global data roaming while supporting emerging applications, from VoLTE and video streaming to IoT verticals. This paper presents the first characterization of this so-far opaque IPX ecosystem and a first-of-its-kind in-depth analysis of an IPX Provider (IPX-P). The IPX Network is a private network formed by a small set of tightly interconnected IPX-Ps. We analyze an operational dataset from a large IPX-P that includes BGP data as well as statistics from signaling. We shed light on the structure of the IPX Network as well as on the temporal, structural, and geographic features of the IPX traffic. Our results are a first step in understanding the IPX Network at its core, key to fully understanding the global mobile Internet.}, keywords = {mobile IPX roaming}, pubstate = {published}, tppubtype = {article} } @inproceedings{aliu:imc20, title = {Out of Sight, Not Out of Mind - A User-View on the Criticality of the Submarine Cable Network}, author = {Shucheng Liu and Zachary S. Bischof and Ishaan Madan and Peter K. Chan and Fabián E. Bustamante}, url = {https://github.com/NU-AquaLab/Criticality-SCN https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2020/10/SLiu-IMC2020.pdf}, year = {2020}, date = {2020-10-27}, booktitle = {Proc. of ACM Internet Measurement Conference (IMC)}, abstract = {Nearly all international data is carried by a mesh of submarine cables connecting virtually every region in the world. It is generally assumed that Internet services rely on this submarine cable network (SCN) for backend traffic, but that most users do not directly depend on it, as popular resources are either local or cached nearby. In this paper, we study the criticality of the SCN from the perspective of end users. We present a general methodology for analyzing the reliance on the SCN for a given region, and apply it to the most popular web resources accessed by users in 63 countries from every inhabited continent, collectively capturing ≈80% of the global Internet population. We find that as many as 64.33% of all web resources accessed from a specific country rely on the SCN. Despite the explosive growth of data center and CDN infrastructure around the world, at least 28.22% of the CDN-hosted resources traverse a submarine cable.}, keywords = {SCN, Submarine Cables}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{alutu:imc20, title = {Where Things Roam: Uncovering Cellular IoT/M2M Connectivity}, author = {Andra Lutu and Byungjin Jun and Alessandro Finamore and Fabián E. Bustamante and Diego Perino}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2020/10/ALutu-IMC2020a.pdf}, year = {2020}, date = {2020-10-27}, booktitle = {Proc. of ACM Internet Measurement Conference (IMC)}, abstract = {Support for "things" roaming internationally has become critical for Internet of Things (IoT) verticals, from connected cars to smart meters and wearables, and explains the commercial success of Machine-to-Machine (M2M) platforms.
We analyze IoT verticals operating with connectivity via IoT SIMs, and present the first large-scale study of commercially deployed IoT SIMs for energy meters. We also present the first characterization of an operational M2M platform and the first analysis of the rather opaque associated ecosystem. For operators, the exponential growth of IoT has meant increased stress on the infrastructure shared with traditional roaming traffic. Our analysis quantifies the adoption of roaming by M2M platforms and the impact they have on the underlying visited Mobile Network Operators (MNOs). To manage the impact of massive deployments of devices operating with an IoT SIM, operators must be able to distinguish between the latter and traditional inbound roamers. We build a comprehensive dataset capturing the device population of a large European MNO over three weeks. With this, we propose and validate a classification approach that can allow operators to distinguish inbound roaming IoT devices.}, keywords = {IoT, mobile, Roaming}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{zhang:swarmtalk20, title = {SwarmTalk - Towards Benchmark Software Suites for Swarm Robotics Platforms}, author = {Yihan Zhang and Lyon Zhang and Hanlin Wang and Fabián E. Bustamante and Michael Rubenstein}, url = {http://ifaamas.org/Proceedings/aamas2020/pdfs/p1638.pdf}, year = {2020}, date = {2020-05-09}, booktitle = {Proc. of the International Conference on Autonomous Agents and Multiagent Systems (AAMAS)}, abstract = {With nearly every new swarm robotic platform built, the designers develop its software stack, from low-level drivers to high-level algorithmic implementations. And while the different software stacks frequently share components, especially in robot-to-robot communication, these common components are also developed from scratch time and again. We present SwarmTalk, a new communication library that can be quickly ported to new and existing swarm hardware. SwarmTalk adopts a publish-subscribe communication model that satisfies the severe hardware constraints found in many swarms, and provides an easy-to-use programming interface. We port our SwarmTalk prototype to two hardware swarm platforms and two simulator-based platforms, and implement commonly-used swarm algorithms on these four platforms. We present the design and implementation of SwarmTalk, discuss some of the system challenges in implementation and cross-platform porting, and report on our initial experiences as a common communication abstraction for a community benchmarking suite.}, keywords = {swarm robotics}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{mvarvello:batterylab, title = {BatteryLab, a distributed power monitoring platform for mobile devices: demo abstract}, author = {Matteo Varvello and Kleomenis Katevas and Wei Hang and Mihai Plesa and Hamed Haddadi and Fabián E. Bustamante and Benjamin Livshits}, year = {2019}, date = {2019-11-11}, booktitle = {Proc. of the ACM Conference on Embedded Networked Sensor Systems (SenSys)}, abstract = {There has been a growing interest in measuring and optimizing the power efficiency of mobile apps. Traditional power evaluations rely either on inaccurate software-based solutions or on ad-hoc testbeds composed of a power meter and a mobile device. This demonstration presents BatteryLab, our solution to share existing battery testing setups to build a distributed platform for battery measurements.
Our vision is to transform independent battery testing setups into vantage points of a planetary-scale measurement platform offering heterogeneous devices and testing conditions. We demonstrate BatteryLab functionalities by investigating the energy efficiency of popular websites when loaded via both Android and iOS browsers. Our demonstration is also live at https://batterylab.dev/.}, keywords = {measurement, mobile}, pubstate = {published}, tppubtype = {inproceedings} } @workshop{newman:scaleup, title = {Scaling up your web experience, everywhere}, author = {James Newman and Robert H. Belson and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/Newman-Scaleup.pdf}, year = {2019}, date = {2019-01-06}, booktitle = {Proc. of the International Workshop on Mobile Computing Systems and Applications (HotMobile)}, journal = {Hotmobile}, abstract = {We present an approach to improve users’ web experience by dynamically reducing the complexity of websites rendered based on network conditions. Our approach is based on a simple insight – adjusting a browser window’s scale (i.e., zooming in/out) changes the number of objects placed above-the-fold and thus hides the loading of objects pushed below the fold in the user scroll time. We design ScaleUp, a browser extension that tracks network conditions and dynamically adjusts browser scale appropriately to improve user web Quality of Experience (QoE) while preserving the design integrity of websites. Through controlled experiments, we demonstrate the impact of ScaleUp on a number of key QoE metrics over a random sample of 50 of the top 500 Alexa websites. We show that a simple adjustment in scale can result in an over 19% improvement in Above-The-Fold (ATF) time in the median case. While adjusting a scale factor can improve proxy metrics of QoE, it is unclear whether that translates into an improved web experience for users. We summarize findings from a large, crowdsourced experiment with 1,000 users showing that, indeed, improvements to QoE metrics correlate with an enhanced user experience. We have released ScaleUp as a Chrome Extension that now counts over 1,000 users worldwide, and report on some of the lessons learned from this deployment.}, keywords = {In-flight, mobile, QoE, scaleup, Web}, pubstate = {published}, tppubtype = {workshop} } @inproceedings{newman:impressions, title = {The Value of First Impressions: The Impact of Ad-Blocking on Web QoE}, author = {James Newman and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/JNewman-Adblock.pdf}, year = {2019}, date = {2019-01-03}, booktitle = {Proc. of the Passive and Active Measurement Conference (PAM)}, journal = {Passive and Active Measurement (PAM)}, abstract = {We present the first detailed analysis of ad-blocking’s impact on user Web quality of experience (QoE). We use the most popular web-based ad-blocker to capture the impact of ad-blocking on QoE for the top Alexa 5,000 websites. We find that ad-blocking reduces the number of objects loaded by 15% in the median case, and that this reduction translates into a 12.5% improvement in page load time (PLT) and a slight worsening of time to first paint (TTFP) of 6.54%. We show the complex relationship between ad-blocking and quality of experience - despite the clear improvements to PLT in the average case, for the bottom 10 percentile, this improvement comes at the cost of a slowdown in the initial responsiveness of websites, with a 19% increase in TTFP.
To understand the relative importance of this tradeoff on user experience, we run a large, crowdsourced experiment with 1,000 users on Amazon Mechanical Turk. For this experiment, users were presented with websites for which ad-blocking results in both a reduction in PLT and a significant increase in TTFP. We find that, surprisingly, 71.5% of the time users show a clear preference for faster first paint over faster page load times, hinting at the importance of first impressions on web QoE.}, keywords = {publication}, pubstate = {published}, tppubtype = {inproceedings} } @conference{jun:ampup, title = {AMP up your Mobile Web Experience: Characterizing the Impact of Google’s Accelerated Mobile Project}, author = {Byungjin Jun and Fabián E. Bustamante and Sung Yoon Whang and Zachary S. Bischof}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/AMP-Mobicom-2019.pdf}, year = {2019}, date = {2019-01-02}, booktitle = {Proc. of the Annual International Conference on Mobile Computing and Networking (MobiCom)}, journal = {Mobicom}, abstract = {The rapid growth in the number of mobile devices, subscriptions, and their associated traffic has served as motivation for several projects focused on improving mobile users’ quality of experience (QoE). Few have been as contentious as the Google-initiated Accelerated Mobile Project (AMP), both praised for its seemingly instant mobile web experience and criticized based on concerns about the enforcement of its formats. This paper presents the first characterization of AMP’s impact on users’ QoE. We do this using a corpus of over 2,100 AMP webpages, and their corresponding non-AMP counterparts, based on trendy-keyword-based searches. We characterized AMP’s impact looking at common web QoE metrics, including Page Load Time, Time to First Byte, and SpeedIndex (SI). Our results show that AMP significantly improves SI, yielding on average a 60% lower SI than non-AMP pages without accounting for prefetching. Prefetching of AMP pages pushes this advantage even further, with prefetched pages loading over 2,000ms faster than non-prefetched AMP pages. This clear boost may come, however, at a non-negligible cost for users with limited data plans, as it incurs an average of over 1.4 MB of additional data downloaded, unbeknownst to users.}, keywords = {publication}, pubstate = {published}, tppubtype = {conference} } @workshop{bischof:untangling, title = {Untangling the world-wide mesh of undersea cables}, author = {Zachary S. Bischof and Romain Fontugne and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/undersea.pdf}, year = {2018}, date = {2018-11-19}, booktitle = {ACM Workshop on Hot Topics in Networks (HotNets)}, abstract = {The growth of global Internet traffic has driven an exponential expansion of the submarine cable network, both in terms of the sheer number of links and its total capacity. Today, a complex mesh of hundreds of cables, stretched over 1 million kilometers, connects nearly every corner of the earth and is instrumental in closing the remaining connectivity gaps. Despite the scale and critical role of the submarine network for both business and society at large, our community has mostly ignored it, treating it as a black box in most studies, from connectivity to inter-domain traffic and reliability.
In this paper, we make the case for a new research agenda focused on characterizing the global submarine network and the critical role it plays as a basic component of any inter-continental end-to-end connection.}, keywords = {publication, Reliability, SCN, Submarine Cables}, pubstate = {published}, tppubtype = {workshop} } @conference{bischof:tprc, title = {The Growing Importance of Being Always On -- A first look at the reliability of broadband Internet access}, author = {Zachary S. Bischof and Fabián E. Bustamante and Nick Feamster}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/tprc46-reliability.pdf}, year = {2018}, date = {2018-09-03}, booktitle = {Research Conference on Communication, Information and Internet Policy (TPRC)}, journal = {In Proc. of TPRC46}, abstract = {Broadband availability and performance continue to improve rapidly, spurred by both government and private investment and motivated by the recognized social and economic benefits of connectivity. A recent ITU ``State of Broadband'' report notes that there are over 60 countries where fixed or mobile broadband penetration is above 25% and more than 70 countries where the majority of the population is online. According to Akamai's ``State of the Internet'' report, over the last four years, the top four countries in terms of average connection speed have nearly doubled their capacity. Although providing access and sufficient capacity remains a challenge in many parts of the world, in most developed countries broadband providers are offering sufficiently high capacities to encourage consumers to migrate services for entertainment, communication, and home monitoring to over-the-top (OTT) alternatives. According to a recent survey, nearly 78% of U.S. broadband households subscribe to an OTT video service. Enterprises are following the same path, with over one-third opting to use VoIP phones instead of landline ones. The proliferation of high-capacity access and the migration to OTT services have raised users' expectations of service reliability. A recent survey on consumer experience by the UK Office of Communications (Ofcom) ranks reliability first---higher than even the speed of connection---as the main reason for customer complaints. Our empirical study of access-ISP outages and user demand corroborates these observations, showing the effects of low reliability on user behavior, as captured by their demand on the network. Researchers and regulators alike have also recognized the need for clear standards and a better understanding of the role that service reliability plays in shaping the behavior of broadband users. Despite its growing importance, both the reliability of broadband services and potential ways to improve on it have received scant attention from the research community. In this paper, we introduce an approach for characterizing broadband reliability using data collected by the many emerging national efforts to study broadband (in over 30 countries) and apply this approach to the data gathered by the Measuring Broadband America (MBA) project, which is operated by the United States Federal Communications Commission (FCC). We show, among other findings, that current broadband services deliver an average availability of at most two nines (99%), with an average annual downtime of 17.8 hours. Motivated by our findings, we quantify the potential benefits of multihomed broadband access and study its feasibility as a solution for increasing reliability.
Using the FCC MBA dataset and measurements collected by over 6,000 end-host vantage points in 75 countries, we show that multihoming the access link at the home gateway with two different providers adds two nines of service availability, matching the minimum four nines (99.99%) required by the FCC for the public switched telephone network (PSTN).}, keywords = {publication}, pubstate = {published}, tppubtype = {conference} } @conference{wassermann:anycast, title = {Anycast on the Move: A Look at Mobile Anycast Performance}, author = {Sarah Wassermann and John P. Rula and Fabián E. Bustamante and Pedro Casas}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/SWasserman-AnycastMove.pdf}, year = {2018}, date = {2018-06-11}, booktitle = {Network Traffic Measurement and Analysis Conference (TMA)}, abstract = {The appeal and clear operational and economic benefits of anycast to service providers have motivated a number of recent experimental studies on its potential performance impact for end users. For CDNs on mobile networks, in particular, anycast provides a simpler alternative to existing request routing systems challenged by a growing, complex, and commonly opaque cellular infrastructure. This paper presents the first analysis of anycast performance for mobile users. In particular, our evaluation focuses on two distinct anycast services, both providing part of the DNS Root zone and together covering all major geographical regions. Our results show that mobile clients are routed to suboptimal replicas in terms of geographical distance and associated latencies, more frequently while on a cellular connection than on WiFi, with a significant impact on performance. We find that this is not simply an issue of lacking better alternatives, and that the problem is not specific to particular geographic areas or autonomous systems. We close with a first analysis of the root causes of this phenomenon and describe some of the major classes of anycast anomalies revealed during our study, additionally including a systematic approach to automatically detect such anomalies without any sort of training or labeled measurements.}, keywords = {anycast, mobile, publication}, pubstate = {published}, tppubtype = {conference} } @conference{rula:mhwifi, title = {Mile High WiFi: A First Look At In-Flight Internet Connectivity}, author = {John P. Rula and Fabián E. Bustamante and James Newman and Arash Molavi Kakhki and Dave Choffnes}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/JRula-WWW18.pdf}, year = {2018}, date = {2018-04-03}, booktitle = {The Web Conference (WWW)}, journal = {Proc. of WWW}, abstract = {In-Flight Communication (IFC), which can be purchased on a growing number of commercial flights, is often received by consumers with both awe for its mere availability and harsh criticism for its poor performance. Indeed, IFC provides Internet connectivity in some of the most challenging conditions, with aircraft traveling at speeds in excess of 500 mph at 30,000 feet above the ground. Yet, while existing services do provide basic Internet accessibility, anecdotal reports rank their quality of service as, at best, poor. In this paper, we present the first characterization of deployed IFC systems. Using over 45 flight-hours of measurements, we profile the performance of IFC across the two dominant access technologies -- direct air-to-ground communication (DA2GC) and mobile satellite service (MSS).
We show that IFC QoS is in large part determined by the high latencies inherent to DA2GC and MSS, with RTTs averaging 200ms and 750ms, respectively, and that these high latencies directly impact the performance of common applications such as web browsing. While each IFC technology is based on well-studied wireless communication technologies, our findings reveal that IFC links experience even more degraded link performance than their technological antecedents. We find median loss rates of 7%, and nearly 40% loss at the 90th percentile for MSS, an order of magnitude larger than recent characterizations of residential satellite networks. We extend our IFC study by exploring the potential of the newly released HTTP/2 and QUIC protocols in an emulated IFC environment, finding that QUIC is able to improve page load times by as much as 7.9 times. In addition, we find that HTTP/2's use of multiplexing multiple requests onto a single TCP connection performs up to 4.8x worse than HTTP/1.1 when faced with large numbers of objects. We use network emulation to explore proposed technological improvements to existing IFC systems, finding that high link losses account for the largest factor of performance degradation, and that improving link bandwidth does little to improve the quality of experience for applications such as web browsing.}, keywords = {In-flight, mobile, publication}, pubstate = {published}, tppubtype = {conference} } @conference{CellSpotting, title = {Cell Spotting -- Studying the Role of Cellular Networks in the Internet}, author = {John P. Rula and Fabián E. Bustamante and Moritz Steiner}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/census.pdf}, year = {2017}, date = {2017-11-03}, booktitle = {Internet Measurement Conference (IMC)}, journal = {In Proc. of IMC}, abstract = {The increasingly dominant role of the mobile Internet and its economic implications have been the topic of several studies and surveys from industry and academia. Most previous work has focused on mobile devices, as a whole, independently of their connectivity, and taken the limited perspectives of either a few individual handsets or a single operator. We lack a comprehensive and global view of cellular networks, their scope, configurations, and usage. In this paper, we present a comprehensive analysis of global cellular networks. We describe an approach to accurately identify cellular network IP addresses using the Network Information API, a non-standard JavaScript API in several mobile browsers, and show its effectiveness in a range of cellular network configurations. We combine this approach with the vantage point of one of the world’s largest CDNs, with over 200,000 servers in 1,450 networks and clients in over 46,000 ASes across 245 countries, to characterize cellular access around the globe. We discover over 350 thousand /24 IPv4 and 23 thousand /48 IPv6 cellular prefixes. We find that the majority of cellular networks exist as mixed networks (i.e., networks that share both fixed-line and cellular devices), requiring prefix – not ASN – level identification. By utilizing address-level traffic from the same CDN, we calculate the fraction of traffic coming from cellular addresses.
Overall, we find that cellular traffic comprises 16.2% of the CDN’s global traffic, and that cellular traffic ranges widely in importance between countries, from capturing nearly 96% of all traffic in Ghana to just 12.1% in France.}, keywords = {publication}, pubstate = {published}, tppubtype = {conference} } @online{bischof:breliability, title = {Characterizing and Improving the Reliability of Broadband Internet Access}, author = {Zachary S. Bischof and Fabián E. Bustamante and Nick Feamster}, url = {https://arxiv.org/abs/1709.09349}, year = {2017}, date = {2017-09-03}, organization = {arXiv.org}, abstract = {In this paper, we empirically demonstrate the growing importance of reliability by measuring its effect on user behavior. We present an approach for broadband reliability characterization using data collected by many emerging national initiatives to study broadband, and apply it to the data gathered by the Federal Communications Commission's Measuring Broadband America project. Motivated by our findings, we present the design, implementation, and evaluation of a practical approach for improving the reliability of broadband Internet access with multihoming.}, keywords = {publication}, pubstate = {published}, tppubtype = {online} } @conference{bischof:sla, title = {The utility argument — Making a case for broadband SLAs}, author = {Zachary S. Bischof and Fabián E. Bustamante and Rade Stanojevic}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/PAM17_Bischof.pdf}, year = {2017}, date = {2017-03-03}, booktitle = {Passive and Active Measurement (PAM)}, journal = {In Proc. of PAM}, abstract = {Most residential broadband services are described in terms of their maximum potential throughput rate, often advertised as having speeds "up to X Mbps". Though such promises are often met, they are fairly limited in scope and, unfortunately, there is no basis for an appeal if a customer were to receive compromised quality of service. While this 'best effort' model was sufficient in the early days, we argue that as broadband customers and their devices become more dependent on Internet connectivity, we will see an increased demand for more encompassing Service Level Agreements (SLAs). In this paper, we study the design space of broadband SLAs and explore some of the trade-offs between the level of strictness of SLAs and the cost of delivering them. We argue that certain SLAs could be offered almost immediately with minimal impact on retail prices, and that ISPs (or third parties) could accurately infer the risk of offering an SLA to individual customers – with accuracy comparable to that in the car or credit insurance industry – and price the SLA service accordingly.}, keywords = {publication}, pubstate = {published}, tppubtype = {conference} } @article{bustamante:qoe, title = {Workshop on Tracking Quality of Experience in the Internet: Summary and Outcomes}, author = {Fabián E. Bustamante and David Clark and Nick Feamster}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/p55-bustamante.pdf}, year = {2017}, date = {2017-01-03}, journal = {SIGCOMM Computer Communication Review (CCR)}, volume = {47}, number = {1}, abstract = {This is a report on the Workshop on Tracking Quality of Experience in the Internet, held at Princeton, October 21–22, 2015, jointly sponsored by the National Science Foundation and the Federal Communications Commission.
The term Quality of Experience (QoE) describes a user’s subjective assessment of their experience when using a particular application. In the past, network engineers have typically focused on Quality of Service (QoS): performance metrics such as throughput, delay and jitter, packet loss, and the like. Yet, performance as measured by QoS parameters only matters if it affects the experience of users as they attempt to use a particular application. Ultimately, the user’s experience is determined by QoE impairments (e.g., rebuffering). Although QoE and QoS are related—for example, a video rebuffering event may be caused by a high packet-loss rate—it is QoE metrics that ultimately affect a user’s experience. Identifying the causes of QoE impairments is complex, since the impairments may arise in one or another region of the network, in the home network, on the user’s device, in servers that are part of the application, or in supporting services such as the DNS. Additionally, metrics for QoE continue to evolve, as do the methods for relating QoE impairments to underlying causes that could be measurable using standard network measurement techniques. Finally, as the capabilities of the underlying network infrastructure continue to evolve, researchers should also consider how to design infrastructure and tools that can best support measurements to better identify the locations and causes of QoE impairments. The workshop’s aim was to understand the current state of QoE research and to contemplate a community agenda to integrate ongoing threads of QoE research into a collaboration. This summary report describes the topics discussed and summarizes the key points of the discussion.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @conference{jha:xfinity, title = {eXploring Xfinity: A First Look at Provider-Enabled Community Networks}, author = {Dipendra Jha and John P. Rula and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/pam-xfinity.pdf}, year = {2016}, date = {2016-03-03}, booktitle = {Passive and Active Measurement (PAM)}, journal = {In Proc. PAM}, abstract = {Several broadband providers have been offering community WiFi as an additional service for existing customers and paid subscribers. These community networks provide Internet connectivity on the go for mobile devices and a path to offload cellular traffic. Rather than deploying new infrastructure or relying on the resources of an organized community, these provider-enabled community WiFi services leverage the existing hardware and connections of their customers. The past few years have seen significant growth in their popularity and coverage, and some municipalities and institutions have started to consider them as the basis for public Internet access. In this paper, we present the first characterization of one such service – the Xfinity Community WiFi network. Taking the perspectives of the home-router owner and the public hotspot user, we characterize the performance and availability of this service in urban and suburban settings, at different times, between September 2014 and 2015. Our results highlight the challenges of providing these services in urban environments considering the tensions between coverage and interference, large obstructions, and high population densities.
Through a series of controlled experiments, we measure the impact on hosting customers, finding that in certain cases, the use of the public hotspot can degrade host network throughput by up to 67% under high traffic on the public hotspot.}, keywords = {publication}, pubstate = {published}, tppubtype = {conference} } @workshop{rula:ipsfly, title = {When IPs Fly: A Case for Redefining Airline Communication}, author = {John Rula and Fabián E. Bustamante and David R. Choffnes}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/airline.pdf}, year = {2016}, date = {2016-02-03}, booktitle = {International Workshop on Mobile Computing Systems and Applications (HotMobile)}, journal = {In Proc. HotMobile}, abstract = {The global airline industry conducted over 33 million flights in 2014 alone, carrying over 3.3 billion passengers. Surprisingly, the traffic management system handling this flight volume communicates over either VHF audio transmissions or plane transponders, exhibiting several seconds of latency and single bits per second of throughput. There is a general consensus that serving the airline industry's growing demand will require significant improvements to the air traffic management system; we believe that many of these improvements can leverage the past two decades of mobile networking research. In this paper, we make the case that moving to a common IP-based data channel to support flight communication can radically change the airline industry. While there remain many challenges to achieve this vision, we believe that such a shift can greatly improve the rate of innovation, overall efficiency of global air traffic management, enhance aircraft safety and create new applications that leverage the capability of an advanced data channel. Through preliminary measurements of existing in-flight Internet communication systems, we show that in-flight connectivity achieves orders-of-magnitude higher throughput and lower latency than current systems, and operates as a highly reliable and available data link. This position paper takes a first look at the opportunity for IP-based flight communication, and identifies several promising research areas in this space.}, keywords = {In-flight, mobile, publication}, pubstate = {published}, tppubtype = {workshop} } @article{sanchez:dasu-ton, title = {A measurement experimentation platform at the Internet’s edge}, author = {Mario A. Sánchez and John S. Otto and Zachary S. Bischof and David R. Choffnes and Fabián E. Bustamante and Balachander Krishnamurthy and Walter Willinger}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/ton-dasu.pdf}, year = {2015}, date = {2015-12-01}, journal = {IEEE/ACM Transactions on Networking (TON)}, volume = {23}, number = {6}, abstract = {Poor visibility into the network hampers progress in a number of important research areas, from network troubleshooting to Internet topology and performance mapping. This persistent, well-known problem has served as motivation for numerous proposals to build or extend existing Internet measurement platforms by recruiting larger, more diverse vantage points. Capturing the edge of the network, however, remains an elusive goal. We argue that at its root the problem is one of incentives. Today's measurement platforms build on the assumption that the goals of experimenters and those hosting the platform are the same. As much of the Internet growth occurs in residential broadband networks, this assumption no longer holds.
We present a measurement experimentation platform that reaches the network edge by explicitly aligning the objectives of the experimenters with those of the users hosting the platform. Dasu -- our current prototype -- is designed to support both network measurement experimentation and broadband characterization. Dasu has been publicly available since July 2010 and is currently in use by over 100K users with a heterogeneous set of connections spread across 2,431 networks and 166 countries. We discuss some of the challenges we faced building and using a platform for the Internet's edge, describe its design and implementation, and illustrate the unique perspective its current deployment brings to Internet measurement.}, keywords = {Edge Experimentation, publication}, pubstate = {published}, tppubtype = {article} } @conference{Cuba, title = {In and Out of Cuba: Characterizing Cuba's Connectivity}, author = {Zachary S. Bischof and John P. Rula and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/imc207s-bischofA.pdf}, year = {2015}, date = {2015-10-03}, booktitle = {Internet Measurement Conference (IMC)}, journal = {In Proc. of IMC}, abstract = {The goal of our work is to characterize the current state of Cuba's access to the wider Internet. This work is motivated by recent improvements in connectivity to the island and the growing commercial interest following the easing of restrictions on travel and trade with the US. In this paper, we profile Cuba’s networks, their connections to the rest of the world, and the routes of international traffic going to and from the island. Despite the addition of the ALBA-1 submarine cable, we find that round trip times to websites hosted off the island remain very high; pings to popular websites frequently took over 300 ms. We also find a high degree of path asymmetry in traffic to/from Cuba. Specifically, in our analysis we find that traffic going out of Cuba typically travels through the ALBA-1 cable, but, surprisingly, traffic on the reverse path often traverses high-latency satellite links, adding over 200 ms to round trip times. Last, we analyze queries to public DNS servers and SSL certificate requests to characterize the availability of network services in Cuba.}, keywords = {publication}, pubstate = {published}, tppubtype = {conference} } @workshop{sanchez:coordination, title = {Experiment coordination for large-scale measurement platforms}, author = {Mario A. Sánchez and Fabián E. Bustamante and Balachander Krishnamurthy and Walter Willinger}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/c2b16-sanchezAPT.pdf}, year = {2015}, date = {2015-08-06}, booktitle = {ACM SIGCOMM Workshop on Crowdsourcing and crowdsharing of Big (Internet) Data (C2B(I)D)}, journal = {In Proc. Sigcomm C2B(I)D Workshop}, abstract = {The risk of placing an undesired load on networks and networked services through probes originating from measurement platforms has always been present. While several scheduling schemes have been proposed to avoid undue loads or DDoS-like effects from uncontrolled experiments, the motivation scenarios for such schemes have generally been considered “sufficiently unlikely” and safely ignored by most existing measurement platforms. We argue that the growth of large, crowdsourced measurement systems means we cannot ignore this risk any longer.
In this paper we expand on our original lease-based coordination scheme designed for measurement platforms that embrace crowdsourcing as their method-of-choice. We compare it with two alternative strategies currently implemented by some of the existing crowdsourced measurement platforms: centralized rate-limiting and individual rate-limiting. Our preliminary results show that our solution outperforms these two naive strategies for coordination according to at least two intuitive metrics: resource utilization and bound compliance. We find that our scheme efficiently allows the scalable and effective coordination of measurements among potentially thousands of hosts while providing individual clients with enough flexibility to act on their own. }, keywords = {Edge Experimentation, publication}, pubstate = {published}, tppubtype = {workshop} } @workshop{rula:bdiversity, title = {Second Chance - Understanding diversity in broadband access network performance}, author = {John P. Rula and Zachary S. Bischof and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/c2b15-rula.pdf}, year = {2015}, date = {2015-08-03}, booktitle = {ACM SIGCOMM Workshop on Crowdsourcing and crowdsharing of Big (Internet) Data (C2B(I)D)}, journal = {In Proc. Sigcomm C2B(I)D workshop}, abstract = {In recognition of the increasing importance of broadband, several governments have embarked on large-scale efforts to measure broadband services from devices within end-users’ homes. Participants for these studies were selected based on features that, a priori, were thought to be relevant to service performance such as geographic region, access technology and subscription level. Each yearly deployment since has followed the same model, ensuring that the number of measurement points remains stable despite natural churn. In this paper, we start to explore the issue of vantage point selection in residential broadband networks by leveraging the publicly available datasets collected as part of the FCC Broadband America study. We present the first analysis of the variation of performance in edge networks and diversity of individual vantage points. We explore the underlying causes of this diversity through a factor analysis of contextual factors within an ISP, such as the geographic location of subscribers. The goal of this analysis is to inform additional deployments in ongoing studies, and guide the design and deployment of future investigations into broadband networks.}, keywords = {Edge Experimentation, publication}, pubstate = {published}, tppubtype = {workshop} } @conference{rula:softcontrol, title = {Crowdsensing Under (Soft) Control}, author = {John P. Rula and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/csc.pdf}, year = {2015}, date = {2015-04-03}, booktitle = {IEEE INFOCOM}, journal = { In Proc. of INFOCOM}, abstract = {Crowdsensing leverages the pervasiveness and power of mobile devices such as smartphones and tablets to enable ordinary citizens to collect, transport and verify data. Application domains range from environment monitoring to infrastructure management and social computing. Crowdsensing services' effectiveness is a direct result of their coverage, which is driven by the recruitment and mobility patterns of participants. Due to the population distribution of most areas and the regular mobility patterns of participants, less popular or populated areas suffer from poor coverage.
In this paper, we present Crowd Soft Control (CSC), an approach to exert limited control over the actions of participants by leveraging the built-in incentives of location-based gaming and social applications. By pairing community sensing with location-based applications, CSC allows sensing services to reuse the incentives of location-based apps to steer the actions of participating users and increase the effectiveness of sensing campaigns. While there are several domains where this intentional movement is useful, such as data muling, the paper presents the design, implementation and evaluation of CSC applied to crowdsensing. We built a prototype of CSC and integrated it with two location-based applications and crowdsensing services. Experimental results demonstrate the low cost of integration and minimal overhead of CSC.}, keywords = {publication}, pubstate = {published}, tppubtype = {conference} } @workshop{rula:appt, title = {Mobile AD(D): Estimating Mobile App Session Times for Better Ads}, author = {John Rula and Byungjin Jun and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/hot81-rula.pdf}, year = {2015}, date = {2015-02-03}, booktitle = {International Workshop on Mobile Computing Systems and Applications (HotMobile)}, journal = {In Proc. HotMobile}, abstract = {While mobile advertisements are the dominant source of revenue for mobile apps, the usage patterns of mobile users, and thus their engagement times, may be in conflict with the effectiveness of these ads. With any given application, a user may engage for anywhere from a few seconds to several minutes depending on a number of factors such as their location and goals. Despite the resulting wide range of session times, the current nature of ad auctions dictates that ads are priced and sold prior to actual viewing, that is, regardless of the actual display time. We argue that the wealth of easy-to-gather contextual information on mobile devices is sufficient to make better choices by effectively predicting exposure time. We analyze mobile device usage patterns with a detailed two-week-long user study of 37 users in the US and South Korea. After characterizing application session times, we use factor analysis to derive a simple predictive model and show that this model is able to offer improved accuracy compared to mean session time over 90% of the time. We make the case for including predicted ad exposure duration in the price of mobile advertisements and posit that such information could significantly improve the effectiveness of mobile advertising, giving publishers the ability to tune campaigns for engagement length, enabling a more efficient market for ad impressions, selecting appropriate media for an ad impression, and lowering the cost to users, including network utilization and device power.}, keywords = {publication}, pubstate = {published}, tppubtype = {workshop} } @conference{gavalda:p2pbehaviour, title = {User behavior and change: File sharers and copyright laws}, author = {Arnau Gavaldà-Miralles and John S. Otto and Fabián E. Bustamante and Luís A. N. Amaral and Jordi Duch and Roger Guimerà}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/conext14.pdf}, year = {2014}, date = {2014-12-06}, booktitle = {International Conference on emerging Networking EXperiments and Technologies (CoNEXT)}, journal = {In Proc.
of CoNEXT}, abstract = {Though the impact of file-sharing of copyrighted content has been discussed for over a decade, only in the past few years have countries begun to adopt legislation to criminalize this behavior. These laws impose penalties ranging from warnings and monetary fines to disconnecting Internet service. While their supporters are quick to point out trends showing the efficacy of these laws at reducing use of file-sharing sites, their analyses rely on brief snapshots of activity that cannot reveal long- and short-term trends. In this paper, we introduce an approach to model user behavior based on a hidden Markov model and apply it to analyze a two-year-long user-level trace of download activity of over 38k users from around the world. This approach allows us to quantify the true impact of file-sharing laws on user behavior, revealing behavioral trends otherwise difficult to identify. For instance, despite an initial reduction in activity in New Zealand when a three-strikes law took effect, after two months activity had returned to the level observed prior to the law being enacted. Given that punishment results, at best, in short-term compliance, we suggest that incentive-based approaches may be more effective at changing user behavior. }, keywords = {publication}, pubstate = {published}, tppubtype = {conference} } @article{Domain, title = {Internet Inter-Domain Traffic Estimation for the Outsider}, author = {Mario A. Sánchez and Fabián E. Bustamante and Balachander Krishnamurthy and Walter Willinger and Georgios Smaragdakis and Jeffrey Erman}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/altps-camera-ready.pdf}, year = {2014}, date = {2014-11-09}, journal = {In Proc. of IMC}, abstract = {Characterizing the flow of Internet traffic is important in a wide range of contexts, from network engineering and application design to understanding the network impact of consumer demand and business relationships. Despite the growing interest, the nearly impossible task of collecting large-scale, Internet-wide traffic data has severely constrained the focus of traffic-related studies. In this paper, we introduce a novel approach to characterize inter-domain traffic by reusing large, publicly available traceroute datasets. Our approach builds on a simple insight -- the popularity of a route on the Internet can serve as an informative proxy for the volume of traffic it carries. It applies structural analysis to a dual representation of the AS-level connectivity graph derived from available traceroute datasets. Drawing analogies with city grids and traffic, it adapts data transformations and metrics of route popularity from urban planning to serve as proxies for traffic volume. We call this approach Network Syntax, highlighting the connection to urban planning Space Syntax. We apply Network Syntax in the context of a global ISP and a large Internet eXchange Point and use ground-truth data to demonstrate the strong correlation (r^2 values of up to 0.9) between inter-domain traffic volume and the different proxy metrics. Working with these two network entities, we show the potential of Network Syntax for identifying critical links and inferring missing traffic matrix measurements.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @inproceedings{zbichof:broadband, title = {Need, Want, or Can Afford - Broadband Markets and the Behavior of Users}, author = {Zachary S. Bischof and Fabián E.
Bustamante and Rade Stanojevic}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/imc220-bischof.pdf}, year = {2014}, date = {2014-11-06}, booktitle = {In Proc. of ACM IMC}, journal = {In Proc. of IMC}, abstract = {We present the first study of broadband services in their broader context, evaluating the relationship between service characteristics (such as capacity, latency and loss), broadband pricing and user demand. We explore these relationships, beyond correlation, with the application of natural experiments. Most efforts on broadband service characterization have so far focused on performance and availability, yet we lack a clear understanding of how such services are being utilized and how their use is impacted by the particulars of the market. By analyzing over 23 months of data collected from 53,000 end hosts and residential gateways in 160 countries, along with a global survey of retail broadband plans, we empirically study the relationship between broadband service characteristics, pricing and demand. We show a strong correlation between capacity and demand, even though subscribers rarely fully utilize their links, but note a law of diminishing returns with relatively smaller increases in demand at higher capacities. Despite the fourfold increase in global IP traffic, we find that user demand on the network over a three-year period remained constant for a given bandwidth capacity. We exploit natural experiments to examine the causality between these factors. The reported findings represent an important step towards understanding how user behavior, and the market features that shape it, affect broadband networks and the Internet at large.}, keywords = {publication}, pubstate = {published}, tppubtype = {inproceedings} } @conference{jrula:DNSb, title = {Behind the Curtain - Cellular DNS and Content Replica Selection}, author = {John Rula and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/dns.pdf}, year = {2014}, date = {2014-11-03}, booktitle = {In Proc. ACM IMC}, journal = {In Proc. IMC}, abstract = {DNS plays a critical role in the performance of smartdevices within cellular networks. Besides name resolution, DNS is commonly relied upon for directing users to nearby content caches for better performance. In light of this, it is surprising how little is known about the structure of cellular DNS and its effectiveness as a client localization method. In this paper we take a close look at cellular network DNS and uncover several features of cellular DNS, such as cellular network opaqueness and client-to-resolver inconsistency, that make it unsuitable for client localization in modern cellular networks. We study these issues in two leading mobile network markets – US and South Korea – using a collection of over 340 volunteer devices to probe the DNS infrastructure of each client’s cellular provider. We show the extent of the problem with regard to replica selection and compare its localization performance against public DNS alternatives. As a testament to cellular DNS’s poor localization, we find, surprisingly, that public DNS can render equal or better replica performance over 75% of the time.}, keywords = {publication}, pubstate = {published}, tppubtype = {conference} } @article{Decentralized, title = {Impact of heterogeneity and socioeconomic factors on individual behavior in decentralized sharing ecosystems}, author = {Arnau Gavaldà-Miralles and David R. Choffnes and John S. Otto and Mario A. Sánchez and Fabián E.
Bustamante and Luís A. N. Amaral and Jordi Duch and Roger Guimerà}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/PNAS-2014-Gavaldà-Miralles-1309389111.pdf}, year = {2014}, date = {2014-09-03}, journal = {Proc. of the National Academy of Sciences}, volume = {111}, number = {43}, pages = {15322-15327}, abstract = {Tens of millions of individuals around the world use decentralized content distribution systems, a fact of growing social, economic, and technological importance. These sharing systems are poorly understood because, unlike in other technosocial systems, it is difficult to gather large-scale data about user behavior. Here, we investigate user activity patterns and the socioeconomic factors that could explain the behavior. Our analysis reveals that (i) the ecosystem is heterogeneous at several levels: content types are heterogeneous, users specialize in a few content types, and countries are heterogeneous in user profiles; and (ii) there is a strong correlation between socioeconomic indicators of a country and users’ behavior. Our findings open a research area on the dynamics of decentralized sharing ecosystems and the socioeconomic factors affecting them, and may have implications for the design of algorithms and for policymaking.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{TRTGIBA, title = {A Time for Reliability – The Growing Importance of Being Always On}, author = {Zachary S. Bischof and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bischof-Sigcomm-Poster-2014.pdf}, year = {2014}, date = {2014-08-09}, journal = {Poster in Proc. of ACM SIGCOMM}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{CCC, title = {A cliq of content curators}, author = {Angela H. Jiang and Zachary S. Bischof and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Jiang-Sigcomm-Poster-2014-1.pdf}, year = {2014}, date = {2014-08-06}, journal = {Poster in Proc. of ACM SIGCOMM}, abstract = {A social news site presents user-curated content, ranked by popularity. Popular curators like Reddit or Facebook have become an effective way of crowdsourcing news or sharing personal opinions. Traditionally, these services require a centralized authority to aggregate data and determine what to display. However, the trust issues that arise from a centralized system are particularly damaging to the "Web democracy" that social news sites are meant to provide. We present cliq, a decentralized social news curator. cliq is a P2P-based social news curator that provides private and unbiased reporting. All users in cliq share responsibility for tracking and providing popular content. Any user data that cliq needs to store is also managed across the network. We first inform our design of cliq through an analysis of Reddit. We design a way to provide content curation without a persistent moderator or usernames. }, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{Mobilec, title = {Behind the Curtain: The importance of replica selection in next generation cellular networks}, author = {John Rula and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/dns-poster.pdf}, year = {2014}, date = {2014-08-03}, journal = {Poster in ACM Sigcomm}, abstract = {Smartdevices are becoming the primary or only Internet point of access for an ever larger fraction of the population.
Nearly a quarter of current web traffic is mobile, and recent industry studies have estimated a fourfold increase in global mobile data traffic by 2018, mainly driven by the content demands and growing number of smartphones and tablets [2]. The most recent CISCO VNI report estimates that by 2018, the majority of North American devices and connections will have 4G capability and, while 4G will account for 15% of worldwide connections then, these connections will be responsible for 51% of traffic. Cellular networks pose a challenge to content delivery networks (CDNs) given their opaque network structure, limited number of ingress points, and obfuscated DNS infrastructure. Previously, large cellular radio latencies meant CDN replica selection had little impact on the total end-to-end latency. However, the advancement of 4G networks such as LTE has lowered mobile device access latency to make it comparable with many existing broadband services, making the choice of content replica server a significant contributor to end-to-end performance. In general, but particularly in cellular networks, CDNs have limited signals for locating clients. Mobile IPs have been shown to be dynamic for mobile end hosts [1], and external entities such as CDNs are prevented from probing their mobile clients or their infrastructure by NAT and firewall policies implemented by cellular operators. In this poster, we present preliminary work looking at the impact of replica selection in next generation cellular networks. Using a collection of over 250 mobile end-hosts over a two-month period, we explore CDN replica selection in cellular networks, measuring the latency to content replicas for a selection of popular mobile websites. We find that clients in next generation radio technologies can see up to 400% differences in latency to selected replicas. We discover that, in large part, these poor selections are due to current localization approaches employed by CDNs, such as DNS redirection, which, while fairly effective for wired hosts, performs rather poorly within cellular networks, mainly due to cellular DNS behavior. }, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{Mobileb, title = {No "One-size fits all": Towards a principled approach for incentives in mobile crowdsourcing}, author = {John P. Rula and Vishnu Navda and Fabián E. Bustamante and Ranjita Bhagwan and Saikat Guha}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/incentives_hotmobile.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/hotmobile_2014.pdf}, year = {2014}, date = {2014-02-03}, journal = {In Proc. of the Fifteenth Workshop on Mobile Computing Systems and Applications (HotMobile)}, abstract = {We are becoming increasingly aware that the effectiveness of mobile crowdsourcing systems critically depends on the whims of their human participants, impacting everything from participant engagement to their compliance with the crowdsourced tasks. In response, a number of such systems have started to incorporate different incentive features aimed at a wide range of goals, from improving participation levels to extending the systems’ coverage and enhancing the quality of the collected data. Despite the many related efforts, the inclusion of incentives in crowdsourced systems has so far been mostly ad hoc, treating incentives as a wild-card response fitted for any occasion and goal.
Using data from a large, 2-day experiment with 96 participants at a corporate conference, we present an analysis of the impact of two incentive structures on the recruitment, compliance and user effort of a basic mobile crowdsourced service. We build on these preliminary results to argue for a principled approach for selecting incentives and incentive structures to match the variety of requirements of mobile crowdsourcing applications and discuss key issues in working toward that goal.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @techreport{DASUe, title = {Dasu: A measurement experimentation platform at the Internet’s edge}, author = {Mario A. Sánchez and John S. Otto and Zachary S. Bischof and David R. Choffnes and Fabián E. Bustamante and Balachander Krishnamurthy and Walter Willinger}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/NU-EECS-13-09.pdf}, year = {2013}, date = {2013-09-06}, number = {NWU-EECS-13-09}, institution = {Department of Computer Science, Northwestern University}, abstract = {Poor visibility into the network hampers progress in a number of important research areas, from network troubleshooting to Internet topology and performance mapping. This persistent, well-known problem has served as motivation for numerous proposals to build or extend existing platforms by recruiting larger, more diverse vantage points. However, capturing the edge of the network remains an elusive goal. We argue that at its root the problem is one of incentives. Today’s measurement platforms build on the assumption that the goals of experimenters and those hosting the platform are the same. As much of the Internet growth occurs in residential broadband networks, this assumption no longer holds. We present Dasu, a measurement experimentation platform built on an alternate model that explicitly aligns the objectives of the experimenters with those of the users hosting the platform. Dasu is designed to support both network measurement experimentation and broadband characterization. In this paper, we discuss some of the challenges we faced building a platform for the Internet’s edge, describe our current design and implementation, and illustrate the unique perspective our current deployment brings to Internet measurement. Dasu has been publicly available since July 2010 and is currently in use by over 95,000 users with a heterogeneous set of connections spread across 1,802 networks and 151 countries. }, keywords = {publication}, pubstate = {published}, tppubtype = {techreport} } @article{Swarms, title = {The hidden locality in swarms}, author = {John S. Otto and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/P2P2013Otto.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/P2P2013Otto-slides.pptx}, year = {2013}, date = {2013-09-03}, journal = {In Proc. of IEEE P2P}, abstract = {People use P2P systems such as BitTorrent to share an unprecedented variety and amount of content with others around the world. The random connection pattern used by BitTorrent has been shown to result in reduced performance for users and costly cross-ISP traffic. Although several client-side systems have been proposed to improve the locality of BitTorrent traffic, their effectiveness is limited by the availability of local peers. We show that sufficient locality is present in swarms -- if one looks at the right time.
We find that 50% of ISPs have at least five local peers online during the ISP's peak hour, typically in the evening, compared to only 20% of ISPs during the median hour. To better discover these local peers, we show how to increase the overall peer discovery rate by over two orders of magnitude using client-side techniques: leveraging additional trackers, requesting more peers per sample, and sampling more frequently. We propose an approach to predict future availability of local peers based on observed diurnal patterns. This approach enables peers to selectively apply these techniques to minimize undue load on trackers.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @inproceedings{DASUd, title = {Characterizing Broadband Services with Dasu}, author = {Zachary S. Bischof and Mario A. Sánchez and John S. Otto and John P. Rula and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/nsdi13-broadband-demo.pdf}, year = {2013}, date = {2013-04-09}, journal = {Demonstration at USENIX NSDI}, abstract = {We present the broadband characterization functionality of Dasu, showcase its user interface, and include side-by-side comparisons of competing broadband services. This poster complements Sánchez et al. (appearing in NSDI) and its related demo submission; both focus on the design and implementation of Dasu as an experimental platform. As mentioned in the NSDI work, Dasu partially relies on service characterization as an incentive for adoption. This side of Dasu is a prototype implementation of our crowdsourcing-based, end-system approach to broadband characterization. By leveraging monitoring information from local hosts and home routers, our approach can attain scalability, continuity and an end-user perspective while avoiding the potential pitfalls of similar models.}, keywords = {publication}, pubstate = {published}, tppubtype = {inproceedings} } @article{DASUc, title = {Experiments at the Internet's Edge with Dasu}, author = {Mario A. Sánchez and John S. Otto and Zachary S. Bischof and David R. Choffnes and Fabián E. Bustamante and Walter Willinger}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/nsdi13-dasu-experiment-demo.pdf}, year = {2013}, date = {2013-04-06}, journal = {Demonstration at USENIX NSDI}, abstract = {Dasu is an extensible measurement experimentation platform for the Internet's edge. Dasu is composed of a distributed collection of clients, hosted by participating end hosts, and a core set of services for managing and coordinating experimentation. Dasu supports and builds on broadband characterization as an incentive for adoption to capture the network and service diversity of the commercial Internet. This demo presents Dasu in action, focusing on its experiment delegation mechanism and showing how it enables third-party experimentation and maintains security and accountability.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{DASUb, title = {Dasu: Pushing Experiments to the Internet's Edge}, author = {Mario A. Sánchez and John S. Otto and Zachary S. Bischof and David R. Choffnes and Fabián E. Bustamante and Balachander Krishnamurthy and Walter Willinger}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/dasu-measurement.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Sanchez2013NSDISlides.pdf}, year = {2013}, date = {2013-04-03}, journal = {In Proc.
of the USENIX Symposium on Networked Systems Design and Implementation (NSDI)}, abstract = {We present Dasu, a measurement experimentation platform for the Internet’s edge. Dasu supports both controlled network experimentation and broadband characterization, building on public interest in the latter to gain the adoption necessary for the former. We discuss some of the challenges we faced building a platform for the Internet’s edge, describe our current design and implementation, and illustrate the unique perspective it brings to Internet measurement. Dasu has been publicly available since July 2010 and has been installed by over 90,000 users with a heterogeneous set of connections spread across 1,802 networks and 147 countries.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{TBCH, title = {Trying Broadband Characterization at Home}, author = {Mario A. Sánchez and John S. Otto and Zachary S. Bischof and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/pam-upnp.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Slides.pdf}, year = {2013}, date = {2013-03-03}, journal = {In Proc. of the Passive and Active Measurement Conference (PAM)}, abstract = {In recent years the quantity and diversity of Internet-enabled consumer devices in the home have increased significantly. These trends complicate device usability and home resource management and have implications for crowdsourced approaches to broadband characterization. The UPnP protocol has emerged as an open standard for device and service discovery to simplify device usability and resource management in home networks. In this work, we leverage UPnP to understand the dynamics of home device usage, at both a macro and micro level, and to sketch an effective approach to broadband characterization that runs behind the last meter. Using UPnP measurements collected from over 13K end users, we show that while home networks can be quite complex, the number of devices that actively and regularly connect to the Internet is limited. Furthermore, we find a high correlation between the number of UPnP-enabled devices in home networks and the presence of UPnP-enabled gateways, and show how this can be leveraged for effective broadband characterization.}, keywords = {Edge Experimentation, publication}, pubstate = {published}, tppubtype = {article} } @article{CDNEDNS, title = {Content delivery and the natural evolution of DNS - Remote DNS Trends, Performance Issues and Alternative Solutions}, author = {John S. Otto and Mario A. Sánchez and John P. Rula and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/OttoIMC2012.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Otto2012IMCSlides.pptx}, year = {2012}, date = {2012-11-03}, journal = {In Proc. of IMC}, abstract = {Content Delivery Networks (CDNs) rely on the Domain Name System (DNS) for replica server selection. DNS-based server selection builds on the assumption that, in the absence of information about the client's actual network location, the location of a client's DNS resolver provides a good approximation. The recent growth of remote DNS services breaks this assumption and can negatively impact clients' web performance. In this paper, we assess the end-to-end impact of using remote DNS services on CDN performance and present the first evaluation of an industry-proposed solution to the problem.
We find that remote DNS usage can indeed significantly impact clients' web performance and that the proposed solution, if available, can effectively address the problem for most clients. Considering the performance cost of remote DNS usage and the limited adoption base of the industry-proposed solution, we present and evaluate an alternative approach, Direct Resolution, to readily obtain comparable performance improvements without requiring CDN or DNS participation. }, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{UDS, title = {Up, Down and Around the Stack: ISP Characterization from Network Intensive Applications}, author = {Zachary S. Bischof and John S. Otto and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/ZBischof12WMUST.pdf}, year = {2012}, date = {2012-10-06}, journal = {In Proc. of ACM SIGCOMM Workshop on Measurements Up the STack (W-MUST)}, abstract = {Broadband characterization has recently attracted much attention from the research community and the general public. Given the important business and policy implications of residential Internet service characterization, recent years have brought a variety of approaches to profiling Internet services, ranging from Web-based platforms to dedicated infrastructure inside home networks. We have previously argued that network-intensive applications provide an almost ideal vantage point for broadband characterization at sufficient scale, nearly continuously and from end users. While we have shown that the approach is indeed effective at service characterization and can enable performance comparisons between service providers and geographic regions, a key unanswered question is how well the performance characteristics captured by these systems can predict the overall user experience with different applications. In this paper, using BitTorrent as an example host application, we present initial results that demonstrate how to obtain estimates of bandwidth and latency of a network connection by leveraging passive monitoring and limited active measurements from network-intensive applications. We then analyze user-experienced web performance under a variety of network conditions and show how estimates from a network-intensive application can serve as good web performance predictors.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{namehelp, title = {namehelp: intelligent client-side DNS resolution}, author = {John S. Otto and Mario A. Sánchez and John P. Rula and Ted Stein and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/OttoSigcommPoster2012.pdf}, year = {2012}, date = {2012-10-03}, journal = {In ACM SIGCOMM CCR Special Issue}, volume = {42}, number = {4}, abstract = {The Domain Name System (DNS) is a fundamental component of today’s Internet. Recent years have seen radical changes to DNS with increases in usage of remote DNS and public DNS services such as OpenDNS. Given the close relationship between DNS and Content Delivery Networks (CDNs) and the pervasive use of CDNs by many popular applications including web browsing and real-time entertainment services, it is important to understand the impact of remote and public DNS services on users’ overall experience on the Web.
This work presents a tool, namehelp, which comparatively evaluates DNS services in terms of the web performance they provide, and implements an end-host solution to address the performance impact of remote DNS on CDNs. The demonstration will show the functionality of namehelp with online results for its performance improvements.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{CCMBO, title = {Crowd (Soft) Control: Moving Beyond the Opportunistic}, author = {John Rula and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/JRula12HotMobile.pdf http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/JRula12HotMobileSlides.pdf}, year = {2012}, date = {2012-02-03}, journal = {In Proc. of the Thirteenth Workshop on Mobile Computing Systems and Applications (HotMobile)}, abstract = {A number of novel wireless networked services, ranging from participatory sensing to social networking, leverage the increasing capabilities of mobile devices and the movement of the individuals carrying them. For many of these systems, their effectiveness fundamentally depends on coverage and the particular mobility patterns of the participants. Given the strong spatial and temporal regularity of human mobility, the needed coverage can typically only be attained through a large participant base. In this paper we explore an alternative approach to attain coverage without scale -- (soft) controlling the movement of participants. We present Crowd Soft Control (CSC), an approach to exert limited control over the temporal and spatial movements of mobile users by leveraging the built-in incentives of location-based gaming and social applications. By pairing network services with these location-based apps, CSC allows researchers to use an application's incentives (e.g., game objectives) to control the movement of participating users, increasing the effectiveness and efficiency of the associated network service. After outlining the case for Crowd Soft Control, we present an initial prototype of our ideas and discuss potential benefits and costs in the context of two case studies. }, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{DSND, title = {Distributed Systems and Natural Disasters -- BitTorrent as a Global Witness}, author = {Zachary S. Bischof and John S. Otto and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/ZBischof11SWID.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/ZBischof11SWID_Slides.pdf}, year = {2011}, date = {2011-12-03}, journal = {In Proc. of CoNEXT Special Workshop on the Internet and Disasters (SWID)}, abstract = {Peer-to-peer (P2P) systems represent some of the largest distributed systems in today's Internet. Among P2P systems, BitTorrent is the most popular, potentially accounting for 20-50% of P2P file-sharing traffic. In this paper, we argue that this popularity can be leveraged to monitor the impact of natural disasters and political unrest on the Internet. We focus our analysis on the 2011 Tohoku earthquake and tsunami and use a view from BitTorrent to show that it is possible to identify specific regions and network links where Internet usage and connectivity were most affected.}, keywords = {publication, R3}, pubstate = {published}, tppubtype = {article} } @article{CISPCNE, title = {Crowdsourcing ISP Characterization to the Network Edge}, author = {Zachary S. Bischof and John S. Otto and Mario A. Sánchez and John P.
Rula and David R. Choffnes and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/ZBischof11WMUST.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/ZBischof11WMUST_Slides.pdf}, year = {2011}, date = {2011-08-09}, journal = {In Proc. of ACM SIGCOMM Workshop on Measurements Up the STack (W-MUST)}, abstract = {Evaluating and characterizing Internet Service Providers (ISPs) is critical to subscribers shopping for alternative ISPs, companies providing reliable Internet services, and governments surveying the coverage of broadband services to their citizens. Ideally, ISP characterization should be done at scale, continuously, and from end users. While there has been significant progress toward this end, current approaches exhibit apparently unavoidable tradeoffs between coverage, continuous monitoring and capturing user-perceived performance. In this paper, we argue that network-intensive applications running on end systems avoid these tradeoffs, thereby offering an ideal platform for ISP characterization. Based on data collected from 500,000 peer-to-peer BitTorrent users across 3,150 networks, together with the reported results from the U.K. Ofcom/SamKnows studies, we show the feasibility of this approach to characterize the service that subscribers can expect from a particular ISP. We discuss remaining research challenges and design requirements for a solution that enables efficient and accurate ISP characterization at an Internet scale. }, keywords = {Edge Experimentation, publication}, pubstate = {published}, tppubtype = {article} } @article{BME, title = {On Blind Mice and the Elephant -- Understanding the Network Impact of a Large Distributed System}, author = {John S. Otto and Mario A. Sánchez and David R. Choffnes and Fabián E. Bustamante and Georgos Siganos}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/JOtto11SIGCOMM.pdf http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/OttoSigcomm2011.pptx}, year = {2011}, date = {2011-08-06}, journal = {In Proc. of ACM SIGCOMM}, abstract = {A thorough understanding of the network impact of emerging large-scale distributed systems -- where traffic flows and what it costs -- must encompass users' behavior, the traffic they generate and the topology over which that traffic flows. In the case of BitTorrent, however, previous studies have been limited by narrow perspectives that restrict such analysis. This paper presents a comprehensive view of BitTorrent, using data from a representative set of 500,000 users sampled over a two-year period, located in 169 countries and 3,150 networks. This unique perspective captures unseen trends and reveals several unexpected features of the largest peer-to-peer system. For instance, over the past year total BitTorrent traffic has increased by 12%, driven by 25% increases in per-peer hourly download volume despite a 10% decrease in the average number of online peers. We also observe stronger diurnal usage patterns and, surprisingly given the bandwidth-intensive nature of the application, a close alignment between these patterns and overall traffic. Considering the aggregated traffic across access links, this has potential implications for BitTorrent-associated costs for Internet Service Providers (ISPs). Using data from a transit ISP, we find a disproportionately large impact under a commonly used burstable (95th-percentile) billing model.
Last, when examining BitTorrent traffic's paths, we find that for over half its users, most network traffic never reaches large transit networks, but is instead carried by small transit ISPs. This raises questions about the effectiveness of most in-network monitoring systems at capturing trends in peer-to-peer traffic and further motivates our approach.}, keywords = {publication, R3}, pubstate = {published}, tppubtype = {article} } @article{DASU, title = {Dasu - ISP Characterization from the Edge: A BitTorrent Implementation}, author = {Mario A. Sánchez and John S. Otto and Zachary S. Bischof and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/MSanchez11SIGCOMM.pdf}, year = {2011}, date = {2011-08-03}, journal = {Demo in Proc. of ACM SIGCOMM}, abstract = {Evaluating and characterizing access ISPs is critical to consumers shopping for alternative services and governments surveying the availability of broadband services to their citizens. We present Dasu, a service for crowdsourcing ISP characterization to the edge of the network. Dasu is implemented as an extension to a popular BitTorrent client and has been available since July 2010. While the prototype uses BitTorrent as its host application, its design is agnostic to the particular host application. The demo showcases our current implementation using both a prerecorded execution trace and a live run.}, keywords = {Edge Experimentation, publication}, pubstate = {published}, tppubtype = {article} } @article{ENMUMT, title = {Environmental Noise Mapping Using Measurements in Transit}, author = {Gareth Bennett and Eoin A. King and Jan Curn and Vinny Cahill and Fabián E. Bustamante and Henry J. Rice}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/ISMA10-Bennett.pdf}, year = {2010}, date = {2010-09-03}, journal = {In Proc. of the International Conference on Noise and Vibration Engineering (ISMA)}, abstract = {Due to the ever-increasing level of environmental noise that the EU population is exposed to, all countries are directed to disseminate community noise level exposures to the public in accordance with EU Directive 2002/49/EC. Environmental noise maps are used for this purpose and as a means to avoid, prevent or reduce the harmful effects caused by exposure to environmental noise. There is no common standard to which these maps are generated in the EU, and indeed these maps are in most cases inaccurate due to poorly informed predictive models. This paper develops a novel environmental noise monitoring methodology which will allow accurate road noise measurements to replace erroneous source model approximations in the generation of noise maps. The approach proposes the acquisition of sound levels and position coordinates by instrumented vehicles such as bicycles or cars or by pedestrians equipped with a smartphone. The accumulation of large amounts of data over time will result in extremely high spatial and temporal resolution, resulting in an accurate measurement of environmental noise.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{CSLNED, title = {Crowdsourcing Service-Level Network Event Detection}, author = {David R. Choffnes and Fabián E. Bustamante and Zihui Ge}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes10SIGCOMM.pdf}, year = {2010}, date = {2010-08-03}, journal = {In Proc.
of ACM SIGCOMM}, abstract = {The user experience for networked applications is becoming a key benchmark for customers and network providers. Perceived user experience is largely determined by the frequency, duration and severity of network events that impact a service. While today’s networks implement sophisticated infrastructure that issues alarms for most failures, there remains a class of silent outages (e.g., caused by configuration errors) that are not detected. Further, existing alarms provide little information to help operators understand the impact of network events on services. Attempts to address this through infrastructure that monitors end-to-end performance for customers have been hampered by the cost of deployment and by the volume of data generated by these solutions. We present an alternative approach that pushes monitoring to applications on end systems and uses their collective view to detect network events and their impact on services -- an approach we call Crowdsourcing Event Monitoring (CEM). This paper presents a general framework for CEM systems and demonstrates its effectiveness for a P2P application using a large dataset gathered from BitTorrent users and confirmed network events from two ISPs. We discuss how we designed and deployed a prototype CEM implementation as an extension to BitTorrent. This system performs online service-level network event detection through passive monitoring and correlation of performance in end-users’ applications.}, keywords = {publication, R3}, pubstate = {published}, tppubtype = {article} } @article{SB, title = {Strange Bedfellows: Communities in BitTorrent}, author = {David R. Choffnes and Jordi Duch and Dean Malmgren and Roger Guimera and Fabián E. Bustamante and Luis Amaral}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes10IPTPS.pdf}, year = {2010}, date = {2010-04-06}, journal = {In Proc. of the 9th International Workshop on Peer-to-Peer Systems (IPTPS)}, abstract = {While P2P systems benefit from large numbers of interconnected nodes, each of these connections provides an opportunity for eavesdropping. Using only the connection patterns gathered from 10,000 BitTorrent (BT) users during a one-month period, we determine whether randomized connection patterns give rise to communities of users. Even though connections in BT require not only shared interest in content, but also concurrent sessions, we find that strong communities naturally form -- users inside a typical community are 5 to 25 times more likely to connect to each other than to users outside. These strong communities enable guilt by association, where the behavior of an entire community of users can be inferred by monitoring one of its members. Our study shows that through a single observation point, an attacker trying to identify such communities can uncover 50% of the network within a distance of two hops. Finally, we propose and evaluate a practical solution that mitigates this threat.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{PTEIS, title = {Pitfalls for Testbed Evaluations of Internet Systems}, author = {David R. Choffnes and Fabián E.
Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes10CCR.pdf}, year = {2010}, date = {2010-04-03}, journal = {In ACM SIGCOMM CCR}, abstract = {Today's open platforms for network measurement and distributed system research, which we collectively refer to as testbeds in this article, provide opportunities for controllable experimentation and evaluations of systems at the scale of hundreds or thousands of hosts. In this article, we identify several issues with extending results from such platforms to Internet-wide perspectives. Specifically, we try to quantify the level of inaccuracy and incompleteness of testbed results when applied to the context of a large-scale peer-to-peer (P2P) system. Based on our results, we emphasize the importance of measurements in the appropriate environment when evaluating Internet-scale systems.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{NPE, title = {Network positioning from the edge: An empirical study of the effectiveness of network positioning in P2P systems}, author = {David R. Choffnes and Mario A. Sánchez and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes10Infocom.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes10InfocomFinal.pdf}, year = {2010}, date = {2010-03-03}, journal = {In Proc. of IEEE INFOCOM}, abstract = {Network positioning systems provide an important service to large-scale P2P systems, potentially enabling clients to achieve higher performance, reduce cross-ISP traffic and improve the robustness of the system to failures. Because traces representative of this environment are generally unavailable, and there is no platform suited for experimentation at the appropriate scale, network positioning systems have been commonly implemented and evaluated in simulation and on research testbeds. The performance of network positioning remains an open question for large deployments at the edges of the network. This paper evaluates how four key classes of network positioning systems fare when deployed at scale and measured in P2P systems where they are used. Using 2 billion network measurements gathered from more than 43,000 IP addresses probing over 8 million other IPs worldwide, we show that network positioning exhibits noticeably worse performance than previously reported in studies conducted on research testbeds. To explain this result, we identify several key properties of this environment that call into question fundamental assumptions driving network positioning research.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{TTc, title = {Taming the Torrent}, author = {David R. Choffnes and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes10login.pdf}, year = {2010}, date = {2010-02-06}, journal = {In USENIX}, keywords = {publication, R3}, pubstate = {published}, tppubtype = {article} } @article{POPI, title = {POPI: A User-level Tool for Inferring Router Packet Forwarding Priority}, author = {Guohan Lu and Yan Chen and Stefan Birrer and Fabián E. Bustamante and Xing Li}, url = {https://ieeexplore.ieee.org/document/5233840}, year = {2010}, date = {2010-02-03}, journal = {In IEEE/ACM Transactions on Networking (ToN)}, abstract = {Packet forwarding prioritization (PFP) in routers is one of the mechanisms commonly available to network operators.
PFP can have a significant impact on the accuracy of network measurements, the performance of applications and the effectiveness of network troubleshooting procedures. Despite its potential impacts, no information on PFP settings is readily available to end users. In this paper, we present an end-to-end approach for PFP inference and its associated tool, POPI. This is the first attempt to infer router packet forwarding priority through end-to-end measurement. POPI enables users to discover such network policies through measurements of packet losses of different packet types. We evaluated our approach via statistical analysis, simulation and wide-area experimentation in PlanetLab. We employed POPI to analyze 156 paths among 162 PlanetLab sites. POPI flagged 15 paths with multiple priorities, 13 of which were further validated through hop-by-hop loss rate measurements. In addition, we surveyed all related network operators and received responses for about half of them, all confirming our inferences. We also compared POPI with inference mechanisms based on other metrics, such as packet reordering [called out-of-order (OOO)]. OOO is unable to find many priority paths, such as those implemented via traffic policing. Interestingly, however, we found that it can detect the existence of mechanisms that induce delay differences among packet types, such as slow processing paths in the router and port-based load sharing. }, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{WSEEI, title = {Where the Sidewalk Ends: Extending the Internet AS Graph Using Traceroutes From P2P Users}, author = {Kai Chen and David R. Choffnes and Rahul Potharaju and Yan Chen and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/KChen09Conext.pdf}, year = {2009}, date = {2009-12-06}, journal = {In Proc. of CoNEXT}, abstract = {An accurate Internet topology graph is important in many areas of networking, from deciding ISP business relationships to diagnosing network anomalies. Most Internet mapping efforts have derived the network structure, at the level of interconnected autonomous systems (ASes), from a limited number of either BGP- or traceroute-based data sources. While techniques for charting the topology continue to improve, the growth of the number of vantage points is significantly outpaced by the rapid growth of the Internet. In this paper, we argue that a promising approach to revealing the hidden areas of the Internet topology is through active measurement from an observation platform that scales with the growing Internet. By leveraging measurements performed by an extension to a popular P2P system, we show that this approach indeed exposes significant new topological information. Based on traceroute measurements from more than 992,000 IPs in over 3,700 ASes distributed across the Internet hierarchy, our proposed heuristics identify 23,914 new AS links not visible in the publicly-available BGP data -- 12.86% more customer-provider links and 40.99% more peering links than previously reported. We validate our heuristics using data from a tier-1 ISP and show that they correctly filter out all false links introduced by public IP-to-AS mapping.
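To make the traceroute-to-AS-link step described in the Where the Sidewalk Ends entry concrete, here is a minimal sketch in Python. It assumes a pre-built IP-to-AS mapping and a BGP-derived link set; the names and sample data are illustrative placeholders, not the paper's actual heuristics (which also filter out false links introduced by that mapping).

# Illustrative sketch (not the paper's heuristics): collapse IP-level
# traceroute paths into AS-level links and report those absent from a
# BGP-derived link set. `ip_to_as` stands in for a public IP-to-AS mapping.

def as_path(traceroute_ips, ip_to_as):
    """Collapse an IP-level path into an AS-level path, skipping unmapped hops."""
    path = []
    for ip in traceroute_ips:
        asn = ip_to_as.get(ip)
        if asn is not None and (not path or path[-1] != asn):
            path.append(asn)
    return path

def new_as_links(traceroutes, ip_to_as, bgp_links):
    """Return undirected AS-AS adjacencies seen in traceroutes but not in BGP data."""
    seen = set()
    for tr in traceroutes:
        p = as_path(tr, ip_to_as)
        seen.update(frozenset(pair) for pair in zip(p, p[1:]))
    return {link for link in seen if link not in bgp_links}

# Example: one traceroute revealing a link (AS65001, AS65002) that a BGP
# collector never observed. AS numbers are from the private-use range.
ip_to_as = {"10.0.0.1": 65001, "10.0.1.1": 65002, "10.0.2.1": 65003}
bgp = {frozenset({65002, 65003})}
print(new_as_links([["10.0.0.1", "10.0.1.1", "10.0.2.1"]], ip_to_as, bgp))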
We have made the identified set of links and their inferred relationships publicly available.}, keywords = {publication, R3}, pubstate = {published}, tppubtype = {article} } @article{DBAc, title = {Drafting Behind Akamai: Inferring Network Conditions Based on CDN Redirections}, author = {Ao-Jan Su and David R. Choffnes and Aleksandar Kuzmanovic and Fabián E. Bustamante}, url = {https://ieeexplore.ieee.org/document/5238553}, year = {2009}, date = {2009-12-03}, journal = {In IEEE/ACM Transactions on Networking (ToN)}, volume = {17}, number = {6}, abstract = {To enhance Web browsing experiences, content distribution networks (CDNs) move Web content "closer" to clients by caching copies of Web objects on thousands of servers worldwide. Additionally, to minimize client download times, such systems perform extensive network and server measurements and use them to redirect clients to different servers over short time scales. In this paper, we explore techniques for inferring and exploiting network measurements performed by the largest CDN, Akamai; our objective is to locate and utilize quality Internet paths without performing extensive path probing or monitoring. Our contributions are threefold. First, we conduct a broad measurement study of Akamai's CDN. We probe Akamai's network from 140 PlanetLab (PL) vantage points for two months. We find that Akamai redirection times, while slightly higher than advertised, are sufficiently low to be useful for network control. Second, we empirically show that Akamai redirections overwhelmingly correlate with network latencies on the paths between clients and the Akamai servers. Finally, we illustrate how large-scale overlay networks can exploit Akamai redirections to identify the best detouring nodes for one-hop source routing. Our research shows that in more than 50% of investigated scenarios, it is better to route through the nodes "recommended" by Akamai than to use the direct paths. Because this is not the case for the rest of the scenarios, we develop low-overhead pruning algorithms that avoid Akamai-driven paths when they are not beneficial. Because these Akamai nodes are part of a closed system, we provide a method for mapping Akamai-recommended paths to those in a generic overlay and demonstrate that these one-hop paths indeed outperform direct ones. }, keywords = {publication, R3}, pubstate = {published}, tppubtype = {article} } @article{DCTAS, title = {Distributed or Centralized Traffic Advisory Systems -- The Application's Take}, author = {John S. Otto and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/JOtto09SECON.pdf}, year = {2009}, date = {2009-06-06}, journal = {In Proc. of IEEE SECON}, abstract = {We consider the problem of data dissemination in vehicular networks. Our main goal is to compare the application-level performance of fully distributed and centralized data dissemination approaches in the context of traffic advisory systems. Vehicular networks are emerging as a new distributed system environment with myriad promising applications. Wirelessly-connected, GPS-equipped vehicles can be used, for instance, as probes for traffic advisory or pavement condition information services with significant improvements in cost, coverage and accuracy. There is an ongoing discussion on the pros and cons of alternative approaches to data distribution for these applications.
Proposed centralized, or infrastructure-based, models rely on road-side equipment to upload information to a central location for later use. Distributed approaches take advantage of the direct exchanges between participating vehicles to achieve higher scalability at the potential cost of data consistency. While distributed solutions can significantly reduce infrastructure deployment and maintenance costs, it is unclear what the impact of "imprecise" information is on an application or what level of adoption is needed for this model to be effective. This paper investigates the inherent trade-offs in the adoption of distributed or centralized approaches to a traffic advisory service, a commonly proposed application. We base our analysis on a measurement study of signal propagation in urban settings and extensive simulation-based experimentation on the Chicago road network.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{DBAC, title = {Down the Block and Around the Corner -- The Impact of Radio Propagation on Inter-vehicle Wireless Communication}, author = {John S. Otto and Fabián E. Bustamante and Randall A. Berry}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/JOtto09ICDCS.pdf}, year = {2009}, date = {2009-06-03}, journal = {In Proc. of IEEE International Conference on Distributed Computing Systems (ICDCS)}, abstract = {Vehicular networks are emerging as a new distributed system environment with myriad possible applications. Most studies on vehicular networks are carried out via simulation, given the logistical and economical problems with large-scale deployments. This paper investigates the impact of realistic radio propagation settings on the evaluation of VANET-based systems. Using a set of instrumented cars, we collected IEEE 802.11b signal propagation measurements between vehicles in a variety of urban and suburban environments. We found that signal propagation between vehicles varies in different settings, especially between line-of-sight ("down the block") and non line-of-sight ("around the corner") communication in the same setting. Using a probabilistic shadowing model, we evaluate the impact of different parameter settings on the performance of an epidemic data dissemination protocol and discuss the implications of our findings. We also suggest a variation of a basic signal propagation model that incorporates additional realism without sacrificing scalability by taking advantage of environmental information, including node locations and street information.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{EMRPBD, title = {On the Effectiveness of Measurement Reuse for Performance-Based Detouring}, author = {David R. Choffnes and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes09Infocom.pdf}, year = {2009}, date = {2009-04-03}, journal = {In Proc. of IEEE INFOCOM}, abstract = {For both technological and economic reasons, the default path between two end systems in the wide-area Internet can be suboptimal. This observation has motivated a number of systems that attempt to improve reliability and performance by routing over one or more hops in an overlay. Most of the proposed solutions, however, fall at an extreme in the cost-performance trade-off.
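For readers unfamiliar with the "probabilistic shadowing model" mentioned in the Down the Block and Around the Corner entry above, the sketch below implements the textbook log-normal shadowing formulation; the parameter values are illustrative assumptions, not the ones measured in the paper.

import math
import random

# Log-normal shadowing (textbook form; the paper's exact parameterization
# may differ): received power falls off with log-distance plus a Gaussian
# shadowing term, so reception at a given range is probabilistic.

def received_power_dbm(tx_dbm, d, d0=1.0, pl_d0_db=40.0, n=2.8, sigma_db=6.0):
    """Received power in dBm at distance d (meters).

    pl_d0_db: path loss at reference distance d0
    n:        path-loss exponent (larger in cluttered, non line-of-sight settings)
    sigma_db: std. dev. of the shadowing term
    All parameter values here are illustrative assumptions.
    """
    path_loss = pl_d0_db + 10.0 * n * math.log10(d / d0)
    shadowing = random.gauss(0.0, sigma_db)
    return tx_dbm - path_loss + shadowing

def reception_prob(d, threshold_dbm=-85.0, trials=10000):
    """Estimate the probability a packet arrives above a sensitivity threshold."""
    ok = sum(received_power_dbm(15.0, d) >= threshold_dbm for _ in range(trials))
    return ok / trials

# "Down the block" vs. "around the corner" can be approximated by switching
# n and sigma; here we just show how reception probability decays with distance.
for dist in (50, 100, 200):
    print(dist, "m:", round(reception_prob(dist), 2))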
While some provide near-optimal performance with an unscalable measurement overhead, others avoid measurement when selecting routes around network failures but make no attempt to optimize performance. This paper presents an experimental evaluation of an alternative approach to scalable, performance-based detouring based on the strategic reuse of measurements from other large-scale distributed systems, namely content distribution networks (CDNs). By relying on CDN redirections as hints on network conditions, higher-performance paths are readily found with little overhead and no active network measurement. We report results from a study of more than 13,700 paths between 170 widely-distributed hosts over a three-week period, showing the advantages of this approach. We demonstrate the practicality of our approach by implementing an FTP suite that uses our publicly available SideStep library to take advantage of these improved Internet routes.}, keywords = {publication, R3}, pubstate = {published}, tppubtype = {article} } @article{P2PPSSS, title = { Improving Peer-to-Peer Performance Through Server-Side Scheduling}, author = {Yi Qiao and Dong Lu and Fabián E. Bustamante and Peter Dinda and Stefan Birrer}, url = {https://dl.acm.org/citation.cfm?id=1455260}, year = {2008}, date = {2008-12-03}, journal = {In ACM Transactions on Computer Systems (TOCS)}, volume = {26}, number = {4}, abstract = {We show how to significantly improve the mean response time seen by both uploaders and downloaders in peer-to-peer data-sharing systems. Our work is motivated by the observation that response times are largely determined by the performance of the peers serving the requested objects, that is, by the peers in their capacity as servers. With this in mind, we take a close look at this server side of peers, characterizing its workload by collecting and examining an extensive set of traces. Using trace-driven simulation, we demonstrate the promise and potential problems with scheduling policies based on shortest-remaining-processing-time (SRPT), the algorithm known to be optimal for minimizing mean response time. The key challenge to using SRPT in this context is determining request service times. In addressing this challenge, we introduce two new estimators that enable predictive SRPT scheduling policies that closely approach the performance of ideal SRPT. We evaluate our approach through extensive single-server and system-level simulation coupled with real Internet deployment and experimentation.}, keywords = {P2P, publication}, pubstate = {published}, tppubtype = {article} } @article{TTP2P, title = {Taming the Torrent: A practical approach to reducing cross-ISP traffic in P2P systems}, author = {David R. Choffnes and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes08Sigcomm.pdf}, year = {2008}, date = {2008-08-03}, journal = {In Proc. of ACM SIGCOMM}, abstract = {Peer-to-peer (P2P) systems, which provide a variety of popular services, such as file sharing, video streaming and voice-over-IP, contribute a significant portion of today's Internet traffic. By building overlay networks that are oblivious to the underlying Internet topology and routing, these systems have become one of the greatest traffic engineering challenges for Internet Service Providers (ISPs) and the source of costly data traffic flows.
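The predictive SRPT policy summarized in the Improving Peer-to-Peer Performance Through Server-Side Scheduling entry above can be sketched in a few lines. The estimator is deliberately naive here (a fixed size estimate per request) and stands in for the paper's two predictive estimators; a real server would also preempt and re-estimate in-flight requests.

import heapq

# Minimal sketch of predictive SRPT scheduling (not the paper's estimators):
# requests are served shortest-estimated-remaining-time first.

class Request:
    def __init__(self, rid, size_estimate):
        self.rid = rid
        self.remaining = size_estimate   # estimated bytes left to serve

class SRPTQueue:
    def __init__(self):
        self._heap = []
        self._seq = 0                    # tie-breaker for equal estimates

    def add(self, req):
        heapq.heappush(self._heap, (req.remaining, self._seq, req))
        self._seq += 1

    def next_request(self):
        return heapq.heappop(self._heap)[2] if self._heap else None

q = SRPTQueue()
for rid, est in [("a", 4_000_000), ("b", 64_000), ("c", 900_000)]:
    q.add(Request(rid, est))
# Served order: b, c, a -- small transfers never wait behind large ones.
while (r := q.next_request()) is not None:
    print(r.rid)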
In an attempt to reduce these operational costs, ISPs have tried to shape, block or otherwise limit P2P traffic, much to the chagrin of their subscribers, who consistently find ways to eschew these controls or simply switch providers. In this paper, we present the design, deployment and evaluation of an approach to reducing this costly cross-ISP traffic without sacrificing system performance. Our approach recycles network views gathered at low cost from content distribution networks to drive biased neighbor selection without any path monitoring or probing. Using results collected from a deployment in BitTorrent with over 120,000 users in nearly 3,000 networks, we show that our lightweight approach significantly reduces cross-ISP traffic and over 33% of the time it selects peers along paths that are within a single autonomous system (AS). Further, we find that our system locates peers along paths that have two orders of magnitude lower latency and 30% lower loss rates than those picked at random, and that these high-quality paths can lead to significant improvements in transfer rates. In challenged settings where peers are overloaded in terms of available bandwidth, our approach provides 31% average download-rate improvement; in environments with large available bandwidth, it increases download rates by 207% on average (and improves median rates by 883%). DATA SET: As we state in the paper, data used for this study will be made available upon request to edgescope@aqua-lab.org. For privacy reasons, the data is provided at an AS-level granularity. Note that you will have to agree to these terms before we grant access to the data. Also note that the dataset consists of 10s of GB of compressed data, so plan accordingly. }, keywords = {publication, R3}, pubstate = {published}, tppubtype = {article} } @article{RNPCDNR, title = {Relative Network Positioning via CDN Redirections}, author = {Ao-Jan Su and David R. Choffnes and Fabián E. Bustamante and Aleksandar Kuzmanovic}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/AJSu08CRP.pdf}, year = {2008}, date = {2008-06-06}, journal = { In Proc. of the International Conference on Distributed Computing Systems (ICDCS)}, abstract = {Many large-scale distributed systems can benefit from a service that allows them to select among alternative nodes based on their relative network positions. A variety of approaches propose new measurement infrastructures that attempt to scale this service to large numbers of nodes by reducing the amount of direct measurements to end hosts. In this paper, we introduce a new approach to relative network positioning that eliminates direct probing by leveraging pre-existing infrastructure. Specifically, we exploit the dynamic association of nodes with replica servers from large content distribution networks (CDNs) to determine relative position information -- we call this approach CDN-based Relative network Positioning (CRP). We demonstrate how CRP can support two common examples of location information used by distributed applications: server selection and dynamic node clustering. After describing CRP in detail, we present results from an extensive wide-area evaluation that demonstrates its effectiveness.}, keywords = {publication, R3}, pubstate = {published}, tppubtype = {article} } @article{DesigningLess, title = {Designing Less-structured P2P Systems for the Expected High Churn}, author = {Fabián E.
Bustamante and Yi Qiao}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/BustamanteTON07.pdf}, year = {2008}, date = {2008-06-03}, journal = {In IEEE/ACM Transactions on Networking (ToN)}, volume = {16}, number = {3}, abstract = {We address the problem of highly transient populations in unstructured and loosely-structured peer-to-peer systems. We propose a number of illustrative query-related strategies and organizational protocols that, by taking into consideration the expected session times of peers (their lifespans), yield systems with performance characteristics more resilient to the natural instability of their environments. We first demonstrate the benefits of lifespan-based organizational protocols in terms of end-application performance and in the context of dynamic and heterogeneous Internet environments. We do this using a number of currently adopted and proposed query-related strategies, including methods for query distribution, caching and replication. We then show, through trace-driven simulation and wide-area experimentation, the performance advantages of lifespan-based, query-related strategies when layered over currently employed and lifespan-based organizational protocols. While merely illustrative, the evaluated strategies and protocols clearly demonstrate the advantages of considering peers' session time in designing widely-deployed peer-to-peer systems.}, keywords = {P2P, publication}, pubstate = {published}, tppubtype = {article} } @article{CROMA, title = {A Comparison of Resilient Overlay Multicast Approaches}, author = {Stefan Birrer and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/SBirrerJSAC07.pdf}, year = {2007}, date = {2007-12-03}, journal = {In IEEE Journal on Selected Areas in Communications (JSAC) -- Special Issue on Advances in Peer-to-Peer Streaming Systems}, volume = {25}, number = {9}, abstract = {Overlay-based multicast has been proposed as a key alternative for large-scale group communication. There is ample motivation for such an approach, as it delivers the scalability advantages of multicast while avoiding the deployment issues of a network-level solution. As multicast functionality is pushed to autonomous, unpredictable end systems, however, significant performance loss can result from their higher degree of transiency when compared to routers. Consequently, a number of techniques have recently been proposed to improve overlays' resilience by exploiting path diversity and minimizing node dependencies. Delivering high application performance at relatively low costs and under a high degree of transiency has proven to be a difficult task. Each of the proposed resilient techniques comes with a different trade-off in terms of delivery ratio, end-to-end latency and additional network traffic. In this paper, we review some of these approaches and evaluate their effectiveness by contrasting the performance and associated cost of representative protocols through simulation and wide-area experimentation.}, keywords = {Overlay Multicast, publication}, pubstate = {published}, tppubtype = {article} } @article{Vortex, title = {Vortex: Enabling Cooperative Selective Wormholing for Network Security Systems}, author = {Jack Lange and Peter Dinda and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/JLange07VRTX.pdf}, year = {2007}, date = {2007-09-03}, journal = {In Proc.
of the 10th International Symposium on Recent Advances in Intrusion Detection}, abstract = {We present a novel approach to remote traffic aggregation for Network Intrusion Detection Systems (NIDS) called Cooperative Selective Wormholing (CSW). Our approach works by selectively aggregating traffic bound for unused network ports on a volunteer’s commodity PC. CSW could enable NIDS operators to cheaply and efficiently monitor large distributed portions of the Internet, something they are currently incapable of. Based on a study of several hundred hosts in a university network, we posit that there is sufficient heterogeneity in hosts’ network service configurations to achieve a high degree of network coverage by re-using unused port space on client machines. We demonstrate Vortex, a proof-of-concept CSW implementation that runs on a wide range of commodity PCs (Unix and Windows). Our experiments show that Vortex can selectively aggregate traffic to a virtual machine backend, effectively allowing two machines to share the same IP address transparently. We close with a discussion of the basic requirements for a large-scale CSW deployment.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{EEBIVC, title = { Exploiting Emergent Behavior for Inter-Vehicle Communication}, author = {David R. Choffnes and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes07EEB.pdf}, year = {2007}, date = {2007-06-03}, journal = {In Proc. of 2nd International Workshop on Hot Topics in Autonomic Computing}, abstract = {We introduce Virtual Ferry Networking (VFN), a novel approach to data dissemination services on mobile ad-hoc networks. VFN exploits the emergent patterns of vehicles’ mobility to buffer and carry messages when immediately forwarding those messages would fail. Instead of depending on a fixed, small set of vehicles and paths for ferrying messages, VFN allows any vehicle moving along part of a virtual route to become a possible carrier for messages. VFN helps address many of the challenges with supporting distributed applications in challenging ad-hoc vehicular networks with rapidly changing topologies, fast-moving vehicles and signal-weakening obstructions such as bridges and buildings. We discuss the challenges with implementing VFN and present evaluation results from an early prototype.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{E2EIRPFP, title = {End-to-end Inference of Router Packet Forwarding Priority}, author = {Guohan Lu and Yan Chen and Stefan Birrer and Fabián E. Bustamante and Chin Yin Cheung and Xing Li}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/GLu07POPI.pdf}, year = {2007}, date = {2007-05-03}, journal = { In Proc. of IEEE INFOCOM}, abstract = {Packet forwarding prioritization (PFP) in routers is one of the mechanisms commonly available to network administrators. PFP can have a significant impact on the performance of applications, the accuracy of measurement tools’ results and the effectiveness of network troubleshooting procedures. Despite their potential impact, no information on PFP settings is readily available to end users. In this paper, we present an end-to-end approach for packet forwarding priority inference and its associated tool, POPI. This is the first attempt to infer router packet forwarding priority through end-to-end measurement.
Our POPI tool enables users to discover such network policies through the monitoring and rank classification of loss rates for different packet types. We validated our approach via statistical analysis, simulation, and wide-area experimentation in PlanetLab. As part of our wide-area experiments, we employed POPI to analyze 156 random paths across 162 PlanetLab nodes. We discovered 15 paths flagged with multiple priorities, 13 of which were further validated through hop-by-hop loss rate measurements. In addition, we surveyed all related network operators and received responses for about half of them, confirming our inferences. }, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{ROMP, title = {Resilience in Overlay Multicast Protocols}, author = {Stefan Birrer and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/SBirrer06ROMP.pdf}, year = {2006}, date = {2006-10-03}, journal = {In Proc. of the 14th IEEE/ACM International Symposium on Modeling, Analysis, and Simulation of Computer and Telecommunication Systems}, abstract = {One of the most important challenges of self-organized, overlay systems for large-scale group communication lies in these systems' ability to handle the high degree of transiency inherent to their environment. While a number of resilient protocols and techniques have been recently proposed, achieving high delivery ratios without sacrificing end-to-end latencies or incurring significant additional costs has proven to be a difficult task. In this paper we review some of these approaches and experimentally evaluate their effectiveness by contrasting their performance and associated cost through simulation and wide-area experimentation.}, keywords = {Overlay Multicast, publication}, pubstate = {published}, tppubtype = {article} } @article{DBA, title = { Drafting Behind Akamai (Travelocity-Based Detouring)}, author = {Ao-Jan Su and David R. Choffnes and Aleksandar Kuzmanovic and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/Ajsu06DBA.pdf http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/Ajsu06DBA.ppt}, year = {2006}, date = {2006-09-03}, journal = { In Proc. of ACM SIGCOMM 2006}, abstract = {To enhance web browsing experiences, content distribution networks (CDNs) move web content closer to clients by caching copies of web objects on thousands of servers worldwide. Additionally, to minimize client download times, such systems perform extensive network and server measurements, and use them to redirect clients to different servers over short time scales. In this paper, we explore techniques for inferring and exploiting network measurements performed by the largest CDN, Akamai; our objective is to locate and utilize quality Internet paths without performing extensive path probing or monitoring. Our contributions are threefold. First, we conduct a broad measurement study of Akamai's CDN. We probe Akamai's network from 140 PlanetLab vantage points for two months. We find that Akamai redirection times, while slightly higher than advertised, are sufficiently low to be useful for network control. Second, we empirically show that Akamai redirections overwhelmingly correlate with network latencies on the paths between clients and the Akamai servers. Finally, we illustrate how large-scale overlay networks can exploit Akamai redirections to identify the best detouring nodes for one-hop source routing.
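A rough sketch of the redirection-reuse idea behind the two Drafting Behind Akamai entries: if the CDN currently redirects a source and a candidate relay to overlapping replica sets, the source-relay path is likely in good shape, which makes the relay a plausible one-hop detour. The snapshot data and the Jaccard-style ranking below are simplifications for illustration, not the papers' algorithm.

# Rough sketch of CDN-redirection-driven detour selection (a simplification,
# not the papers' algorithm). Inputs are per-node sets of replica-server IPs
# that the CDN's DNS most recently returned to each node.

def redirection_overlap(replicas_a, replicas_b):
    """Fraction of replica servers shared by two nodes' current redirections."""
    a, b = set(replicas_a), set(replicas_b)
    return len(a & b) / len(a | b) if a | b else 0.0

def rank_detours(src, candidates, current_redirections):
    """Order candidate relays by redirection overlap with the source."""
    src_reps = current_redirections[src]
    scored = [(redirection_overlap(src_reps, current_redirections[c]), c)
              for c in candidates if c != src]
    return [c for score, c in sorted(scored, reverse=True)]

# Hypothetical snapshot (documentation-range IPs): replica servers each
# node was redirected to just now.
snapshot = {
    "src":    {"203.0.113.10", "203.0.113.11"},
    "relay1": {"203.0.113.11", "203.0.113.12"},
    "relay2": {"198.51.100.7"},
}
print(rank_detours("src", ["relay1", "relay2"], snapshot))  # relay1 first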
Our research shows that in more than 50% of investigated scenarios, it is better to route through the nodes recommended by Akamai than to use the direct paths. Because this is not the case for the rest of the scenarios, we develop low-overhead pruning algorithms that avoid Akamai-driven paths when they are not beneficial.}, keywords = {publication, R3}, pubstate = {published}, tppubtype = {article} } @article{SUOUM, title = {Structured and Unstructured Overlays Under the Microscope -- A Measurement-based View of Two P2P Systems That People Use}, author = {Yi Qiao and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/YQiao06SUO.pdf}, year = {2006}, date = {2006-06-03}, journal = {In Proc. of the 2006 USENIX Annual Technical Conference}, abstract = {Existing peer-to-peer systems rely on overlay network protocols for object storage and retrieval and message routing. These overlay protocols can be broadly classified as structured and unstructured -- structured overlays impose constraints on the network topology for efficient object discovery, while unstructured overlays organize nodes in a random graph topology that is arguably more resilient to peer population transiency. There is an ongoing discussion on the pros and cons of both approaches. This paper contributes to the discussion a multiple-site, measurement-based study of two operational and widely-deployed file-sharing systems. The two protocols are evaluated in terms of resilience, message overhead, and query performance. We validate our findings and further extend our conclusions through detailed analysis and simulation experiments.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{PSHPC, title = {Publish-subscribe for high-performance computing}, author = {Greg Eisenhauer and Fabián Bustamante and Karsten Schwan}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Eisenhauer06PSHP.pdf}, year = {2006}, date = {2006-01-03}, journal = {IEEE Internet Computing -- Special Issue on Asynchronous Middleware and Services}, volume = {10}, number = {1}, pages = {8-25}, abstract = {High-performance computing could significantly benefit from publish-subscribe communication, but current systems don't deliver the kind of performance required by applications in that domain. In response, the authors developed Echo, a high-performance event-delivery middleware designed to scale to the data rates typically found in grid environments. This article provides an overview of Echo, the infrastructure on which it's built, and the techniques used to implement it.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{IMTMVMN, title = {An Integrated Mobility and Traffic Model for Vehicular Wireless Networks}, author = {David R. Choffnes and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes05vanet.pdf}, year = {2005}, date = {2005-12-03}, journal = {In Proc. of the 2nd ACM International Workshop on Vehicular Ad Hoc Networks}, abstract = {Ad-hoc wireless communication among highly dynamic, mobile nodes in an urban network is a critical capability for a wide range of important applications including automated vehicles, real-time traffic monitoring and vehicular safety applications. When evaluating application performance in simulation, a realistic mobility model for vehicular ad-hoc networks (VANETs) is critical for accurate results.
This paper analyzes ad-hoc wireless network performance in a vehicular network in which nodes move according to a simplified vehicular traffic model on roads defined by real map data. We show that when nodes move according to our street mobility model, STRAW, network performance is significantly different from that of the commonly used random waypoint model. We also demonstrate that protocol performance varies with the type of urban environment. Finally, we use these results to argue for the development of integrated vehicular and network traffic simulators to evaluate vehicular ad-hoc network applications, particularly when the information passed through the network affects node mobility. }, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{FDHTSM, title = {The Feasibility of DHT-based Streaming Multicast}, author = {Stefan Birrer and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/SBirrer05FDSM.pdf}, year = {2005}, date = {2005-11-03}, journal = { In Proc. of the 13th IEEE/ACM International Symposium on Modeling, Analysis, and Simulation of Computer and Telecommunication Systems}, abstract = {We explore the feasibility of streaming applications over DHT-based substrates. In particular, we focus our study on the implications of bandwidth heterogeneity and transiency, both characteristic of these systems' target environment. Our discussion is grounded on an initial evaluation of SplitStream, a representative DHT-based cooperative multicast system.}, keywords = {Overlay Multicast, publication}, pubstate = {published}, tppubtype = {article} } @article{EKBb, title = {Elders Know Best - Handling Churn in Less Structured P2P Systems}, author = {Yi Qiao and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/YQiao05EKB.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/YQiao05EKB.pps}, year = {2005}, date = {2005-10-03}, journal = {In Proc. of the Fifth IEEE International Conference on Peer-to-Peer Computing}, abstract = {We address the problem of highly transient populations in unstructured and loosely-structured peer-to-peer systems. We propose a number of illustrative query-related strategies and organizational protocols that, by taking into consideration the expected session times of peers (their lifespans), yield systems with performance characteristics more resilient to the natural instability of their environments. We first demonstrate the benefits of lifespan-based organizational protocols in terms of end-application performance and in the context of dynamic and heterogeneous Internet environments. We do this using a number of currently adopted and proposed query-related strategies, including methods for query distribution, caching and replication. We then show, through trace-driven simulation and wide-area experimentation, the performance advantages of lifespan-based, query-related strategies when layered over currently employed and lifespan-based organizational protocols. While merely illustrative, the evaluated strategies and protocols clearly demonstrate the advantages of considering peers' session time in designing widely-deployed peer-to-peer systems.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{Magellan, title = {Magellan: Performance-based, Cooperative Multicast}, author = {Stefan Birrer and Fabián E. 
Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/SBirrer05MPCM.pdf}, year = {2005}, date = {2005-09-03}, journal = {In Proc. of the Tenth International Workshop on Web Content Caching and Distribution}, abstract = {Among the proposed overlay multicast protocols, tree-based systems have proven to be highly scalable and efficient in terms of physical link stress and end-to-end latency. Conventional tree-based protocols, however, distribute the forwarding load unevenly among the participating peers. An effective approach for addressing this problem is to stripe the multicast content across a forest of disjoint trees, evenly sharing the forwarding responsibility among participants. DHTs seem to be naturally well suited for the task, as they are able to leverage the inherent properties of their routing model in building such a forest. In heterogeneous environments, though, DHT-based schemes for tree (and forest) construction may yield deep, unbalanced structures with potentially large delivery latencies. This paper introduces Magellan, a new overlay multicast protocol we have built to explore the tradeoff between fairness and performance in these environments. Magellan builds a data-distribution forest out of multiple performance-centric, balanced trees. It assigns every peer in the system a primary tree with priority over the peer's resources. The peers' spare resources are then made available to secondary trees. In this manner, Magellan achieves fairness, ensuring that every participating peer contributes resources to the system. By employing a balanced distribution tree with O(log N)-bounded, end-to-end hop-distance, Magellan also provides a high delivery ratio with comparably low latency. Preliminary simulation results show the advantage of this approach.}, keywords = {Overlay Multicast, publication}, pubstate = {published}, tppubtype = {article} } @article{DPI, title = {Distributed Popularity Indices}, author = {Ashish Gupta and Peter Dinda and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/AGupta05DPI.pdf}, year = {2005}, date = {2005-08-03}, journal = {Poster in Proc. of ACM SIGCOMM}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{TTWAN, title = {Characterizing and Predicting TCP Throughput on the Wide Area Network}, author = {Dong Lu and Yi Qiao and Peter Dinda and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Donglu05ICDCS.pdf}, year = {2005}, date = {2005-06-03}, journal = {In Proc. of the 25th IEEE International Conference on Distributed Computing Systems}, abstract = {DualPats exploits the strong correlation between TCP throughput and flow size, and the statistical stability of Internet path characteristics, to accurately predict the TCP throughput of large transfers using active probing. We propose additional mechanisms to explain the correlation, and then analyze why traditional TCP benchmarking fails to predict the throughput of large transfers well. We characterize stability and develop a dynamic sampling rate adjustment algorithm so that we probe a path based on its stability. Our analysis, design, and evaluation are based on a large-scale measurement study.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{FatNemob, title = {FatNemo: Multi-Source Multicast Overlay Fat-Tree}, author = {Stefan Birrer and Fabián E.
Bustamante and Dong Lu and Peter Dinda and Yi Qiao}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/SBirrer05NSDI.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/nsdi05poster.pdf}, year = {2005}, date = {2005-05-03}, journal = {Poster in Proc. of the Second Symposium on Networked Systems Design & Implementation}, abstract = {This poster presents the idea of emulating fat-trees in overlays for multi-source multicast applications. Fat-trees are like real trees in that their branches become thicker the closer one gets to the root, thus overcoming the "root bottleneck" of regular trees. We describe FatNemo, a novel overlay multi-source multicast protocol based on this idea, and present early experimental and analytical results showing the advantages of this approach. FatNemo organizes its members into a tree of clusters with cluster sizes increasing closer to the root. It uses bandwidth capacity to decide the highest layer in which a peer can participate, and relies on co-leaders to share the forwarding responsibility and to increase the tree's resilience to path and node failures.}, keywords = {Overlay Multicast, publication}, pubstate = {published}, tppubtype = {article} } @article{Magnolia, title = {Magnolia: A novel DHT architecture for keyword-based searching}, author = {Ashish Gupta and Manan Sanghi and Peter Dinda and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Gupta05NSDIPoster.pdf}, year = {2005}, date = {2005-05-03}, journal = {In Proc. of the Second Symposium on Networked Systems Design & Implementation}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{MTPTWAN, title = {Modeling and Taming Parallel TCP on the Wide Area Network}, author = {Dong Lu and Yi Qiao and Peter A. Dinda and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DongLuIPDPS05.pdf}, year = {2005}, date = {2005-04-03}, journal = {In Proc. of the 19th IEEE International Parallel and Distributed Processing Symposium}, abstract = {Parallel TCP flows are broadly used in the high-performance distributed computing community to enhance network throughput, particularly for large data transfers. Previous research has studied the mechanism by which parallel TCP improves aggregate throughput, but no practical mechanism exists to predict its throughput. In this work, we address how to predict parallel TCP throughput as a function of the number of flows, as well as how to predict the corresponding impact on cross traffic. To the best of our knowledge, we are the first to answer the following question on behalf of a user: what number of parallel flows will give the highest throughput with less than a p% impact on cross traffic? We term this the maximum nondisruptive throughput. We begin by studying the behavior of parallel TCP in simulation to help derive a model for predicting parallel TCP throughput and its impact on cross traffic. Combining this model with some previous findings, we derive a simple, yet effective, online advisor. We evaluate our advisor through simulation-based and wide-area experimentation.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{Nemoc, title = {Nemo: Resilient Peer-to-Peer Multicast without the Cost}, author = {Stefan Birrer and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/SBirrer05MMCN.pdf}, year = {2005}, date = {2005-03-03}, journal = {In Proc.
of the 12th Annual Multimedia Computing and Networking Conference}, abstract = {We introduce Nemo, a novel peer-to-peer multicast protocol that achieves a high delivery ratio without sacrificing end-to-end latency or incurring additional costs. Based on two simple techniques: (1) co-leaders to minimize dependencies, and (2) triggered negative acknowledgments (NACKs) to detect lost packets, Nemo's design emphasizes conceptual simplicity and minimum dependencies, thus achieving performance characteristics capable of withstanding the natural instability of its target environment. We present an extensive comparative evaluation of our protocol through simulation and wide-area experimentation. We contrast the scalability and performance of Nemo with that of three alternative protocols: Narada, Nice and Nice-PRM. Our results show that Nemo can achieve delivery ratios (up to 99.9%) similar to those of comparable protocols under high failure rates, but at a fraction of their cost in terms of duplicate packets (reductions > 90%) and control-related traffic (reductions > 20%).}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @techreport{Reef, title = {Reef: Efficiently designing and evaluating overlay algorithms}, author = {Stefan Birrer and Fabián E. Bustamante}, year = {2005}, date = {2005-02-03}, number = {NWU-CS-05-14}, institution = {Department of Computer Science, Northwestern University}, keywords = {Overlay Multicast, technical report}, pubstate = {published}, tppubtype = {techreport} } @techreport{MVTMVEN, title = {Modeling Vehicular Traffic and Mobility for Vehicular Wireless Networks}, author = {David R. Choffnes and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/NWU-CS-05-03-1-1.pdf}, year = {2005}, date = {2005-01-03}, number = {NWU-CS-05-03}, institution = {Department of Computer Science, Northwestern University}, abstract = {Ad-hoc wireless communication among highly dynamic, mobile nodes in an urban network is a critical capability for a wide range of important applications including automated vehicles, real-time traffic monitoring, and battleground communication. When evaluating application performance through simulation, a realistic mobility model for vehicular ad-hoc networks (VANETs) is critical for accurate results. This technical report discusses the implementation of STRAW, a new mobility model for VANETs in which nodes move according to a realistic vehicular traffic model on roads defined by real street map data. The challenge is to create a traffic model that accounts for individual vehicle motion without incurring significant overhead relative to the cost of performing the wireless network simulation. We identify essential and optional techniques for modeling vehicular motion that can be integrated into any wireless network simulator. We then detail choices we made in implementing STRAW.}, keywords = {technical report}, pubstate = {published}, tppubtype = {techreport} } @article{SSP2Pb, title = { Looking at the Server-Side of Peer-to-Peer Systems}, author = {Yi Qiao and Dong Lu and Fabián E. Bustamante and Peter Dinda}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/YQiao04LCR.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/YQiao04LCR.ppt}, year = {2004}, date = {2004-11-03}, journal = {In Proc.
of the 7th Workshop on Languages, Compilers and Run-time Support for Scalable Systems}, abstract = {Peer-to-peer systems have grown significantly in popularity over the last few years. An increasing number of research projects have been closely following this trend, looking at many of the paradigm's technical aspects. In the context of data-sharing services, efforts have focused on a variety of issues from object location and routing to fair sharing and peer lifespans. Overall, the majority of these projects have concentrated on either the whole P2P infrastructure or the client-side of peers. Little attention has been given to the peer's server-side, even when that side determines much of the everyday-user's experience. In this paper, we make the case for looking at the server-side of peers, focusing on the problem of scheduling download requests at the server-side of P2P systems with the intent of minimizing the average response time experienced by users. We start by characterizing server workload based on extensive trace collection and analysis. We then evaluate the performance and fairness of different scheduling policies through trace-driven simulations. Our results show that average response time can be dramatically reduced by more effectively scheduling the requests on the server-side of P2P systems.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{SRPT, title = {Applications of SRPT Scheduling with Inaccurate Information}, author = {Dong Lu and Peter A. Dinda and Yi Qiao and Huanyuan Sheng and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DongLuMASCOTS04.pdf}, year = {2004}, date = {2004-10-03}, journal = {Poster in Proc. of the 12th IEEE/ACM International Symposium on Modeling, Analysis and Simulation of Computer and Telecommunication Systems (MASCOTS)}, abstract = {The Shortest Remaining Processing Time (SRPT) scheduling policy was proven, in the 1960s, to yield the smallest mean response time, and it was recently proven that its performance gain over Processor Sharing (PS) usually does not come at the expense of large jobs. However, despite the many advantages of SRPT scheduling, it is not widely applied. One important reason for the sporadic application of SRPT scheduling is that accurate job size information is often unavailable. Our previous work addressed the performance and fairness issues of SRPT scheduling when job size information is inaccurate. We found that SRPT (and FSP) scheduling outperforms PS as long as there exists a (rather small) amount of correlation between the estimated job size and the actual job size. In the work we summarize here, we have developed job size estimation techniques to support the application of SRPT to web server and Peer-to-Peer server-side scheduling. We have evaluated our techniques with extensive simulation studies and real-world implementation and measurement.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{FatNemo, title = {FatNemo: Building a Resilient Multi-Source Multicast Fat-Tree}, author = {Stefan Birrer and Dong Lu and Fabián E. Bustamante and Yi Qiao and Peter Dinda}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/SBirrer04FNB.pdf}, year = {2004}, date = {2004-09-03}, journal = {In Proc. of the Ninth International Workshop on Web Content Caching and Distribution}, abstract = {This paper proposes the idea of emulating fat-trees in overlays for multi-source multicast applications.
Fat-trees are like real trees in that their branches become thicker the closer one gets to the root, thus overcoming the "root bottleneck" of regular trees. We introduce FatNemo, a novel overlay multi-source multicast protocol based on this idea. FatNemo organizes its members into a tree of clusters with cluster sizes increasing closer to the root. It uses bandwidth capacity to decide the highest layer in which a peer can participate, and relies on co-leaders to share the forwarding responsibility and to increase the tree's resilience to path and node failures. We present the design of FatNemo and show simulation-based experimental results comparing its performance with that of three alternative protocols (Narada, Nice and Nice-PRM). These initial results show that FatNemo not only minimizes the average and standard deviation of response time, but also handles end host failures gracefully with minimum performance penalty.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{P2PMGUb, title = {Resilient Peer-to-Peer Multicast from the Ground Up}, author = {Stefan Birrer and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/SBirrer04RGU.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/nca04nemo.pdf}, year = {2004}, date = {2004-08-03}, journal = {In Proc. of the IEEE Network Computing and Applications - Workshop on Adaptive Grid Computing}, abstract = {One of the most important challenges of peer-to-peer multicast protocols is the ability to efficiently deal with the high degree of churn inherent to their environment. As multicast functionality is pushed to autonomous, unpredictable peers, significant performance losses can result from group membership changes and the higher failure rates of end-hosts when compared to routers. Achieving high delivery ratios without sacrificing end-to-end latencies or incurring additional costs has proven to be a challenging task. This paper introduces Nemo, a novel peer-to-peer multicast protocol that aims at achieving this elusive goal. We present an extensive comparative evaluation of our protocol through simulation and wide-area experimentation. We compare the performance of Nemo with that of three alternative protocols: Narada, Nice and Nice-PRM. Our results show that Nemo can achieve delivery ratios similar to those of comparable protocols (up to 99.98%) under different failure rates, but at a fraction of their cost in terms of duplicate packets (reductions > 85%) and control-related traffic. }, keywords = {Overlay Multicast, publication}, pubstate = {published}, tppubtype = {article} } @article{WayBack, title = {Wayback: A User-level Versioning File System for Linux}, author = {Brian Cornell and Peter Dinda and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Cornell04VFS.pdf https://sourceforge.net/projects/wayback/}, year = {2004}, date = {2004-06-03}, journal = {In Proc. of USENIX Annual Technical Conference, FREENIX Track (Best Paper Award)}, abstract = {In a typical file system, only the current version of a file (or directory) is available. In Wayback, a user can also access any previous version, all the way back to the file's creation time. Versioning is done automatically at the write level: each write to the file creates a new version. Wayback implements versioning using an undo log structure, exploiting the massive space available on modern disks to provide its very useful functionality.
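The undo-log versioning that the Wayback entry describes can be modeled in a few lines. The sketch below is a toy, in-memory model at whole-file granularity with a logical clock; Wayback itself logs individual writes under FUSE and uses real timestamps.

# Toy model of undo-log versioning (in memory, whole-file granularity for
# brevity). Each write appends an undo record, so any earlier version can
# be reconstructed by replaying the log backwards.

class VersionedFile:
    def __init__(self):
        self.data = b""
        self.clock = 0                    # logical write counter
        self.undo_log = []                # list of (write_time, previous_contents)

    def write(self, new_data: bytes):
        self.clock += 1
        self.undo_log.append((self.clock, self.data))
        self.data = new_data

    def version_at(self, t: int) -> bytes:
        """Undo every write that happened after time t to recover old contents."""
        contents = self.data
        for ts, previous in reversed(self.undo_log):
            if ts <= t:
                break
            contents = previous
        return contents

f = VersionedFile()
f.write(b"v1")   # write #1
f.write(b"v2")   # write #2
assert f.version_at(1) == b"v1" and f.version_at(0) == b"" and f.data == b"v2"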
Wayback is a user-level file system built on the FUSE framework that relies on an underlying file system for access to the disk. In addition to simplifying Wayback, this also allows it to extend any existing file system with versioning: after being mounted, the file system can be mounted a second time with versioning. We describe the implementation of Wayback, and evaluate its performance using several benchmarks.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @techreport{ICMPM, title = {Can We Trust ICMP Measurements?}, author = {Stefan Birrer and Fabián E. Bustamante and Yan Chen}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/NWU-CS-04-48.pdf}, year = {2004}, date = {2004-03-03}, number = {NWU-CS-04-48}, institution = {Department of Computer Science, Northwestern University}, abstract = {ICMP-based measurements (e.g. ping) are often criticized as unrepresentative of the applications' experienced performance, as applications are based on TCP/UDP protocols and there is a well-accepted conjecture that routers are often configured to treat ICMP differently from TCP and UDP. However, to the best of our knowledge, this assumption has not been validated. With this in mind, we conducted extensive Internet end-to-end path measurements of these three protocols, spanning over 90 sites (from both commercial and academic networks), over 6,000 paths and more than 28 million probes in PlanetLab over two weeks. Our results show that ICMP performance is a good estimator for TCP/UDP performance for the majority of the paths. However, for nearly 0.5% of the paths, we found persistent RTT differences between UDP and ICMP greater than 50%, while for TCP the difference exceeds 10% for 0.27% of the paths. Thus, although ICMP-based measurements can be trusted as predictors of TCP/UDP performance, distributed systems and network researchers should be aware of some scenarios where these measurements will be heavily misleading; this paper also provides some hints that can help in identifying those situations.}, keywords = {technical report}, pubstate = {published}, tppubtype = {techreport} } @techreport{SSP2PS, title = {Looking at the Server-Side of Peer-to-Peer Systems}, author = {Yi Qiao and Dong Lu and Fabián E. Bustamante and Peter Dinda}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/NWU-CS-04-37.pdf}, year = {2004}, date = {2004-02-01}, number = { NWU-CS-04-37}, institution = {Department of Computer Science, Northwestern University}, abstract = {Peer-to-peer systems have grown significantly in popularity over the last few years. An increasing number of research projects have been closely following this trend, looking at many of the paradigm's technical aspects. In the context of data-sharing services, efforts have focused on a variety of issues from object location and routing to fair sharing and peer lifespans. Overall, the majority of these projects have concentrated on either the whole P2P infrastructure or the client-side of peers. Little attention has been given to the peer's server-side, even when that side determines much of the everyday user's experience. In this paper, we make the case for looking at the server-side of peers, focusing on the problem of scheduling with the intent of minimizing the average response time experienced by users. We start by characterizing server workload based on extensive trace collection and analysis.
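The question posed by the Can We Trust ICMP Measurements? entry above is easy to probe informally: compare a TCP handshake RTT against an ICMP echo RTT to the same host. The sketch below shells out to the system ping utility, since raw ICMP sockets require privileges; the host, port, and output parsing are environment-dependent assumptions, and this is not the report's methodology.

import socket
import subprocess
import time

# Quick-and-dirty RTT comparison in the spirit of the tech report above:
# TCP handshake RTT vs. ICMP echo RTT via `ping` (assumes a Unix-like system).

def tcp_rtt_ms(host, port=80, timeout=2.0):
    start = time.monotonic()
    with socket.create_connection((host, port), timeout=timeout):
        pass                          # connection established = SYN/SYN-ACK done
    return (time.monotonic() - start) * 1000.0

def icmp_rtt_ms(host):
    out = subprocess.run(["ping", "-c", "1", host],
                         capture_output=True, text=True, check=True).stdout
    # Parse the "time=12.3 ms" field from typical ping output.
    return float(out.split("time=")[1].split()[0])

if __name__ == "__main__":
    host = "example.com"              # placeholder target
    t, i = tcp_rtt_ms(host), icmp_rtt_ms(host)
    print(f"TCP {t:.1f} ms vs ICMP {i:.1f} ms (diff {abs(t - i):.1f} ms)")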
We then evaluate the performance and fairness of different scheduling policies through trace-driven simulations. Our results show that average response time can be dramatically reduced by more effectively scheduling the requests on the server-side of P2P systems.}, keywords = {P2P, technical report}, pubstate = {published}, tppubtype = {techreport} } @techreport{Nemo, title = {Nemo: Resilient Peer-to-Peer Multicast without the Cost}, author = {Stefan Birrer and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/NWU-CS-04-36.pdf}, year = {2004}, date = {2004-01-03}, number = {NWU-CS-04-36}, institution = {Department of Computer Science, Northwestern University}, abstract = {One of the most important challenges of peer-to-peer multicast protocols is the ability to efficiently deal with the high degree of transiency inherent to their environment. As multicast functionality is pushed to autonomous, unpredictable peers, significant performance losses can result from group membership changes and the higher failure rates of end-hosts when compared to routers. Achieving high delivery ratios without sacrificing end-to-end latencies or incurring additional costs has proven to be a challenging task. This paper introduces Nemo, a novel peer-to-peer multicast protocol that aims at achieving this elusive goal. Based on two simple techniques: (1) co-leaders to minimize dependencies, and (2) triggered negative acknowledgments (NACKs) to detect lost packets, Nemo's design emphasizes conceptual simplicity and minimum dependencies, thus achieving performance characteristics capable of withstanding the natural instability of its target environment. We present an extensive comparative evaluation of our protocol through simulation and wide-area experimentation. We compare the scalability and performance of Nemo with that of three alternative protocols: Narada, Nice and Nice-PRM. Our results show that Nemo can achieve delivery ratios (up to 99.9%) similar to those of comparable protocols under high failure rates, but at a fraction of their cost in terms of duplicate packets (reductions > 90%) and control-related traffic (reductions > 20%).}, keywords = {Overlay Multicast, technical report}, pubstate = {published}, tppubtype = {techreport} } @article{DDPDDA, title = {Differential Data Protection for Dynamic Distributed Applications}, author = {Patrick Widener and Karsten Schwan and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Widener03DDP.pdf}, year = {2003}, date = {2003-12-03}, journal = {In Proc. of the 19th Annual Computer Security Applications Conference}, abstract = {We present a mechanism for providing differential data protection to publish/subscribe distributed systems, such as those used in peer-to-peer computing, grid environments, and others. This mechanism, termed "security overlays," incorporates credential-based communication channel creation, subscription and extension. We describe a conceptual model of publish/subscribe services that is made concrete by our mechanism.
We also present an application, Active Video Streams, whose reimplementation using security overlays allows it to react to high-level security policies specified in XML without significant performance loss or the need to embed policy-specific code in the application.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{EKBLBI, title = {Elders Know Best: Lifespan-Based Ideas in P2P Systems}, author = {Yi Qiao and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/YQiao03EKB.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/YQiao03EKB.ppt}, year = {2003}, date = {2003-10-03}, journal = {In Proc. of the 19th Symposium on Operating Systems Principles}, abstract = {The transiency of the peer population and its implications for peer-to-peer (P2P) applications are increasingly attracting the attention of the research community. As undesirable as it is unavoidable, peer transiency could negate many of the appealing features of the P2P approach. We are exploring new P2P protocols and strategies that, by considering peers' lifespan a key attribute, can greatly boost the stability, efficiency and scalability of these systems. This work-in-progress briefly discusses our approach and presents some initial results.}, keywords = {P2P, publication}, pubstate = {published}, tppubtype = {article} } @article{PLP2PP, title = {Friendships that last: Peer lifespan and its role in P2P protocols}, author = {Fabián E. Bustamante and Yi Qiao}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bustamante03FLPL.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bustamante03FLPL-2.ppt}, year = {2003}, date = {2003-09-03}, journal = {In Proc. of the International Workshop on Web Content Caching and Distribution}, abstract = {We consider the problem of choosing whom to "befriend" among a collection of known peers in distributed P2P systems. In particular, our work explores a number of P2P protocols that, by considering peers' lifespan distribution a key attribute, can yield systems with performance characteristics more resilient to the natural instability of their environments. This article presents results from our initial efforts, focusing on currently deployed decentralized P2P systems. We measure the observed lifespan of more than 500,000 peers in a popular P2P system for over a week and propose a functional form that fits the distribution well. We consider a number of P2P protocols based on this distribution, and use a trace-driven simulator to compare them against alternative protocols for decentralized and unstructured or loosely-structured P2P systems. We find that simple lifespan-based protocols can reduce the ratio of connection breakdowns and their associated costs by over 42%.}, keywords = {P2P, publication}, pubstate = {published}, tppubtype = {article} } @techreport{FP2PP, title = {The effect of lasting friendships in P2P protocols}, author = {Yi Qiao and Fabián E. Bustamante}, year = {2003}, date = {2003-02-03}, number = {NWU-CS-03-23}, institution = {Department of Computer Science, Northwestern University}, keywords = {P2P, technical report}, pubstate = {published}, tppubtype = {techreport} } @techreport{RP2PMGU, title = {Resilient Peer-to-Peer Multicast from the Ground Up}, author = {Stefan Birrer and Fabián E.
Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/NWU-CS-03-22.pdf}, year = {2003}, date = {2003-01-03}, number = {NWU-CS-03-22}, institution = {Department of Computer Science, Northwestern University}, abstract = {One of the most important challenges of peer-to-peer multicast protocols is the ability to efficiently deal with the high degree of churn inherent to their environment. As multicast functionality is pushed to autonomous, unpredictable peers, significant performance losses can result from group membership changes and the higher failure rates of end-hosts when compared to routers. Achieving high delivery ratios without sacrificing end-to-end latencies or incurring additional costs has proven to be a challenging task. This paper introduces Nemo, a novel peer-to-peer multicast protocol that aims at achieving this elusive goal. We present an extensive comparative evaluation of our protocol through simulation and wide-area experimentation. We compare the performance of Nemo with that of three alternative protocols: Narada, Nice and Nice-PRM. Our results show how Nemo can achieve delivery ratios similar to those of comparable protocols (up to 99.98%) under different failure rates, but at a fraction of their cost in terms of duplicate packets (reductions > 85%) and control-related traffic.}, keywords = {Overlay Multicast, technical report}, pubstate = {published}, tppubtype = {techreport} } @article{NDRb, title = {Native Data Representation: An Efficient Wire Format for High-Performance Computing}, author = {Greg Eisenhauer and Fabián E. Bustamante and Karsten Schwan}, year = {2002}, date = {2002-12-03}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {13}, number = {12}, pages = {1234-1246}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{SDSUP, title = {Scalable Directory Services Using Proactivity}, author = {Fabián E. Bustamante and Patrick Widener and Karsten Schwan}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bustamante02SDS.pdf}, year = {2002}, date = {2002-11-03}, journal = {In Proc. of Supercomputing}, abstract = {Common to computational grids and pervasive computing is the need for an expressive, efficient, and scalable directory service that provides information about objects in the environment. We argue that a directory interface that pushes information to clients about changes to objects can significantly improve scalability. This paper describes the design, implementation, and evaluation of the Proactive Directory Service (PDS). The PDS interface supports a customizable proactive mode through which clients can subscribe to be notified about changes to their objects of interest. Clients can dynamically tune the detail and granularity of these notifications through filter functions instantiated at the server or at the object's owner, and by remotely tuning the functionality of those filters. We compare PDS performance against off-the-shelf implementations of DNS and the Lightweight Directory Access Protocol.
Our evaluation results confirm the expected performance advantages of this approach and demonstrate that customized notification through filter functions can reduce bandwidth utilization while improving the performance of both clients and directory servers.}, keywords = {Active Streams, publication}, pubstate = {published}, tppubtype = {article} } @article{AIMS, title = {AIMS: Robustness Through Sensible Introspection}, author = {Fabián E. Bustamante and Christian Poellabauer and Karsten Schwan}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bustamante02AIMS.pdf}, year = {2002}, date = {2002-09-03}, journal = {In Proc. of the 10th ACM SIGOPS European Workshop}, abstract = {Our society increasingly relies on dependable complex computing systems. To be useful, dependable systems must also be robust when facing unpredictable changes to their operating environments. Introspection has proven to be a helpful approach in the design of dynamically adaptable computing systems. We argue that, for robustness, the introspective component itself needs to be dynamically adaptive since (i) it is effectively impossible to predict all information needed for introspection, (ii) even if we try, no introspective system will be able to manage the amount of data necessary to select the right adaptation to an overwhelming number of possible system conditions, and (iii) the right adaptation may be situation dependent as well. At Georgia Tech we are exploring the idea of dynamically adaptive introspective components for future systems. To this end, we are building AIMS, an Adaptive Introspective Management System through which monitoring probes (or agents) can be (un-)installed at runtime, their execution can be finely tuned dynamically, and the processing done on the collected data can be changed as needed.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{OMF, title = {Open Metadata Formats: Efficient XML-Based Communication for High Performance Computing}, author = {Patrick Widener and Greg Eisenhauer and Karsten Schwan and Fabián E. Bustamante}, year = {2002}, date = {2002-07-03}, journal = {Cluster Computing}, volume = {5}, number = {3}, pages = {315-324}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @phdthesis{ASAADAS, title = {The Active Streams Approach to Adaptive Distributed Applications And Services}, author = {Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/astreams-thesis.pdf}, year = {2001}, date = {2001-11-20}, school = {Georgia Institute of Technology}, abstract = {The widespread deployment of inexpensive communication technologies, computational resources in the networking infrastructure, and network-capable end devices offers a rich design space for novel distributed applications and services. Exploration of this space has given rise, for instance, to the notions of grid and peer-to-peer computing. Both technologies promise to change the way we think about and use computing, by harvesting geographically distributed resources in order to create a universal source of pervasive computing power that will support new classes of applications. Despite the growing interest in these new environments and the increasing availability of the necessary hardware and network infrastructure, few actual applications are readily available and/or widely deployed.
Such scarcity results from a number of technical challenges that must be addressed before the full potential of these technologies can be realized. Most of these applications, as well as the services they utilize, are expected to handle dynamically varying demand on resources and to run in large, heterogeneous, and dynamic environments, where the availability of resources cannot be guaranteed `a priori' -- all of this while providing acceptable levels of performance. To support such requirements, we believe that new services need to be customizable, applications need to be dynamically extensible, and both applications and services need to be able to adapt to variations in resources' availability and demand. The Active Streams approach, advocated in this dissertation, aims to facilitate the task of building new distributed systems with these characteristics. To this end, the approach considers the contents of the information flowing across the application and its services, it adopts a component-based model of application/service programming, and it provides for dynamic adaptation at multiple levels and points in the underlying platform. In addition, given the complexity of building such systems, it eases the programmer's task by providing the needed infrastructure for resource monitoring, self-monitoring and adaptation. This dissertation explores the Active Streams approach and its supporting framework in the context of these new distributed applications and services.}, keywords = {Active Streams, PhD Thesis}, pubstate = {published}, tppubtype = {phdthesis} } @techreport{PDS, title = {The Case for Proactive Directory Services}, author = {Fabián E. Bustamante and Patrick Widener and Karsten Schwan}, year = {2001}, date = {2001-11-10}, volume = {SC-2001}, institution = {Poster in Proc. of Supercomputing}, keywords = {Active Streams, technical report}, pubstate = {published}, tppubtype = {techreport} } @article{ASESS, title = {Active Streams and the effects of stream specialization}, author = {Fabián E. Bustamante and Greg Eisenhauer and Karsten Schwan and Patrick Widener}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bustamante01ASE.pdf}, year = {2001}, date = {2001-08-03}, journal = {Poster in Proc. of the Tenth International Symposium on High Performance Distributed Computing}, volume = {HPDC-2001}, abstract = {The explosive growth of the Internet, with the emergence of new networking technologies and the increasing number of network-capable end devices, is paving the way for a number of novel distributed applications and services. Cooperative distributed systems have become a common computing model and pervasive computing has caught the interest of academia and industry. The realization of these types of applications is complicated by the characteristics of their target environments, including their heterogeneous nature as well as the dynamically varying demands on and availability of their resources. Dynamic variations in resource usage are due to applications' data dependencies and/or users' dynamic behaviors, while the run-time variation in resource availability is a consequence of failures, resource additions or removals, and, most importantly, contention for shared resources. This poster presents Active Streams, a middleware approach and its associated framework for building such novel distributed applications and services.
It reports our initial results in understanding the effects of stream specialization through streamlets, demonstrating experimentally the potential improvements in latency (3-6X) and CPU utilization (up to 6X) derived from migrating streamlets `up' a stream, as well as the need for intermediate computational units.}, keywords = {Active Streams, publication}, pubstate = {published}, tppubtype = {article} } @article{ASADS, title = {Active Streams: An approach to adaptive distributed systems}, author = {Fabián E. Bustamante and Greg Eisenhauer and Patrick Widener and Karsten Schwan and Calton Pu}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bustamante01ASA.pdf}, year = {2001}, date = {2001-05-03}, journal = {In Proc. of the 8th Workshop on Hot Topics in Operating Systems}, volume = {HotOS-VIII}, abstract = {An increasing number of distributed applications aim to provide services to users by interacting with a correspondingly growing set of data-intensive network services. Such applications, as well as the services they utilize, are generally expected to handle dynamically varying demands on resources and to run in large, heterogeneous, and dynamic environments, where the availability of resources cannot be guaranteed a priori -- all of this while providing acceptable levels of performance. To support such requirements, we believe that new services need to be customizable, applications need to be dynamically extensible, and both applications and services need to be able to adapt to variations in resource availability and demand. A comprehensive approach to building new distributed applications can facilitate this by considering the contents of the information flowing across the application and its services and by adopting a component-based model of application/service programming. It should provide for dynamic adaptation at multiple levels and points in the underlying platform; and, since the mapping of components to resources in dynamic environments is too complicated, it should relieve programmers of this task. We propose Active Streams, a middleware approach and its associated framework for building distributed applications and services that exhibit these characteristics.}, keywords = {Active Streams, publication}, pubstate = {published}, tppubtype = {article} } @article{ESHPS, title = {Event Services in High Performance Systems}, author = {Greg Eisenhauer and Fabián E. Bustamante and Karsten Schwan}, year = {2001}, date = {2001-05-03}, journal = {Cluster Computing}, volume = {4}, number = {3}, pages = {243-252}, abstract = {The Internet and the Grid are changing the face of high performance computing. Rather than tightly-coupled SPMD-style components running in a single cluster, on a parallel machine, or even on the Internet programmed in MPI, applications are evolving into sets of cooperating components scattered across diverse computational elements. These components may run on different operating systems and hardware platforms and may be written by different organizations in different languages. Complete “applications” are constructed by assembling these components in a plug-and-play fashion. This new vision for high performance computing demands features and characteristics not easily provided by traditional high-performance communications middleware. In response to these needs, we have developed ECho, a high-performance event-delivery middleware that meets the new demands of the Grid environment.
ECho provides efficient binary transmission of event data with unique features that support data-type discovery and enterprise-scale application evolution. We present measurements detailing ECho's performance to show that ECho significantly outperforms other systems intended to provide this functionality and provides throughput and latency comparable to the most efficient middleware infrastructures available.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{MTCIS, title = {A Middleware Toolkit for Client-Initiated Service Specialization}, author = {Greg Eisenhauer and Fabián E. Bustamante and Karsten Schwan}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Eisenhauer00MTC.pdf}, year = {2001}, date = {2001-04-03}, journal = {ACM SIGOPS Operating Systems Review}, volume = {35}, number = {2}, abstract = {As the Internet matures, streaming data services are taking an increasingly important place alongside traditional HTTP transactions. The need to dynamically adjust the delivery of such services to changes in available network and processing resources has spawned substantial research on application-specific methods for dynamic adaptation, including video and audio streaming applications. Such adaptation techniques are well developed, but they are also highly specialized, with the client (receiver) and server (sender) implementing well-defined protocols that exploit content-specific stream properties. This paper describes our efforts to bring the benefits of such content-aware, application-level service adaptation to all types of streaming data and to do so in a manner that is efficient and flexible. Our contribution in this domain is ECho, a high-performance event-delivery middleware system. ECho's basic functionality provides efficient binary transmission of event data with unique features that support dynamic data-type discovery and service evolution. ECho's contribution to data stream adaptation is in the mechanisms it provides for its clients to customize their data flows through type-safe dynamic server extension.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @techreport{NDR, title = {Native Data Representation: An Efficient Wire Format for High Performance Computing}, author = {Greg Eisenhauer and Fabián E. Bustamante and Karsten Schwan}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/GIT-CC-01-18.pdf}, year = {2001}, date = {2001-01-03}, number = {GIT-CC-01-18}, institution = {College of Computing, Georgia Institute of Technology}, abstract = {New trends in high-performance software development such as tool- and component-based approaches have increased the need for flexible and high-performance communication systems. High-performance computing applications are being integrated with a variety of software tools to allow on-line remote data visualization, enable real-time interaction with remote sensors and instruments, or provide novel environments for human collaboration. There has also been a growing interest among high-performance researchers in component-based approaches, in an attempt to facilitate software evolution and promote software reuse. When trying to reap the well-known benefits of these approaches, the question arises of what communications infrastructure should be used to link the various components. In this context, flexibility and high performance seem to be incompatible goals.
Traditional HPC-style communication libraries, such as MPI, offer good performance, but are not intended for loosely-coupled systems. Object- and metadata-based approaches like XML offer the needed plug-and-play flexibility, but with significantly lower performance. We observe that the flexibility and baseline performance of data exchange systems are strongly determined by their wire formats, that is, by how they represent data for transmission in heterogeneous environments. Upon examining the performance implications of using a number of different wire formats, we propose an alternative approach for flexible high-performance data exchange, Native Data Representation, and evaluate its current implementation in the Portable Binary I/O library.}, keywords = {technical report}, pubstate = {published}, tppubtype = {techreport} } @article{EWFHPC, title = {Efficient Wire Formats for High Performance Computing}, author = {Fabián E. Bustamante and Greg Eisenhauer and Karsten Schwan and Patrick Widener}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bustamante00EWF.pdf}, year = {2000}, date = {2000-11-03}, journal = {In Proc. of Supercomputing (SC)}, abstract = {High performance computing is being increasingly utilized in non-traditional circumstances where it must interoperate with other applications. For example, online visualization is being used to monitor the progress of applications, and real-world sensors are used as inputs to simulations. Whenever these situations arise, there is a question of what communications infrastructure should be used to link the different components. Traditional HPC-style communications systems such as MPI offer relatively high performance, but are poorly suited for developing these less tightly-coupled cooperating applications. Object-based systems and meta-data formats like XML offer substantial plug-and-play flexibility, but with substantially lower performance. We observe that the flexibility and baseline performance of all these systems are strongly determined by their `wire format', or how they represent data for transmission in a heterogeneous environment. We examine the performance implications of different wire formats and present an alternative with significant advantages in terms of both performance and flexibility.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @article{ESHPC, title = {Event Services for High Performance Computing}, author = {Greg Eisenhauer and Fabián E. Bustamante and Karsten Schwan}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Eisenhauer00ESH.pdf}, year = {2000}, date = {2000-08-03}, journal = {In Proc. of the Ninth International Symposium on High Performance Distributed Computing}, volume = {HPDC-2000}, abstract = {The Internet and the Grid are changing the face of high performance computing. Rather than tightly-coupled SPMD-style components running in a single cluster, on a parallel machine, or even on the Internet programmed in MPI, applications are evolving into sets of collaborating elements scattered across diverse computational elements. These collaborating components may run on different operating systems and hardware platforms and may be written by different organizations in different languages. Complete ``applications'' are constructed by assembling these components in a plug-and-play fashion. This new vision for high performance computing demands features and characteristics not easily provided by traditional high-performance communications middleware.
In response to these needs, we have developed ECho, a high-performance event-delivery middleware that meets the new demands of the Grid environment. ECho provides efficient binary transmission of event data with unique features that support data-type discovery and enterprise-scale application evolution. We present measurements detailing ECho's performance to show that ECho significantly outperforms other systems intended to provide this functionality and provides throughput and latency comparable to the most efficient middleware infrastructures available.}, keywords = {publication}, pubstate = {published}, tppubtype = {article} } @techreport{Pacioli, title = {Pacioli: A Framework for Model Construction}, author = {Fabián E. Bustamante}, year = {2000}, date = {2000-02-03}, institution = {Storage Systems Program, Computer Systems Laboratory, Hewlett-Packard Laboratories}, keywords = {technical report}, pubstate = {published}, tppubtype = {techreport} } @conference{AcitiveIOHHPC, title = {Active I/O Streams for Heterogeneous High Performance Computing}, author = {Fabián E. Bustamante and Karsten Schwan}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bustamante99AIOS.pdf}, year = {1999}, date = {1999-03-03}, publisher = {Proc. of Parallel Computing (ParCo)}, abstract = {We are concerned with the attainment of high performance in I/O on distributed, heterogeneous hardware. Our approach is to combine a program's data retrieval and storage actions with operations executed on the resulting active I/O streams. Performance improvements are attained by exploiting information about these operations and by runtime changes to their behavior and placement. In this fashion, active I/O can adjust to static system properties derived from the heterogeneous nature of distributed CPU, storage, and network devices, and it can respond to dynamic changes in system conditions, thereby reducing the total bandwidth needs and/or the end-to-end latencies of I/O actions. Our prototype of an active I/O system, called Adios, implements I/O as a directed network composed of streams originating at sources, destined for sinks, and routed through a number of intermediate vertices that act on the data units traversing the stream. Adaptive resource allocation methods based on this model are under development, with the goal of improving the I/O performance of complex parallel programs running in shared heterogeneous computing environments.}, keywords = {Active Streams, publication}, pubstate = {published}, tppubtype = {conference} } @conference{DDT-TVM, title = {Digital Dynamic Telepathology - the Virtual Microscope}, author = {Asmara Afework and Michael Beynon and Fabián E. Bustamante and Angelo DeMarzo and Renato Ferreira and Robert Miller and Mark Silberman and Joel Saltz and Alan Sussman and Hubert Tsang}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bustamante98DDT.pdf}, year = {1998}, date = {1998-08-03}, publisher = {Proc. of the 1998 AMIA Annual Fall Symposium}, abstract = {The Virtual Microscope is being designed as an integrated computer hardware and software system that generates a highly realistic digital simulation of analog, mechanical light microscopy. We present our work over the past year in meeting the challenges in building such a system. The enhancements we made are discussed, as well as the planned future improvements.
Performance results are provided that show that the system scales well, so that many clients can be adequately serviced by an appropriately configured data server.}, keywords = {publication}, pubstate = {published}, tppubtype = {conference} } @conference{High-endcollaborative, title = {Agent and Object Technologies for High-end Collaborative Applications}, author = {Mustaque Ahamad and Raja Das and Karsten Schwan and Sumeer Bhola and Fabián E. Bustamante and Greg Eisenhauer and Jeremy Heiner and Vijaykumar Krishnaswamy and Todd Rose and Beth Schroeder and Dong Zhou}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Ahamad97AOT.pdf}, year = {1997}, date = {1997-02-14}, publisher = {Proc. of the 1997 Dartmouth Workshop on Transportable Agents}, abstract = {Complex distributed collaborative applications have rich computational and communication needs that cannot easily be met by the currently available web-based software infrastructure. In this position paper, we claim that to address the needs of such highly demanding applications, it is necessary to develop an integrated framework that both supports high-performance executions via distributed objects and makes use of agent-based computations to address dynamic application behavior, mobility, and security needs. Specifically, we claim that, based on application needs and resource availability, it should be possible for an application to switch at runtime between the remote invocation and evaluation mechanisms of the object and agent technologies being employed. To support such dynamically configurable applications, we identify several issues that arise for the required integrated object-agent system. These include: (1) system support for agent and object executions, and (2) the efficient execution of agents and high-performance object implementations using performance techniques like caching, replication, and fragmentation of the state being accessed and manipulated. We are currently developing a system supporting high-end collaborative applications.}, keywords = {publication}, pubstate = {published}, tppubtype = {conference} } @techreport{Emerical-Comparison, title = {An Empirical Comparison of Time Warp and the NPSI Elastic Time Protocol}, author = {Fabián E. Bustamante and Richard M. Fujimoto}, year = {1997}, date = {1997-02-06}, number = {GIT-CC-97-13}, institution = {College of Computing, Georgia Institute of Technology}, keywords = {technical report}, pubstate = {published}, tppubtype = {techreport} }