2025
Michael Polinski, Richard Jo, Kevin McAfee, Fabián E. Bustamante. The Centralization of a Decentralized Video Platform – A First Characterization of PeerTube. Journal article, SIGCOMM CCR, forthcoming, 2025.

PeerTube is an open-source video sharing platform built as a decentralized alternative to YouTube. With software like Mastodon and Friendica, PeerTube is part of a series of federated social media platforms built partly in response to growing concerns about centralized control and ownership of the incumbent ones. In this paper, we present the first characterization of PeerTube, including its underlying infrastructure and the content being shared on its network. Our findings reveal concerning trends toward centralization that echo patterns observed in other contexts, exacerbated by the limited degree of content replication. PeerTube instances are mostly located in North America and Western Europe, with about 70% hosted in Germany, the USA, and France, and over 50% hosted on the top 7 ASes. We also find that over 92% of videos are stored without any redundancy in spite of PeerTube's native support for video redundancy.
2024
Rashna Kumar, Esteban Carisimo, Lukas De Angelis Riva, Mauricio Buzzone, Fabián E. Bustamante, Ihsan Ayyub Qazi, Mariano G. Beiro. Of Choices and Control - A Comparative Analysis of Government Hosting. In Proc. of the ACM IMC, 2024.
https://aqualab.cs.northwestern.edu/wp-content/uploads/2024/10/RKumar-IMC24.pdf

We present the first large-scale analysis of the adoption of third-party serving infrastructures in government digital services. Drawing from data collected across 61 countries spanning every continent and region, capturing over 82% of the world's Internet population, we examine the preferred hosting models for public-facing government sites and associated resources. Leveraging this dataset, we analyze government hosting strategies, cross-border dependencies, and the level of centralization in government web services. Among other findings, we show that governments predominantly rely on third-party infrastructure for data delivery, although this varies significantly, with even neighboring countries showing contrasting patterns. Despite a preference for third-party hosting solutions, most government URLs in our study are served from domestic servers, although again with significant regional variation. Looking at servers located overseas, while the majority are found in North America and Western Europe, we note some interesting bilateral relationships (e.g., 79% of Mexico's government URLs being served from the US, and 26% of China's government URLs from Japan). This research contributes to understanding the evolving landscape of serving infrastructures in the government sector, and the choices governments make between leveraging third-party solutions and maintaining control over users' access to their services and information.
Esteban Carisimo, Rashna Kumar, Caleb J. Wang, Santiago Klein, Fabián E. Bustamante. Ten years of the Venezuelan crisis - An Internet perspective. In Proc. of the ACM SIGCOMM, 2024.
https://aqualab.cs.northwestern.edu/wp-content/uploads/2024/08/Ten-years-of-the-Venezuelan-crisis-An-Internet-perspective-3.pdf

The Venezuelan crisis, unfolding over the past decade, has garnered international attention due to its impact on various sectors of civil society. While studies have extensively covered the crisis's effects on public health, energy, and water management, this paper delves into a previously unexplored area – the impact on Venezuela's Internet infrastructure. Amidst Venezuela's multifaceted challenges, understanding the repercussions on this critical aspect of modern society becomes imperative for the country's recovery. Leveraging measurements from various sources, we present a comprehensive view of the changes undergone by the Venezuelan network in the past decade. Our study reveals the significant impact of the crisis captured by different signals, including bandwidth stagnation, limited network infrastructure growth, and high latency compared to the Latin American average. Beyond offering a new perspective on the Venezuelan crisis, our study can help inform attempts at devising strategies for its recovery.
Esteban Carisimo, Mia Weaver, Fabián E. Bustamante, Paul Barford. Beyond Proximity: Exploring Remote Cloud Peering. In Proc. of the ACM SIGCOMM, Poster Session, 2024.
https://aqualab.cs.northwestern.edu/wp-content/uploads/2024/08/Poster_-Beyond-Proximity_-Exploring-Remote-Cloud-Peering.pdf

We investigate network peering location choices, focusing on whether networks opt for distant peering sites even when nearby options are available. We conduct a network-wide cloud-based traceroute campaign using virtual machine instances from four major cloud providers to identify peering locations and calculate the "peering stretch": the extra distance networks travel beyond the nearest data center to their actual peering points. Our results reveal a median peering stretch of 300 kilometers, with some networks traveling as much as 6,700 kilometers. We explore the characteristics of networks that prefer distant peering points and the potential motivations behind these choices, providing insights into digital sovereignty and cybersecurity implications.
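For intuition, the "peering stretch" computation lends itself to a short sketch. The following is a minimal illustration, assuming hypothetical coordinates and plain great-circle (haversine) distances; the paper's traceroute-based identification of peering points is not reproduced here.

```python
# Minimal sketch of "peering stretch": the extra distance traffic travels
# beyond the nearest cloud data center to the actual peering point.
# All coordinates below are hypothetical, for illustration only.
from math import radians, sin, cos, asin, sqrt

def haversine_km(lat1, lon1, lat2, lon2):
    """Great-circle distance between two (lat, lon) points, in kilometers."""
    lat1, lon1, lat2, lon2 = map(radians, (lat1, lon1, lat2, lon2))
    a = sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2
    return 2 * 6371 * asin(sqrt(a))

def peering_stretch(network_loc, peering_loc, datacenters):
    """Extra km traveled to the observed peering point versus the nearest DC."""
    nearest = min(haversine_km(*network_loc, *dc) for dc in datacenters)
    actual = haversine_km(*network_loc, *peering_loc)
    return actual - nearest

# Hypothetical example: a network near Chicago peering in Ashburn, VA,
# despite a cloud data center being available in Chicago itself.
dcs = [(41.88, -87.63), (39.04, -77.49)]  # Chicago, Ashburn
print(round(peering_stretch((41.95, -87.70), (39.04, -77.49), dcs)), "km of stretch")
```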
Fabián E. Bustamante, John Doyle, Walter Willinger, Marwan Fayed, David L. Alderson, Steven Low, Stefan Savage, Henning Schulzrinne. Towards Re-Architecting Today's Internet for Survivability: NSF Workshop Report. ACM SIGCOMM Computer Communication Review (CCR), 54(1), pp. 36-47, 2024.
https://aqualab.cs.northwestern.edu/wp-content/uploads/2024/09/NSFWorkshopReport-A.pdf

On November 28-29, 2023, Northwestern University hosted a workshop titled "Towards Re-architecting Today's Internet for Survivability" in Evanston, Illinois, US. The goal of the workshop was to bring together a group of national and international experts to sketch and start implementing a transformative research agenda for solving one of our community's most challenging yet important tasks: the re-architecting of tomorrow's Internet for "survivability", ensuring that the network is able to fulfill its mission even in the presence of large-scale catastrophic events. This report provides a necessarily brief overview of two full days of active discussions.
Kedar Thiagarajan, Esteban Carisimo, Fabián E. Bustamante. Revealing Hidden Secrets: Decoding DNS PTR records with Large Language Models. In Proc. of the ACM SIGCOMM, Poster Session, 2024.
https://aqualab.cs.northwestern.edu/wp-content/uploads/2024/08/Poster_-Revealing-Hidden-Secrets_-Decod.PTR-records-with-Large-Language-Models.pdf

Geolocating network devices is essential for various research areas. Yet, despite notable advancements, it continues to be one of the most challenging issues for experimentalists. An approach that has proved effective is leveraging geolocation hints in PTR records associated with network devices. We argue that Large Language Models (LLMs), rather than humans, are better equipped to identify patterns in DNS PTR records and to significantly scale the coverage of tools like Hoiho. We introduce an approach that leverages LLMs to classify PTR records, generate regular expressions for these classes, and produce hint-to-location mappings. We present preliminary results showing the applicability of LLMs as a scalable approach to leveraging PTR records for infrastructure geolocation.
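As an illustration of the kind of output such an approach targets, the sketch below applies hint-to-location regular expressions (of the sort an LLM might generate) to PTR records. The patterns, sample records, and locations are hypothetical, not taken from the paper or from Hoiho.

```python
# Toy illustration (not the paper's system): apply hypothetical, generated
# hint-to-location rules to DNS PTR records.
import re

# Hypothetical rules of the kind the approach would generate.
RULES = [
    (re.compile(r"\b(ord|chi)\d*\b"), "Chicago, US"),
    (re.compile(r"\b(lhr|lon)\d*\b"), "London, GB"),
    (re.compile(r"\bfra\d*\b"), "Frankfurt, DE"),
]

def geolocate_ptr(ptr_record):
    """Return the first location whose pattern matches the PTR record."""
    for pattern, location in RULES:
        if pattern.search(ptr_record):
            return location
    return None

print(geolocate_ptr("ae-1.r02.chi01.example.net"))  # -> Chicago, US
```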
Esteban Carisimo, Caleb Wang, Mia Weaver, Fabián E. Bustamante, Paul Barford. A hop away from everywhere: A view of the intercontinental long-haul infrastructure. In Proc. of ACM SIGMETRICS, 2024.
https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2023/10/carisimo-lhl-24.pdf

We present a longitudinal study of intercontinental long-haul links (LHLs) – links with latencies significantly higher than that of all other links in a traceroute path. Our study is motivated by the recognition of these LHLs as a network-layer manifestation of critical transoceanic undersea cables. We present a methodology and associated processing system for identifying long-haul links in traceroute measurements. We apply this system to a large corpus of traceroute data and report on multiple aspects of long-haul connectivity including country-level prevalence, routers as international gateways, preferred long-haul destinations, and the evolution of these characteristics over a 7-year period. We identify 85,620 layer-3 links (out of 2.7M links in a large traceroute dataset) that satisfy our definition for intercontinental long haul, with many of them terminating in a relatively small number of nodes. An analysis of connected components shows a clearly dominant component with a relative size that remains stable despite a significant growth of the long-haul infrastructure.
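The LHL definition above (a link whose latency is significantly higher than that of all other links in a traceroute path) can be illustrated with a small sketch. The threshold values below are assumptions for illustration, not the paper's calibrated methodology.

```python
# Simplified sketch of flagging a long-haul link in a traceroute path: a
# hop-to-hop latency increment that dominates all other increments.
# The factor and floor are illustrative assumptions.
def long_haul_links(hop_rtts_ms, factor=3.0, floor_ms=30.0):
    """Return (i, i+1) hop index pairs whose RTT increment dominates the path."""
    increments = [max(b - a, 0.0) for a, b in zip(hop_rtts_ms, hop_rtts_ms[1:])]
    links = []
    for i, inc in enumerate(increments):
        others = increments[:i] + increments[i + 1:]
        if inc >= floor_ms and others and inc >= factor * max(others):
            links.append((i, i + 1))
    return links

# Hypothetical path: the 8 ms -> 95 ms jump suggests a transoceanic segment.
print(long_haul_links([1.2, 2.0, 5.5, 8.0, 95.0, 97.1]))  # -> [(3, 4)]
```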
2023
Zachary Bischof, Kennedy Pitcher, Esteban Carisimo, Amanda Meng, Rafael Nunes, Ramakrishna Padmanabhan, Margaret E. Roberts, Alex C. Snoeren, Alberto Dainotti. Destination Unreachable: Characterizing Internet Outages and Shutdowns. In Proc. of ACM SIGCOMM, 2023.

In this paper, we provide the first comprehensive longitudinal analysis of government-ordered Internet shutdowns and spontaneous outages (i.e., disruptions not ordered by the government). We describe the available tools, data sources, and methods to identify and analyze Internet shutdowns. We then merge manually curated datasets on known government-ordered shutdowns and large-scale Internet outages, further augmenting them with data on real-world events, macroeconomic and sociopolitical indicators, and network operator statistics. Our analysis confirms previous findings on the economic and political profiles of countries with government-ordered shutdowns. Extending this analysis, we find that countries with national-scale spontaneous outages often have profiles similar to countries with shutdowns, differing from countries that experience neither. However, we find that government-ordered shutdowns are many times more likely to occur on days of mobilization, coinciding with elections, protests, and coups. Our study also characterizes the temporal characteristics of Internet shutdowns and finds that they differ significantly in terms of duration, recurrence interval, and start times when compared to spontaneous outages.
Rashna Kumar, Sana Asif, Elise Lee, Fabián E. Bustamante. Each at its own pace: Third-party Dependency and Centralization Around the World. In Proc. of ACM SIGMETRICS, 2023.
https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2023/02/RKumar-SIGMETRICS23.pdf

We describe the results of a large-scale study of third-party dependencies around the world based on regional top-500 popular websites accessed from vantage points in 50 countries, together covering all inhabited continents. This broad perspective shows that dependencies on a third-party DNS, CDN or CA provider vary widely around the world, ranging from 19% to as much as 76% of websites, across all countries. The critical dependencies of websites -- where the site depends on a single third-party provider -- are equally spread, ranging from 5% to 60% (CDN in Costa Rica and DNS in China, respectively). Interestingly, despite this high variability, our results suggest a highly concentrated market of third-party providers: three providers across all countries serve an average of 92% and Google, by itself, serves an average of 70% of the surveyed websites. Even more concerning, these differences persist a year later with increasing dependencies, particularly for DNS and CDNs. We briefly explore various factors that may help explain the differences and similarities in degrees of third-party dependency across countries, including economic conditions, Internet development, economic trading partners, categories, home countries, and traffic skewness of the country's top-500 sites.
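For readers unfamiliar with the dependency notions used above, the sketch below illustrates one plausible reading: a site is dependent on a service type (DNS, CDN, or CA) if it uses any third-party provider, and critically dependent if it relies on a single third-party provider for that service. The provider names and the exact rule are illustrative assumptions, not the paper's measurement pipeline.

```python
# Toy classifier for third-party dependency of one site, for one service type
# (e.g., DNS). Definitions and provider data are assumed for illustration.
def classify(providers, third_party):
    """Return 'critical', 'dependent', or 'independent'."""
    tp = [p for p in providers if p in third_party]
    if not tp:
        return "independent"
    # Exactly one provider, and it is third party: a single point of dependency.
    if len(set(providers)) == 1:
        return "critical"
    return "dependent"

THIRD_PARTY_DNS = {"Cloudflare", "Google", "AWS Route 53"}
print(classify(["Cloudflare"], THIRD_PARTY_DNS))                 # -> critical
print(classify(["Cloudflare", "self-hosted"], THIRD_PARTY_DNS))  # -> dependent
```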
Augusto Arturi, Esteban Carisimo, Fabián E. Bustamante. as2org+: Enriching AS-to-Organization Mappings with PeeringDB. In Proc. of the Passive and Active Measurement Conference (PAM), 2023.
https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2023/02/AArturi-PAM23.pdf

An organization-level topology of the Internet is a valuable resource with uses that range from the study of organizations' footprints and Internet centralization trends, to analysis of the dynamics of the Internet's corporate structures as a result of (de)mergers and acquisitions. Current approaches to infer this topology rely exclusively on WHOIS databases and are thus impacted by their limitations, including errors and outdated data. We argue that a collaborative, operator-oriented database such as PeeringDB can bring a complementary perspective to the legally-bounded information available in WHOIS records. We present as2org+, a new framework that leverages self-reported information available on PeeringDB to boost the state-of-the-art WHOIS-based methodologies. We discuss the challenges and opportunities with using PeeringDB records for AS-to-organization mappings, present the design of as2org+, and demonstrate its value by identifying companies operating in multiple continents and mergers and acquisitions over a five-year period.
2022
Sergi Alcalá-Marín, Aravindh Raman, Weili Wu, Andra Lutu, Marcelo Bagnulo, Ozgu Alay, Fabián E. Bustamante. Global Mobile Network Aggregators: Taxonomy, Roaming Performance and Optimization. In Proc. of ACM International Conference on Mobile Systems, Applications, and Services (MobiSys), 2022.
https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2022/05/SAlcala-Marin-Mobisys22.pdf

A new model of global virtual Mobile Network Operator (MNO) – the Mobile Network Aggregator (MNA) – has recently been gaining significant traction. MNAs provide mobile communications services to their customers by leveraging multiple MNOs, connecting through the one that best matches their customers' needs at any point in time (and space). MNAs naturally provide optimized global coverage by connecting through local MNOs across the different geographic regions where they provide service. In this paper, we dissect the operations of three MNAs, namely Google Fi, Twilio, and Truphone. We perform measurements using the three selected MNAs to assess their performance for three major applications, namely DNS, web browsing, and video streaming. We benchmark their performance, comparing it to that of a traditional MNO. We find that even though MNAs incur some delay penalty compared to the service accessed through the local MNOs in the geographic area where the user is roaming, they can significantly improve performance compared to the traditional roaming model of MNOs (e.g., home-routed roaming). Finally, in order to fully quantify the potential benefits that can be realized using the MNA model, we perform a set of emulations by deploying both control and user plane functions of open-source 5G implementations in different locations of AWS, and measure the potential gains.
Matteo Varvello, Kleomenis Katevas, Mihai Plesa, Hamed Haddadi, Fabián E. Bustamante, Ben Livshits. BatteryLab: A Collaborative Platform for Power Monitoring. In Proc. of the Passive and Active Measurement Conference (PAM), 2022.
https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2022/03/Varvello2022_Chapter_BatteryLabACollaborativePlatfo.pdf

Advances in cloud computing have simplified the way that both software development and testing are performed. This is not true for battery testing, for which state-of-the-art test-beds simply consist of one phone attached to a power meter. These test-beds have limited resources and access, and are overall hard to maintain; for these reasons, they often sit idle with no experiment to run. In this paper, we propose to share existing battery test-beds and transform them into vantage points of BatteryLab, a power monitoring platform offering heterogeneous devices and testing conditions. We have achieved this vision with a combination of hardware and software that augments existing battery test-beds with remote capabilities. BatteryLab currently counts three vantage points, one in Europe and two in the US, hosting three Android devices and one iPhone 7. We benchmark BatteryLab with respect to the accuracy of its battery readings, system performance, and platform heterogeneity. Next, we demonstrate how measurements can be run atop BatteryLab by developing the "Web Power Monitor" (WPM), a tool that can measure website power consumption at scale. We released WPM and used it to report on the energy consumption of Alexa's top 1,000 websites across 3 locations and 4 devices (both Android and iOS).
Esteban Carisimo, Ricky K. P. Mok, David D. Clark, kc claffy. Jitterbug: A new framework for jitter-based congestion inference. In Proc. of the Passive and Active Measurement Conference (PAM), 2022.
https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2023/02/ECarisimo-PAM22.pdf

We investigate a novel approach to the use of jitter to infer network congestion using data collected by probes in access networks. We discovered a set of features in jitter and jitter dispersion – a jitter-derived time series we define in this paper – that are characteristic of periods of congestion. We leverage these concepts to create a jitter-based congestion inference framework that we call Jitterbug. We apply Jitterbug's capabilities to a wide range of traffic scenarios and discover that Jitterbug can correctly identify both recurrent and one-off congestion events. We validate Jitterbug inferences against state-of-the-art autocorrelation-based inferences of recurrent congestion. We find that the two approaches have strong congruity in their inferences, but Jitterbug holds promise for detecting one-off as well as recurrent congestion. We identify several future directions for this research including leveraging ML/AI techniques to optimize performance and accuracy of this approach in operational settings.
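As a rough illustration of the signals involved, the sketch below computes a jitter time series from RTT samples and a moving-average proxy for jitter dispersion. This is an assumed simplification for illustration only; the paper defines jitter dispersion precisely, and Jitterbug's inference logic is substantially more involved.

```python
# Simplified stand-ins for the signals Jitterbug builds on. The "dispersion"
# here (moving average of absolute jitter) is an assumed proxy, not the
# paper's exact definition.
def jitter(rtts_ms):
    """Consecutive differences of an RTT time series."""
    return [b - a for a, b in zip(rtts_ms, rtts_ms[1:])]

def jitter_dispersion(rtts_ms, window=4):
    """Moving average of absolute jitter over a sliding window."""
    j = [abs(x) for x in jitter(rtts_ms)]
    return [sum(j[i:i + window]) / window for i in range(len(j) - window + 1)]

# Hypothetical samples: dispersion rises where RTTs start to oscillate,
# the kind of feature a congestion inference could key on.
rtts = [20.1, 20.3, 20.2, 20.4, 35.0, 22.0, 41.5, 25.3, 44.0]
print([round(v, 1) for v in jitter_dispersion(rtts)])
```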
Alexander Gamero-Garrido, Esteban Carisimo, Shuai Hao, Bradley Huffaker, Alex C. Snoeren, Alberto Dainotti. Quantifying Nations' Exposure to Traffic Observation and Selective Tampering. In Proc. of the Passive and Active Measurement Conference (PAM), 2022.
https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2023/02/AGameroG-PAM22.pdf
https://github.com/estcarisimo/state-owned-ases

Almost all popular Internet services are hosted in a select set of countries, forcing other nations to rely on international connectivity to access them. We identify nations where traffic towards a large portion of the country is serviced by a small number of Autonomous Systems, and, therefore, may be exposed to observation or selective tampering by these ASes. We introduce the Country-level Transit Influence (CTI) metric to quantify the significance of a given AS on the international transit service of a particular country. By studying the CTI values for the top ASes in each country, we find that 34 nations have transit ecosystems that render them particularly exposed, where a single AS is privy to traffic destined to over 40% of their IP addresses. In the nations where we are able to validate our findings with in-country operators, our top-five ASes are 90% accurate on average. In the countries we examine, CTI reveals that two classes of networks frequently play a particularly prominent role: submarine cable operators and state-owned ASes.
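To convey the intuition behind CTI, the sketch below computes, for each AS, the share of a country's addresses whose transit paths traverse it. This is a deliberately simplified stand-in: the actual metric also weighs factors such as an AS's position on the path, and the data here is hypothetical.

```python
# Simplified sketch of the intuition behind Country-level Transit Influence:
# the share of a country's IP addresses whose inbound transit traverses an AS.
from collections import defaultdict

def cti(paths):
    """paths: list of (transit_ases, n_addresses_served) for one country."""
    total = sum(n for _, n in paths)
    share = defaultdict(float)
    for transit_ases, n in paths:
        for asn in set(transit_ases):
            share[asn] += n / total
    return dict(share)

# Hypothetical country: AS100 carries traffic toward 75% of its addresses.
paths = [(["AS100", "AS200"], 5000), (["AS100"], 2500), (["AS300"], 2500)]
print(cti(paths))  # {'AS100': 0.75, 'AS200': 0.5, 'AS300': 0.25}
```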
Byungjin Jun, Fabián E. Bustamante, Ben Greenstein, Ian Clelland. Reining in Mobile Web Performance with Document and Permission Policies. In Proc. of International Workshop on Mobile Computing Systems and Applications (HotMobile), 2022.
https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2022/03/bjun-hotmobile22.pdf

The quality of mobile web experience remains poor, partially as a result of complex websites and design choices that worsen performance, particularly for users on suboptimal networks or with low-end devices. Prior proposed solutions have seen limited adoption due to the demand they place on developers and content providers, and the performing infrastructure needed to support them. We argue that Document and Permissions Policies – ongoing efforts to enforce good practices on web design – may offer the basis for a readily-available and easily-adoptable solution, as they encode key best practices for web development. In this paper, as a first step, we evaluate the potential performance cost of violating these well-understood best practices and how common such violations are in today's web. Our analysis shows, for example, that controlling for unsized-media policy, something applicable to 70% of the top Alexa websites, can indeed significantly reduce Cumulative Layout Shift, a core metric for evaluating the performance of the web.
2021
kc claffy, David D. Clark, John S. Heidemann, Fabián E. Bustamante, Mattijs Jonker, Aaron Schulman, Ellen Zegura. Workshop on Overcoming Measurement Barriers to Internet Research (WOMBIR 2021) final report. SIGCOMM Comput. Commun. Rev., 51(3), pp. 33-40, 2021.
https://dl.acm.org/doi/10.1145/3477482.3477489

In January and April 2021 we held the Workshop on Overcoming Measurement Barriers to Internet Research (WOMBIR) with the goal of understanding challenges in network and security data set collection and sharing. Most workshop attendees provided white papers describing their perspectives, and many participated in short-talks and discussion in two virtual workshops over five days. That discussion produced consensus around several points. First, many aspects of the Internet are characterized by decreasing visibility of important network properties, which is in tension with the Internet's role as critical infrastructure. We discussed three specific research areas that illustrate this tension: security, Internet access, and mobile networking. We discussed visibility challenges at all layers of the networking stack, and the challenge of gathering data and validating inferences. Important data sets require longitudinal (long-term, ongoing) data collection and sharing, support for which is more challenging for Internet research than other fields. We discussed why a combination of technical and policy methods are necessary to safeguard privacy when using or sharing measurement data. Workshop participants proposed several opportunities to accelerate progress, some of which require coordination across government, industry, and academia.
Esteban Carisimo, Alexander Gamero-Garrido, Alex C. Snoeren, Alberto Dainotti. Identifying ASes of State-Owned Internet Operators. In Proc. of the ACM Internet Measurement Conference (IMC), 2021.
https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2021/10/ECarisimo-IMC21.pdf
https://github.com/estcarisimo/state-owned-ases

In this paper we present and apply a methodology to accurately identify state-owned Internet operators worldwide and their Autonomous System Numbers (ASNs). Obtaining an accurate dataset of ASNs of state-owned Internet operators enables studies where state ownership is an important dimension, including research related to Internet censorship and surveillance, cyber-warfare and international relations, ICT development and digital divide, critical infrastructure protection, and public policy. Our approach is based on a multi-stage, in-depth manual analysis of datasets that are highly diverse in nature. We find that each of these datasets contributes in different ways to the classification process, and we identify limitations and shortcomings of these data sources. We obtain the first data set of this type, make it available to the research community together with the several lessons we learned in the process, and perform a preliminary analysis based on our data. We find that 53% (i.e., 123) of the world's countries are majority owners of Internet operators, highlighting that this is a widespread phenomenon. We also find and document the existence of subsidiaries of state-owned operators active in foreign countries, an aspect that touches every continent and particularly affects Africa. We hope that this work and the associated data set will inspire and enable a broad set of Internet measurement studies and interdisciplinary research.
Sana Asif, Byungjin Jun, Fabián E. Bustamante, John P. Rula. Networked Systems as Witnesses - Association Between Content Demand, Human Mobility and an Infection Spread. In Proc. of ACM Internet Measurement Conference (IMC), 2021.
https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2021/10/SAsif-IMC21-3.pdf

While non-pharmaceutical interventions (NPIs) such as stay-at-home, shelter-in-place, and school closures are considered the most effective ways to limit the spread of infectious diseases, their use is generally controversial given the political, ethical, and socioeconomic issues they raise. Part of the challenge is the non-obvious link between the level of compliance with such measures and their effectiveness. In this paper, we argue that users' demand on networked services can serve as a proxy for the social distancing behavior of communities, offering a new approach to evaluate these measures' effectiveness. We leverage the vantage point of one of the largest worldwide CDNs together with publicly-available datasets of mobile users' behavior, to examine the relationship between changes in user demand on the CDN and different interventions including stay-at-home/shelter-in-place, mask mandates, and school closures. As networked systems become integral parts of our everyday lives, they can act as witnesses of our individual and collective actions. Our study illustrates the potential value of this new role.
Byungjin Jun, Matteo Varvello, Yasir Zaki, Fabián E. Bustamante. WebTune: A Distributed Platform for Web Performance Measurements. In Proc. of the Network Traffic Measurement and Analysis Conference (TMA), 2021.
https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2021/07/WebTune-4.pdf

Web performance researchers have to regularly choose between synthetic and in-the-wild experiments. On the one hand, synthetic tests are useful to isolate what needs to be measured, but they lack the realism of real networks, websites, and server-specific configurations. Even enumerating all these conditions can be challenging, and no existing tool or testbed currently allows for this. In this paper, as in life, we argue that unity makes strength: by sharing part of their experimenting resources, researchers can naturally build their desired realistic conditions without compromising on the flexibility of synthetic tests. We take a step toward realizing this vision with WebTune, a distributed platform for web measurements. At a high level, WebTune seamlessly integrates with popular web measurement tools like Lighthouse and Puppeteer, exposing to an experimenter fine-grained control of real networks and servers, as one would expect in synthetic tests. Under the hood, WebTune serves "Webtuned" versions of websites, which are cloned and distributed to a testing network built on resources donated by the community. We evaluate WebTune with respect to its cloning accuracy and the complexity of network conditions to be reproduced. Further, we demonstrate its functioning via a 5-node deployment.
Andra Lutu, Diego Perino, Marcelo Bagnulo, Fabián E. Bustamante Insights from Operating an IP Exchange Provider Inproceedings Proc. of ACM Conference on Applications, Technologies, Architectures, and Protocols for Computer Communication (SIGCOMM), 2021. @inproceedings{lutu:sigcomm21, title = {Insights from Operating an IP Exchange Provider}, author = {Andra Lutu and Diego Perino and Marcelo Bagnulo and Fabián E. Bustamante}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2021/07/sigcomm2021-1.pdf}, year = {2021}, date = {2021-08-23}, booktitle = {Proc. of ACM Conference on Applications, Technologies, Architectures, and Protocols for Computer Communication (SIGCOMM)}, abstract = {IP Exchange Providers (IPX-Ps) offer their customers (e.g., mobile or IoT service providers) global data roaming and support for a variety of emerging services. They peer with other IPX-Ps and form the IPX network, which interconnects 800 MNOs worldwide, offering their customers access to mobile services in any other country. Despite the importance of IPX-Ps, little is known about their operations and performance. In this paper, we shed light on these opaque providers by analyzing a large IPX-P with more than 100 PoPs in 40+ countries, with a particularly strong presence in America and Europe. Specifically, we characterize the traffic and performance of the main infrastructures of the IPX-P (i.e., 2-3-4G signaling and GTP tunneling), and provide implications for its operation, as well as for the IPX-P’s customers. Our analysis is based on statistics we collected during two time periods (i.e., prior to and during the COVID-19 pandemic) and includes insights on the main services the platform supports (i.e., IoT and data roaming), traffic breakdown and geographical/temporal distribution, and communication performance (e.g., tunnel setup time, RTTs). Our results constitute a step towards advancing the understanding of IPX-Ps at their core, and provide guidelines for their operations and customer satisfaction.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } IP Exchange Providers (IPX-Ps) offer their customers (e.g., mobile or IoT service providers) global data roaming and support for a variety of emerging services. They peer with other IPX-Ps and form the IPX network, which interconnects 800 MNOs worldwide, offering their customers access to mobile services in any other country. Despite the importance of IPX-Ps, little is known about their operations and performance. In this paper, we shed light on these opaque providers by analyzing a large IPX-P with more than 100 PoPs in 40+ countries, with a particularly strong presence in America and Europe. Specifically, we characterize the traffic and performance of the main infrastructures of the IPX-P (i.e., 2-3-4G signaling and GTP tunneling), and provide implications for its operation, as well as for the IPX-P’s customers. Our analysis is based on statistics we collected during two time periods (i.e., prior to and during the COVID-19 pandemic) and includes insights on the main services the platform supports (i.e., IoT and data roaming), traffic breakdown and geographical/temporal distribution, and communication performance (e.g., tunnel setup time, RTTs). Our results constitute a step towards advancing the understanding of IPX-Ps at their core, and provide guidelines for their operations and customer satisfaction. |
Rashna Kumar, Fabián E. Bustamante Decentralization, privacy and performance for DNS Inproceedings Proc. of ACM Conference on Applications, Technologies, Architectures, and Protocols for Computer Communication (SIGCOMM) - Poster - Winner SIGCOMM ACM SRC Competition, 2021. @inproceedings{rkumar:dns:sigcomm21poster, title = {Decentralization, privacy and performance for DNS}, author = {Rashna Kumar and Fabián E. Bustamante}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2021/10/RKumar-DNS-Poster.pdf https://src.acm.org/winners/2022}, year = {2021}, date = {2021-08-23}, booktitle = {Proc. of ACM Conference on Applications, Technologies, Architectures, and Protocols for Computer Communication (SIGCOMM) - Poster - Winner SIGCOMM ACM SRC Competition}, abstract = {The Domain Name System (DNS) is both a key determinant of a user's quality of experience (QoE) and privy to their tastes, preferences, and even the devices they own. Growing concern about user privacy and QoE has brought about a number of alternative DNS techniques and services, from public DNS to encrypted and oblivious DNS. Today, a user choosing among these services and their few providers is forced to prioritize -- aware of it or not -- among web performance, privacy, reliability, and the potential for a centralized market and its consequences. We present Ónoma, a DNS resolver that addresses the concerns about DNS centralization without sacrificing privacy or QoE by sharding requests across alternative DNS services, placing these services in competition with each other, and pushing resolution to the network edge. Our preliminary evaluation shows the potential benefits of this approach across locales, with different DNS services, content providers, and content distribution networks.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } The Domain Name System (DNS) is both a key determinant of a user's quality of experience (QoE) and privy to their tastes, preferences, and even the devices they own. Growing concern about user privacy and QoE has brought about a number of alternative DNS techniques and services, from public DNS to encrypted and oblivious DNS. Today, a user choosing among these services and their few providers is forced to prioritize -- aware of it or not -- among web performance, privacy, reliability, and the potential for a centralized market and its consequences. We present Ónoma, a DNS resolver that addresses the concerns about DNS centralization without sacrificing privacy or QoE by sharding requests across alternative DNS services, placing these services in competition with each other, and pushing resolution to the network edge. Our preliminary evaluation shows the potential benefits of this approach across locales, with different DNS services, content providers, and content distribution networks. |
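To make the request-sharding idea concrete, here is a minimal sketch of the mechanism, not Ónoma's actual implementation: the resolver list and the round-robin rotation policy are illustrative assumptions, and the sketch uses Node.js's standard dns/promises API.

```typescript
// Illustrative sketch of DNS request sharding (not Ónoma's code): each
// lookup goes to a different service, so no single provider observes a
// user's full query stream.
import { Resolver } from "node:dns/promises";

// Hypothetical set of alternative DNS services to shard across.
const SERVICES: string[][] = [["8.8.8.8"], ["1.1.1.1"], ["9.9.9.9"]];
let next = 0;

async function shardedResolve(name: string): Promise<string[]> {
  const resolver = new Resolver();
  resolver.setServers(SERVICES[next]); // rotate across services per query
  next = (next + 1) % SERVICES.length;
  return resolver.resolve4(name); // A-record lookup
}

shardedResolve("example.com").then(console.log).catch(console.error);
```

A real resolver would also need the paper's other ingredients (per-service performance tracking and edge-based resolution); the rotation above only illustrates how sharding limits what any single provider can observe.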
2020 |
James Newman, Abbas Razaghpanah, Narseo Vallina-Rodriguez, Fabian E. Bustamante, Mark Allman, Diego Perino, Alessandro Finamore Back in control -- An extensible middle-box on your phone Technical Report arXiv (arXiv:2012.07695), 2020. @techreport{jnewman:mbz, title = {Back in control -- An extensible middle-box on your phone}, author = {James Newman and Abbas Razaghpanah and Narseo Vallina-Rodriguez and Fabian E. Bustamante and Mark Allman and Diego Perino and Alessandro Finamore}, url = {https://arxiv.org/abs/2012.07695 https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2021/03/JNewman-arXiv20.pdf }, year = {2020}, date = {2020-12-14}, number = {arXiv:2012.07695}, institution = {arXiv}, abstract = {The closed design of mobile devices -- with its increased security and consistent user interfaces -- is in large part responsible for their becoming the dominant platform for accessing the Internet. These benefits, however, are not without a cost. The operation of mobile devices and their apps is not easy for either users or operators to understand. We argue for recovering transparency and control on mobile devices through an extensible platform that can intercept and modify traffic before leaving the device or, on arrival, before it reaches the operating system. Conceptually, this is the same view of the traffic that a traditional middlebox would have at the far end of the first link in the network path. We call this platform ``middlebox zero'' or MBZ. By being on-board, MBZ also leverages local context as it processes the traffic and complements the network-wide view of standard middleboxes. We discuss the challenges of the MBZ approach, sketch a working design, and illustrate its potential with some concrete examples.}, keywords = {}, pubstate = {published}, tppubtype = {techreport} } The closed design of mobile devices -- with its increased security and consistent user interfaces -- is in large part responsible for their becoming the dominant platform for accessing the Internet. These benefits, however, are not without a cost. The operation of mobile devices and their apps is not easy for either users or operators to understand. We argue for recovering transparency and control on mobile devices through an extensible platform that can intercept and modify traffic before leaving the device or, on arrival, before it reaches the operating system. Conceptually, this is the same view of the traffic that a traditional middlebox would have at the far end of the first link in the network path. We call this platform ``middlebox zero'' or MBZ. By being on-board, MBZ also leverages local context as it processes the traffic and complements the network-wide view of standard middleboxes. We discuss the challenges of the MBZ approach, sketch a working design, and illustrate its potential with some concrete examples. |
Neil Agarwal, Matteo Varvello, Andrius Aucinas, Fabián E. Bustamante, Ravi Netravali Mind the Delay: The Adverse Effects of Delay-Based TCP on HTTP Inproceedings Proc. of ACM International Conference on emerging Networking EXperiments and Technologies (CoNEXT), 2020. @inproceedings{nagarwal:delaytcp, title = {Mind the Delay: The Adverse Effects of Delay-Based TCP on HTTP}, author = {Neil Agarwal and Matteo Varvello and Andrius Aucinas and Fabián E. Bustamante and Ravi Netravali}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2021/03/NAgarwal-CoNEXT20.pdf}, year = {2020}, date = {2020-12-01}, booktitle = {Proc. of ACM International Conference on emerging Networking EXperiments and Technologies (CoNEXT)}, abstract = {The last three decades have seen much evolution in web and network protocols: amongst them, a transition from HTTP/1.1 to HTTP/2 and a shift from loss-based to delay-based TCP congestion control algorithms. This paper argues that these two trends are at odds with one another, ultimately hurting web performance. Using a controlled synthetic study, we show how delay-based congestion control protocols (e.g., BBR and CUBIC + Hybrid Slow Start) result in the underestimation of the available congestion window in mobile networks, and how that dramatically hampers the effectiveness of HTTP/2. To quantify the impact of such findings in the current web, we evolved the web performance toolbox in two ways. First, we develop Igor, a client-side TCP congestion control detection tool that can differentiate between loss-based and delay-based algorithms by focusing on their behavior during slow start. Second, we develop a Chromium patch that allows fine-grained control over the HTTP version to be used per domain. Using these new web performance tools, we analyze over 300 real websites and find that 67% of sites relying solely on delay-based congestion control algorithms have better performance with HTTP/1.1.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } The last three decades have seen much evolution in web and network protocols: amongst them, a transition from HTTP/1.1 to HTTP/2 and a shift from loss-based to delay-based TCP congestion control algorithms. This paper argues that these two trends are at odds with one another, ultimately hurting web performance. Using a controlled synthetic study, we show how delay-based congestion control protocols (e.g., BBR and CUBIC + Hybrid Slow Start) result in the underestimation of the available congestion window in mobile networks, and how that dramatically hampers the effectiveness of HTTP/2. To quantify the impact of such findings in the current web, we evolved the web performance toolbox in two ways. First, we develop Igor, a client-side TCP congestion control detection tool that can differentiate between loss-based and delay-based algorithms by focusing on their behavior during slow start. Second, we develop a Chromium patch that allows fine-grained control over the HTTP version to be used per domain. Using these new web performance tools, we analyze over 300 real websites and find that 67% of sites relying solely on delay-based congestion control algorithms have better performance with HTTP/1.1. |
Andra Lutu, Byungjin Jun, Fabián E. Bustamante, Diego Perino, Marcelo Bagnulo, Carlos Gamboa Bontje A first look at the IP eXchange Ecosystem Journal Article ACM SIGCOMM Computer Communication Review (CCR), 50 (4), 2020. @article{lutu:ccr20, title = {A first look at the IP eXchange Ecosystem}, author = {Andra Lutu and Byungjin Jun and Fabián E. Bustamante and Diego Perino and Marcelo Bagnulo and Carlos Gamboa Bontje}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2020/10/ccr20-2.pdf}, year = {2020}, date = {2020-10-31}, journal = {ACM SIGCOMM Computer Communication Review (CCR)}, volume = {50}, number = {4}, abstract = {The IPX Network interconnects about 800 Mobile Network Operators (MNOs) worldwide and a range of other service providers (such as cloud and content providers). It forms the core that enables global data roaming while supporting emerging applications, from VoLTE and video streaming to IoT verticals. This paper presents the first characterization of this so-far opaque IPX ecosystem and a first-of-its-kind in-depth analysis of an IPX Provider (IPX-P). The IPX Network is a private network formed by a small set of tightly interconnected IPX-Ps. We analyze an operational dataset from a large IPX-P that includes BGP data as well as statistics from signaling. We shed light on the structure of the IPX Network as well as on the temporal, structural and geographic features of the IPX traffic. Our results are a first step in understanding the IPX Network at its core, key to fully understanding the global mobile Internet.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The IPX Network interconnects about 800 Mobile Network Operators (MNOs) worldwide and a range of other service providers (such as cloud and content providers). It forms the core that enables global data roaming while supporting emerging applications, from VoLTE and video streaming to IoT verticals. This paper presents the first characterization of this so-far opaque IPX ecosystem and a first-of-its-kind in-depth analysis of an IPX Provider (IPX-P). The IPX Network is a private network formed by a small set of tightly interconnected IPX-Ps. We analyze an operational dataset from a large IPX-P that includes BGP data as well as statistics from signaling. We shed light on the structure of the IPX Network as well as on the temporal, structural and geographic features of the IPX traffic. Our results are a first step in understanding the IPX Network at its core, key to fully understanding the global mobile Internet. |
Shucheng Liu, Zachary S. Bischof, Ishaan Madan, Peter K. Chan, Fabián E. Bustamante Out of Sight, Not Out of Mind - A User-View on the Criticality of the Submarine Cable Network Inproceedings Proc. of ACM Internet Measurement Conference (IMC), 2020. @inproceedings{aliu:imc20, title = {Out of Sight, Not Out of Mind - A User-View on the Criticality of the Submarine Cable Network}, author = {Shucheng Liu and Zachary S. Bischof and Ishaan Madan and Peter K. Chan and Fabián E. Bustamante}, url = {https://github.com/NU-AquaLab/Criticality-SCN https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2020/10/SLiu-IMC2020.pdf}, year = {2020}, date = {2020-10-27}, booktitle = {Proc. of ACM Internet Measurement Conference (IMC)}, abstract = {Nearly all international data is carried by a mesh of submarine cables connecting virtually every region in the world. It is generally assumed that Internet services rely on this submarine cable network (SCN) for backend traffic, but that most users do not directly depend on it, as popular resources are either local or cached nearby. In this paper, we study the criticality of the SCN from the perspective of end users. We present a general methodology for analyzing the reliance on the SCN for a given region, and apply it to the most popular web resources accessed by users in 63 countries from every inhabited continent, collectively capturing ≈80% of the global Internet population. We find that as many as 64.33% of all web resources accessed from a specific country rely on the SCN. Despite the explosive growth of data center and CDN infrastructure around the world, at least 28.22% of the CDN-hosted resources traverse a submarine cable.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Nearly all international data is carried by a mesh of submarine cables connecting virtually every region in the world. It is generally assumed that Internet services rely on this submarine cable network (SCN) for backend traffic, but that most users do not directly depend on it, as popular resources are either local or cached nearby. In this paper, we study the criticality of the SCN from the perspective of end users. We present a general methodology for analyzing the reliance on the SCN for a given region, and apply it to the most popular web resources accessed by users in 63 countries from every inhabited continent, collectively capturing ≈80% of the global Internet population. We find that as many as 64.33% of all web resources accessed from a specific country rely on the SCN. Despite the explosive growth of data center and CDN infrastructure around the world, at least 28.22% of the CDN-hosted resources traverse a submarine cable. |
Andra Lutu, Byungjin Jun, Alessandro Finamore, Fabián E. Bustamante, Diego Perino Where Things Roam: Uncovering Cellular IoT/M2M Connectivity Inproceedings Proc. of ACM Internet Measurement Conference (IMC), 2020. @inproceedings{alutu:imc20, title = {Where Things Roam: Uncovering Cellular IoT/M2M Connectivity}, author = {Andra Lutu and Byungjin Jun and Alessandro Finamore and Fabián E. Bustamante and Diego Perino}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2020/10/ALutu-IMC2020a.pdf}, year = {2020}, date = {2020-10-27}, booktitle = {Proc. of ACM Internet Measurement Conference (IMC)}, abstract = {Support for "things" roaming internationally has become critical for Internet of Things (IoT) verticals, from connected cars to smart meters and wearables, and explains the commercial success of Machine-to-Machine (M2M) platforms. We analyze IoT verticals operating with connectivity via IoT SIMs, and present the first large-scale study of commercially deployed IoT SIMs for energy meters. We also present the first characterization of an operational M2M platform and the first analysis of the rather opaque associated ecosystem. For operators, the exponential growth of IoT has meant increased stress on the infrastructure shared with traditional roaming traffic. Our analysis quantifies the adoption of roaming by M2M platforms and the impact they have on the underlying visited Mobile Network Operators (MNOs). To manage the impact of massive deployments of devices operating with an IoT SIM, operators must be able to distinguish between the latter and traditional inbound roamers. We build a comprehensive dataset capturing the device population of a large European MNO over three weeks. With this, we propose and validate a classification approach that can allow operators to distinguish inbound roaming IoT devices.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Support for "things" roaming internationally has become critical for Internet of Things (IoT) verticals, from connected cars to smart meters and wearables, and explains the commercial success of Machine-to-Machine (M2M) platforms. We analyze IoT verticals operating with connectivity via IoT SIMs, and present the first large-scale study of commercially deployed IoT SIMs for energy meters. We also present the first characterization of an operational M2M platform and the first analysis of the rather opaque associated ecosystem. For operators, the exponential growth of IoT has meant increased stress on the infrastructure shared with traditional roaming traffic. Our analysis quantifies the adoption of roaming by M2M platforms and the impact they have on the underlying visited Mobile Network Operators (MNOs). To manage the impact of massive deployments of devices operating with an IoT SIM, operators must be able to distinguish between the latter and traditional inbound roamers. We build a comprehensive dataset capturing the device population of a large European MNO over three weeks. With this, we propose and validate a classification approach that can allow operators to distinguish inbound roaming IoT devices. |
Yihan Zhang, Lyon Zhang, Hanlin Wang, Fabián E. Bustamante, Michael Rubenstein SwarmTalk - Towards Benchmark Software Suites for Swarm Robotics Platforms Inproceedings Proc. of the International Conference on Autonomous Agents and Multiagent Systems (AAMAS), 2020. @inproceedings{zhang:swarmtalk20, title = {SwarmTalk - Towards Benchmark Software Suites for Swarm Robotics Platforms}, author = {Yihan Zhang and Lyon Zhang and Hanlin Wang and Fabián E. Bustamante and Michael Rubenstein }, url = {http://ifaamas.org/Proceedings/aamas2020/pdfs/p1638.pdf}, year = {2020}, date = {2020-05-09}, booktitle = {Proc. of the International Conference on Autonomous Agents and Multiagent Systems (AAMAS)}, abstract = {With nearly every new swarm robotic platform built, the designers develop its software stack, from low-level drivers to high-level algorithmic implementations. And while the different software stacks frequently share components, especially in robot-to-robot communication, these common components are also developed from scratch time and again. We present SwarmTalk, a new communication library that can be quickly ported to new and existing swarm hardware. SwarmTalk adopts a publish-subscribe communication model that satisfies the severe hardware constraints found in many swarms, and provides an easy-to-use programming interface. We port our SwarmTalk prototype to two hardware swarm platforms and two simulator-based platforms, and implement commonly-used swarm algorithms on these four platforms. We present the design and implementation of SwarmTalk, discuss some of the system challenges in implementation and cross-platform porting, and report on our initial experiences as a common communication abstraction for a community benchmarking suite.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } With nearly every new swarm robotic platform built, the designers develop its software stack, from low-level drivers to high-level algorithmic implementations. And while the different software stacks frequently share components, especially in robot-to-robot communication, these common components are also developed from scratch time and again. We present SwarmTalk, a new communication library that can be quickly ported to new and existing swarm hardware. SwarmTalk adopts a publish-subscribe communication model that satisfies the severe hardware constraints found in many swarms, and provides an easy-to-use programming interface. We port our SwarmTalk prototype to two hardware swarm platforms and two simulator-based platforms, and implement commonly-used swarm algorithms on these four platforms. We present the design and implementation of SwarmTalk, discuss some of the system challenges in implementation and cross-platform porting, and report on our initial experiences as a common communication abstraction for a community benchmarking suite. |
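The communication model at SwarmTalk's core can be illustrated in a few lines. The sketch below shows the general publish-subscribe shape in TypeScript for readability only; it is not SwarmTalk's actual interface, which targets resource-constrained robot hardware, and the channel name and message type are invented for the example.

```typescript
// Minimal publish-subscribe shape, illustrating the communication model
// SwarmTalk adopts (illustrative only; not SwarmTalk's actual API).
type Handler = (msg: Uint8Array) => void;

class PubSub {
  private subs = new Map<string, Handler[]>();

  subscribe(channel: string, handler: Handler): void {
    const list = this.subs.get(channel) ?? [];
    list.push(handler);
    this.subs.set(channel, list);
  }

  publish(channel: string, msg: Uint8Array): void {
    for (const h of this.subs.get(channel) ?? []) h(msg);
  }
}

const bus = new PubSub();
bus.subscribe("neighbor-pos", (m) => console.log("got", m.length, "bytes"));
bus.publish("neighbor-pos", new Uint8Array([1, 2, 3])); // small, fixed-size payload
```

The appeal of this model for swarms is that publishers never track who is listening, which maps naturally onto broadcast-style robot-to-robot radios with tight memory budgets.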
2019 |
Matteo Varvello, Kleomenis Katevas, Wei Hang, Mihai Plesa, Hamed Haddadi, Fabián E. Bustamante, Benjamin Livshits BatteryLab, a distributed power monitoring platform for mobile devices: demo abstract Inproceedings Proc. of the ACM Conference on Embedded Networked Sensor Systems (SensSys), 2019. @inproceedings{mvarvello:batterylab, title = {BatteryLab, a distributed power monitoring platform for mobile devices: demo abstract}, author = {Matteo Varvello and Kleomenis Katevas and Wei Hang and Mihai Plesa and Hamed Haddadi and Fabián E. Bustamante and Benjamin Livshits }, year = {2019}, date = {2019-11-11}, booktitle = {Proc. of the ACM Conference on Embedded Networked Sensor Systems (SensSys)}, abstract = {There has been a growing interest in measuring and optimizing the power efficiency of mobile apps. Traditional power evaluations rely either on inaccurate software-based solutions or on ad-hoc testbeds composed of a power meter and a mobile device. This demonstration presents BatteryLab, our solution to share existing battery testing setups to build a distributed platform for battery measurements. Our vision is to transform independent battery testing setups into vantage points of a planetary-scale measurement platform offering heterogeneous devices and testing conditions. We demonstrate BatteryLab functionalities by investigating the energy efficiency of popular websites when loaded via both Android and iOS browsers. Our demonstration is also live at https://batterylab.dev/.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } There has been a growing interest in measuring and optimizing the power efficiency of mobile apps. Traditional power evaluations rely either on inaccurate software-based solutions or on ad-hoc testbeds composed of a power meter and a mobile device. This demonstration presents BatteryLab, our solution to share existing battery testing setups to build a distributed platform for battery measurements. Our vision is to transform independent battery testing setups into vantage points of a planetary-scale measurement platform offering heterogeneous devices and testing conditions. We demonstrate BatteryLab functionalities by investigating the energy efficiency of popular websites when loaded via both Android and iOS browsers. Our demonstration is also live at https://batterylab.dev/. |
James Newman, Robert H. Belson, Fabián E. Bustamante Scaling up your web experience, everywhere Workshop Proc. of the International Workshop on Mobile Computing Systems and Applications (HotMobile), 2019. @workshop{newman:scaleup, title = {Scaling up your web experience, everywhere}, author = {James Newman and Robert H. Belson and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/Newman-Scaleup.pdf}, year = {2019}, date = {2019-01-06}, booktitle = {Proc. of the International Workshop on Mobile Computing Systems and Applications (HotMobile)}, journal = {Hotmobile}, abstract = {We present an approach to improve users’ web experience by dynamically reducing the complexity of websites rendered based on network conditions. Our approach is based on a simple insight – adjusting a browser window’s scale (i.e., zooming in/out) changes the number of objects placed above-the-fold and thus hides the loading of objects pushed below the fold in the user scroll time. We design ScaleUp, a browser extension that tracks network conditions and dynamically adjusts browser scale appropriately to improve user web Quality of Experience (QoE) while preserving the design integrity of websites. Through controlled experiments, we demonstrate the impact of ScaleUp on a number of key QoE metrics over a random sample of 50 from the top 500 Alexa websites. We show that a simple adjustment in scale can result in an over 19% improvement on Above-The-Fold (ATF) time in the median case. While adjusting a scale factor can improve proxy metrics of QoE, it is unclear if that translates into an improved web experience for users. We summarize findings from a large, crowdsourced experiment with 1,000 users showing that, indeed, improvements to QoE metrics correlate with an enhanced user experience. We have released ScaleUp as a Chrome Extension that now counts over 1,000 users worldwide, and report on some of the lessons learned from this deployment.}, keywords = {}, pubstate = {published}, tppubtype = {workshop} } We present an approach to improve users’ web experience by dynamically reducing the complexity of websites rendered based on network conditions. Our approach is based on a simple insight – adjusting a browser window’s scale (i.e., zooming in/out) changes the number of objects placed above-the-fold and thus hides the loading of objects pushed below the fold in the user scroll time. We design ScaleUp, a browser extension that tracks network conditions and dynamically adjusts browser scale appropriately to improve user web Quality of Experience (QoE) while preserving the design integrity of websites. Through controlled experiments, we demonstrate the impact of ScaleUp on a number of key QoE metrics over a random sample of 50 from the top 500 Alexa websites. We show that a simple adjustment in scale can result in an over 19% improvement on Above-The-Fold (ATF) time in the median case. While adjusting a scale factor can improve proxy metrics of QoE, it is unclear if that translates into an improved web experience for users. We summarize findings from a large, crowdsourced experiment with 1,000 users showing that, indeed, improvements to QoE metrics correlate with an enhanced user experience. We have released ScaleUp as a Chrome Extension that now counts over 1,000 users worldwide, and report on some of the lessons learned from this deployment. |
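The core insight reduces to a small feedback loop: read the browser's network estimate and adjust the page scale. A hedged sketch follows; the thresholds are invented for illustration and are not ScaleUp's actual policy, and both navigator.connection and the CSS zoom property are non-standard, Chromium-only features, which fits ScaleUp's release as a Chrome extension.

```typescript
// Content-script sketch of the ScaleUp idea (illustrative; not the actual
// extension code): zoom out when the network looks slow so fewer objects
// land above the fold, hiding below-the-fold loads in user scroll time.
function adjustScale(): void {
  // navigator.connection is non-standard (Chromium), hence the `any` cast.
  const conn = (navigator as any).connection;
  if (!conn) return;
  const downlinkMbps: number = conn.downlink ?? 10;
  // Hypothetical policy: scale down as the bandwidth estimate drops.
  const zoom = downlinkMbps < 1 ? 0.75 : downlinkMbps < 5 ? 0.9 : 1.0;
  (document.body.style as any).zoom = String(zoom); // non-standard CSS zoom
}

adjustScale();
(navigator as any).connection?.addEventListener("change", adjustScale);
```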
James Newman, Fabián E. Bustamante The Value of First Impressions: The Impact of Ad-Blocking on Web QoE Inproceedings Proc. of the Passive and Active Measurement (PAM), 2019. @inproceedings{newman:impressions, title = {The Value of First Impressions: The Impact of Ad-Blocking on Web QoE}, author = {James Newman and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/JNewman-Adblock.pdf}, year = {2019}, date = {2019-01-03}, booktitle = {Proc. of the Passive and Active Measurement (PAM)}, journal = {Passive and Active Measurement (PAM)}, abstract = {We present the first detailed analysis of ad-blocking’s impact on user Web quality of experience (QoE). We use the most popular web-based ad-blocker to capture the impact of ad-blocking on QoE for the top 5,000 Alexa websites. We find that ad-blocking reduces the number of objects loaded by 15% in the median case, and that this reduction translates into a 12.5% improvement on page load time (PLT) and a slight worsening of time to first paint (TTFP) of 6.54%. We show the complex relationship between ad-blocking and quality of experience - despite the clear improvements to PLT in the average case, for the bottom 10th percentile, this improvement comes at the cost of a slowdown in the initial responsiveness of websites, with a 19% increase to TTFP. To understand the relative importance of this tradeoff on user experience, we run a large, crowdsourced experiment with 1,000 users on Amazon Mechanical Turk. For this experiment, users were presented with websites for which ad-blocking results in both a reduction of PLT and a significant increase in TTFP. We find, surprisingly, that 71.5% of the time users show a clear preference for faster first paint over faster page load times, hinting at the importance of first impressions on web QoE.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } We present the first detailed analysis of ad-blocking’s impact on user Web quality of experience (QoE). We use the most popular web-based ad-blocker to capture the impact of ad-blocking on QoE for the top 5,000 Alexa websites. We find that ad-blocking reduces the number of objects loaded by 15% in the median case, and that this reduction translates into a 12.5% improvement on page load time (PLT) and a slight worsening of time to first paint (TTFP) of 6.54%. We show the complex relationship between ad-blocking and quality of experience - despite the clear improvements to PLT in the average case, for the bottom 10th percentile, this improvement comes at the cost of a slowdown in the initial responsiveness of websites, with a 19% increase to TTFP. To understand the relative importance of this tradeoff on user experience, we run a large, crowdsourced experiment with 1,000 users on Amazon Mechanical Turk. For this experiment, users were presented with websites for which ad-blocking results in both a reduction of PLT and a significant increase in TTFP. We find, surprisingly, that 71.5% of the time users show a clear preference for faster first paint over faster page load times, hinting at the importance of first impressions on web QoE. |
Byungjin Jun, Fabián E. Bustamante, Sung Yoon Whang, Zachary S. Bischof AMP up your Mobile Web Experience: Characterizing the Impact of Google’s Accelerated Mobile Project Conference Proc. of the Annual International Conference on Mobile Computing and Networking (MobiCom), 2019. @conference{jun:ampup, title = {AMP up your Mobile Web Experience: Characterizing the Impact of Google’s Accelerated Mobile Project}, author = {Byungjin Jun and Fabián E. Bustamante and Sung Yoon Whang and Zachary S. Bischof}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/AMP-Mobicom-2019.pdf}, year = {2019}, date = {2019-01-02}, booktitle = {Proc. of the Annual International Conference on Mobile Computing and Networking (MobiCom)}, journal = {Mobicom}, abstract = {The rapid growth in the number of mobile devices, subscriptions, and their associated traffic has served as motivation for several projects focused on improving mobile users' quality of experience (QoE). Few have been as contentious as the Google-initiated Accelerated Mobile Project (AMP), both praised for its seemingly instant mobile web experience and criticized based on concerns about the enforcement of its formats. This paper presents the first characterization of AMP’s impact on users’ QoE. We do this using a corpus of over 2,100 AMP webpages and their corresponding non-AMP counterparts, built from searches for trending keywords. We characterize AMP’s impact by looking at common web QoE metrics, including Page Load Time, Time to First Byte and SpeedIndex (SI). Our results show that AMP significantly improves SI, yielding on average a 60% lower SI than non-AMP pages without accounting for prefetching. Prefetching of AMP pages pushes this advantage even further, with prefetched pages loading over 2,000ms faster than non-prefetched AMP pages. This clear boost may come, however, at a non-negligible cost for users with limited data plans as it incurs an average of over 1.4 MB of additional data downloaded, unbeknownst to users.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } The rapid growth in the number of mobile devices, subscriptions, and their associated traffic has served as motivation for several projects focused on improving mobile users' quality of experience (QoE). Few have been as contentious as the Google-initiated Accelerated Mobile Project (AMP), both praised for its seemingly instant mobile web experience and criticized based on concerns about the enforcement of its formats. This paper presents the first characterization of AMP’s impact on users’ QoE. We do this using a corpus of over 2,100 AMP webpages and their corresponding non-AMP counterparts, built from searches for trending keywords. We characterize AMP’s impact by looking at common web QoE metrics, including Page Load Time, Time to First Byte and SpeedIndex (SI). Our results show that AMP significantly improves SI, yielding on average a 60% lower SI than non-AMP pages without accounting for prefetching. Prefetching of AMP pages pushes this advantage even further, with prefetched pages loading over 2,000ms faster than non-prefetched AMP pages. This clear boost may come, however, at a non-negligible cost for users with limited data plans as it incurs an average of over 1.4 MB of additional data downloaded, unbeknownst to users. |
2018 |
Zachary S. Bischof, Romain Fontugne, Fabián E. Bustamante Untangling the world-wide mesh of undersea cables Workshop ACM Workshop on Hot Topics in Networks (HotNets), 2018. @workshop{bischof:untangling, title = {Untangling the world-wide mesh of undersea cables}, author = {Zachary S. Bischof and Romain Fontugne and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/undersea.pdf}, year = {2018}, date = {2018-11-19}, booktitle = {ACM Workshop on Hot Topics in Networks (HotNets)}, abstract = {The growth of global Internet traffic has driven an exponential expansion of the submarine cable network, both in terms of the sheer number of links and its total capacity. Today, a complex mesh of hundreds of cables, stretched over 1 million kilometers, connects nearly every corner of the earth and is instrumental in closing the remaining connectivity gaps. Despite the scale and critical role of the submarine network to both business and society at large, our community has mostly ignored it, treating it as a black box in most studies from connectivity to inter-domain traffic and reliability. In this paper, we make the case for a new research agenda focused on characterizing the global submarine network and the critical role it plays as a basic component of any inter-continental end-to-end connection.}, keywords = {}, pubstate = {published}, tppubtype = {workshop} } The growth of global Internet traffic has driven an exponential expansion of the submarine cable network, both in terms of the sheer number of links and its total capacity. Today, a complex mesh of hundreds of cables, stretched over 1 million kilometers, connects nearly every corner of the earth and is instrumental in closing the remaining connectivity gaps. Despite the scale and critical role of the submarine network to both business and society at large, our community has mostly ignored it, treating it as a black box in most studies from connectivity to inter-domain traffic and reliability. In this paper, we make the case for a new research agenda focused on characterizing the global submarine network and the critical role it plays as a basic component of any inter-continental end-to-end connection. |
Zachary S. Bischof, Fabián E. Bustamante, Nick Feamster The Growing Importance of Being Always On -- A first look at the reliability of broadband Internet access Conference Research Conference on Communication, Information and Internet Policy (TPRC), 2018. @conference{bischof:tprc, title = {The Growing Importance of Being Always On -- A first look at the reliability of broadband Internet access}, author = {Zachary S. Bischof and Fabián E. Bustamante and Nick Feamster}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/tprc46-reliability.pdf}, year = {2018}, date = {2018-09-03}, booktitle = {Research Conference on Communication, Information and Internet Policy (TPRC)}, journal = {In Proc. of TPRC46}, abstract = {Broadband availability and performance continue to improve rapidly, spurred by both government and private investment and motivated by the recognized social and economic benefits of connectivity. A recent ITU ``State of Broadband'' report notes that there are over 60 countries where fixed or mobile broadband penetration is above 25% and more than 70 countries where the majority of the population is online. According to Akamai's ``State of the Internet'' report, over the last four years, the top four countries in terms of average connection speed have nearly doubled their capacity. Although providing access and sufficient capacity remains a challenge in many parts of the world, in most developed countries, broadband providers are offering sufficiently high capacities to encourage consumers to migrate services for entertainment, communication and home monitoring to over-the-top (OTT) alternatives. According to a recent survey, nearly 78% of U.S. broadband households subscribe to an OTT video service. Enterprises are following the same path, with over one-third opting to use VoIP phones instead of landline ones. The proliferation of high-capacity access and the migration to OTT services have raised users' expectations of service reliability. A recent survey on consumer experience by the UK Office of Communication (Ofcom) ranks reliability first--- higher than even the speed of connection ---as the main reason for customer complaints. Our empirical study of access-ISP outages and user demand corroborates these observations, showing the effects of low reliability on user behavior, as captured by their demand on the network. Researchers and regulators alike have also recognized the need for clear standards and a better understanding of the role that service reliability plays in shaping the behavior of broadband users. Despite its growing importance, both the reliability of broadband services and potential ways to improve on it have received scant attention from the research community. In this paper, we introduce an approach for characterizing broadband reliability using data collected by the many emerging national efforts to study broadband (in over 30 countries) and apply it to the data gathered by the Measuring Broadband America (MBA) project, which is operated by the United States Federal Communications Commission (FCC). We show, among other findings, that current broadband services deliver an average availability of at most two nines (99%), with an average annual downtime of 17.8 hours. Motivated by our findings, we quantify the potential benefits of multihomed broadband access and study its feasibility as a solution for increasing reliability. 
Using the FCC MBA dataset and measurements collected by over 6,000 end-host vantage points in 75 countries, we show that multihoming the access link at the home gateway with two different providers adds two nines of service availability, matching the minimum four nines (99.99%) required by the FCC for the public switched telephone network (PSTN).}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Broadband availability and performance continue to improve rapidly, spurred by both government and private investment and motivated by the recognized social and economic benefits of connectivity. A recent ITU ``State of Broadband'' report notes that there are over 60 countries where fixed or mobile broadband penetration is above 25% and more than 70 countries where the majority of the population is online. According to Akamai's ``State of the Internet'' report, over the last four years, the top four countries in terms of average connection speed have nearly doubled their capacity. Although providing access and sufficient capacity remains a challenge in many parts of the world, in most developed countries, broadband providers are offering sufficiently high capacities to encourage consumers to migrate services for entertainment, communication and home monitoring to over-the-top (OTT) alternatives. According to a recent survey, nearly 78% of U.S. broadband households subscribe to an OTT video service. Enterprises are following the same path, with over one-third opting to use VoIP phones instead of landline ones. The proliferation of high-capacity access and the migration to OTT services have raised users' expectations of service reliability. A recent survey on consumer experience by the UK Office of Communication (Ofcom) ranks reliability first--- higher than even the speed of connection ---as the main reason for customer complaints. Our empirical study of access-ISP outages and user demand corroborates these observations, showing the effects of low reliability on user behavior, as captured by their demand on the network. Researchers and regulators alike have also recognized the need for clear standards and a better understanding of the role that service reliability plays in shaping the behavior of broadband users. Despite its growing importance, both the reliability of broadband services and potential ways to improve on it have received scant attention from the research community. In this paper, we introduce an approach for characterizing broadband reliability using data collected by the many emerging national efforts to study broadband (in over 30 countries) and apply it to the data gathered by the Measuring Broadband America (MBA) project, which is operated by the United States Federal Communications Commission (FCC). We show, among other findings, that current broadband services deliver an average availability of at most two nines (99%), with an average annual downtime of 17.8 hours. Motivated by our findings, we quantify the potential benefits of multihomed broadband access and study its feasibility as a solution for increasing reliability. Using the FCC MBA dataset and measurements collected by over 6,000 end-host vantage points in 75 countries, we show that multihoming the access link at the home gateway with two different providers adds two nines of service availability, matching the minimum four nines (99.99%) required by the FCC for the public switched telephone network (PSTN). |
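The nines arithmetic behind these findings is easy to reproduce. The sketch below uses the paper's reported 17.8-hour average annual downtime and assumes the two providers fail independently; under that assumption, the combined unavailability is the product of the individual ones.

```typescript
// Reliability arithmetic behind the multihoming argument. The 17.8h annual
// downtime is the paper's reported average; independence of the two
// providers' failures is an assumption of this back-of-the-envelope sketch.
const HOURS_PER_YEAR = 24 * 365; // 8760

const downtimeHours = 17.8;
const u = downtimeHours / HOURS_PER_YEAR; // unavailability, ~0.002
console.log(`single link: ${(100 * (1 - u)).toFixed(2)}%`); // ~99.80%, about two nines

// Two independent links are both down with probability u^2:
const multihomed = 1 - u ** 2;
console.log(`multihomed:  ${(100 * multihomed).toFixed(5)}%`); // ~99.99959%, past four nines
```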
Sarah Wassermann, John P. Rula, Fabián E. Bustamante, Pedro Casas Anycast on the Move: A Look at Mobile Anycast Performance Conference Network Traffic Measurement and Analysis Conference (TMA), 2018. @conference{wassermann:anycast, title = {Anycast on the Move: A Look at Mobile Anycast Performance}, author = {Sarah Wassermann and John P. Rula and Fabián E. Bustamante and Pedro Casas}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/SWasserman-AnycastMove.pdf}, year = {2018}, date = {2018-06-11}, booktitle = {Network Traffic Measurement and Analysis Conference (TMA)}, abstract = {The appeal and clear operational and economic benefits of anycast to service providers have motivated a number of recent experimental studies on its potential performance impact for end users. For CDNs on mobile networks, in particular, anycast provides a simpler alternative to existing request routing systems challenged by a growing, complex, and commonly opaque cellular infrastructure. This paper presents the first analysis of anycast performance for mobile users. In particular, our evaluation focuses on two distinct anycast services, both providing part of the DNS Root zone and together covering all major geographical regions. Our results show that mobile clients are routed to suboptimal replicas in terms of geographical distance and associated latencies, more frequently while on a cellular connection than on WiFi, with a significant impact on performance. We find that this is not simply an issue of lacking better alternatives, and that the problem is not specific to particular geographic areas or autonomous systems. We close with a first analysis of the root causes of this phenomenon and describe some of the major classes of anycast anomalies revealed during our study, additionally including a systematic approach to automatically detect such anomalies without any sort of training or labeled measurements.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } The appeal and clear operational and economic benefits of anycast to service providers have motivated a number of recent experimental studies on its potential performance impact for end users. For CDNs on mobile networks, in particular, anycast provides a simpler alternative to existing request routing systems challenged by a growing, complex, and commonly opaque cellular infrastructure. This paper presents the first analysis of anycast performance for mobile users. In particular, our evaluation focuses on two distinct anycast services, both providing part of the DNS Root zone and together covering all major geographical regions. Our results show that mobile clients are routed to suboptimal replicas in terms of geographical distance and associated latencies, more frequently while on a cellular connection than on WiFi, with a significant impact on performance. We find that this is not simply an issue of lacking better alternatives, and that the problem is not specific to particular geographic areas or autonomous systems. We close with a first analysis of the root causes of this phenomenon and describe some of the major classes of anycast anomalies revealed during our study, additionally including a systematic approach to automatically detect such anomalies without any sort of training or labeled measurements. |
John P. Rula, Fabián E. Bustamante, James Newman, Arash Molavi Kakhki, Dave Choffnes Mile High WiFi: A First Look At In-Flight Internet Connectivity Conference The Web Conference (WWW), 2018. @conference{rula:mhwifi, title = {Mile High WiFi: A First Look At In-Flight Internet Connectivity}, author = {John P. Rula and Fabián E. Bustamante and James Newman and Arash Molavi Kakhki and Dave Choffnes}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/JRula-WWW18.pdf}, year = {2018}, date = {2018-04-03}, booktitle = {The Web Conference (WWW)}, journal = {Proc. of WWW}, abstract = {In-Flight Communication (IFC), which can be purchased on a growing number of commercial flights, is often received by consumers with both awe for its mere availability and harsh criticism for its poor performance. Indeed, IFC provides Internet connectivity in some of the most challenging conditions with aircraft traveling at speeds in excess of 500 mph at 30,000 feet above the ground. Yet, while existing services do provide basic Internet accessibility, anecdotal reports rank their quality of service as, at best, poor. In this paper, we present the first characterization of deployed IFC systems. Using over 45 flight-hours of measurements, we profile the performance of IFC across the two dominant access technologies -- direct air-to-ground communication (DA2GC) and mobile satellite service (MSS). We show that IFC QoS is in large part determined by the high latencies inherent to DA2GC and MSS, with RTTs averaging 200ms and 750ms, respectively, and that these high latencies directly impact the performance of common applications such as web browsing. While each IFC technology is based on well-studied wireless communication technologies, our findings reveal that IFC links experience more degraded link performance than their technological antecedents. We find median loss rates of 7%, and nearly 40% loss at the 90th percentile for MSS, an order of magnitude larger than recent characterizations of residential satellite networks. We extend our IFC study by exploring the potential of the newly released HTTP/2 and QUIC protocols in an emulated IFC environment, finding that QUIC is able to improve page load times by as much as 7.9 times. In addition, we find that HTTP/2's use of multiplexing multiple requests onto a single TCP connection performs up to 4.8x worse than HTTP/1.1 when faced with large numbers of objects. We use network emulation to explore proposed technological improvements to existing IFC systems, finding that high link losses are the largest factor in performance degradation, and that improving link bandwidth does little to improve the quality of experience for applications such as web browsing.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } In-Flight Communication (IFC), which can be purchased on a growing number of commercial flights, is often received by consumers with both awe for its mere availability and harsh criticism for its poor performance. Indeed, IFC provides Internet connectivity in some of the most challenging conditions with aircraft traveling at speeds in excess of 500 mph at 30,000 feet above the ground. Yet, while existing services do provide basic Internet accessibility, anecdotal reports rank their quality of service as, at best, poor. In this paper, we present the first characterization of deployed IFC systems. 
Using over 45 flight-hours of measurements, we profile the performance of IFC across the two dominant access technologies -- direct air-to-ground communication (DA2GC) and mobile satellite service (MSS). We show that IFC QoS is in large part determined by the high latencies inherent to DA2GC and MSS, with RTTs averaging 200ms and 750ms, respectively, and that these high latencies directly impact the performance of common applications such as web browsing. While each IFC technology is based on well-studied wireless communication technologies, our findings reveal that IFC links experience more degraded link performance than their technological antecedents. We find median loss rates of 7%, and nearly 40% loss at the 90th percentile for MSS, an order of magnitude larger than recent characterizations of residential satellite networks. We extend our IFC study by exploring the potential of the newly released HTTP/2 and QUIC protocols in an emulated IFC environment, finding that QUIC is able to improve page load times by as much as 7.9 times. In addition, we find that HTTP/2's use of multiplexing multiple requests onto a single TCP connection performs up to 4.8x worse than HTTP/1.1 when faced with large numbers of objects. We use network emulation to explore proposed technological improvements to existing IFC systems, finding that high link losses are the largest factor in performance degradation, and that improving link bandwidth does little to improve the quality of experience for applications such as web browsing. |
2017 |
John P. Rula, Fabián E. Bustamante, Moritz Steiner Cell Spotting -- Studying the Role of Cellular Networks in the Internet Conference Internet Measurement Conference (IMC), 2017. @conference{CellSpotting, title = {Cell Spotting -- Studying the Role of Cellular Networks in the Internet}, author = {John P. Rula and Fabián E. Bustamante and Moritz Steiner}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/census.pdf}, year = {2017}, date = {2017-11-03}, booktitle = {Internet Measurement Conference (IMC)}, journal = {In Proc. of IMC}, abstract = {The increasingly dominant role of the mobile Internet and its economic implications have been the topic of several studies and surveys from industry and academia. Most previous work has focused on mobile devices, as a whole, independently of their connectivity, and taken the limited perspectives of either a few individual handsets or a single operator. We lack a comprehensive and global view of cellular networks, their scope, configurations and usage. In this paper, we present a comprehensive analysis of global cellular networks. We describe an approach to accurately identify cellular network IP addresses using the Network Information API, a non-standard JavaScript API in several mobile browsers, and show its effectiveness in a range of cellular network configurations. We combine this approach with the vantage point of one of the world’s largest CDNs, with over 200,000 servers in 1,450 networks and clients in over 46,000 ASes across 245 countries, to characterize cellular access around the globe. We discover over 350 thousand /24 and 23 thousand /48 cellular IPv4 and IPv6 prefixes, respectively. We find that the majority of cellular networks exist as mixed networks (i.e., networks that share both fixed-line and cellular devices), requiring prefix – not ASN – level identification. By utilizing address-level traffic from the same CDN, we calculate the fraction of traffic coming from cellular addresses. Overall, we find that cellular traffic comprises 16.2% of the CDN’s global traffic, and that cellular traffic ranges widely in importance between countries, from capturing nearly 96% of all traffic in Ghana to just 12.1% in France.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } The increasingly dominant role of the mobile Internet and its economic implications have been the topic of several studies and surveys from industry and academia. Most previous work has focused on mobile devices, as a whole, independently of their connectivity, and taken the limited perspectives of either a few individual handsets or a single operator. We lack a comprehensive and global view of cellular networks, their scope, configurations and usage. In this paper, we present a comprehensive analysis of global cellular networks. We describe an approach to accurately identify cellular network IP addresses using the Network Information API, a non-standard JavaScript API in several mobile browsers, and show its effectiveness in a range of cellular network configurations. We combine this approach with the vantage point of one of the world’s largest CDNs, with over 200,000 servers in 1,450 networks and clients in over 46,000 ASes across 245 countries, to characterize cellular access around the globe. We discover over 350 thousand /24 and 23 thousand /48 cellular IPv4 and IPv6 prefixes, respectively. 
We find that the majority of cellular networks exist as mixed networks (i.e., networks that share both fixed-line and cellular devices), requiring prefix – not ASN – level identification. By utilizing address-level traffic from the same CDN, we calculate the fraction of traffic coming from cellular addresses. Overall, we find that cellular traffic comprises 16.2% of the CDN’s global traffic, and that cellular traffic ranges widely in importance between countries, from capturing nearly 96% of all traffic in Ghana to just 12.1% in France. |
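The client-side half of the identification approach can be sketched briefly. As the abstract notes, the Network Information API is non-standard and the connection.type attribute is exposed only by some (mostly mobile) browsers, so the snippet below is an illustration of the technique rather than the paper's measurement code; the beacon-reporting step is a hypothetical completion.

```typescript
// Sketch of client-side access-type detection via the (non-standard)
// Network Information API; availability varies by browser, so every
// branch below is defensive. Illustrative only.
function accessType(): string {
  const conn =
    (navigator as any).connection ||
    (navigator as any).mozConnection ||
    (navigator as any).webkitConnection;
  if (!conn) return "unknown"; // API not exposed by this browser
  return conn.type ?? "unknown"; // e.g., "cellular", "wifi", "ethernet"
}

// A measurement beacon could report this label alongside the client IP,
// letting the server side tag each IP prefix as cellular or mixed.
console.log("access type:", accessType());
```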
Zachary S. Bischof, Fabián E. Bustamante, Nick Feamster Characterizing and Improving the Reliability of Broadband Internet Access Online arXiv.org 2017. @online{bischof:breliability, title = {Characterizing and Improving the Reliability of Broadband Internet Access}, author = {Zachary S. Bischof and Fabián E. Bustamante and Nick Feamster}, url = {https://arxiv.org/abs/1709.09349}, year = {2017}, date = {2017-09-03}, organization = {arXiv.org}, abstract = {In this paper, we empirically demonstrate the growing importance of reliability by measuring its effect on user behavior. We present an approach for broadband reliability characterization using data collected by the many emerging national initiatives to study broadband, and apply it to the data gathered by the Federal Communications Commission's Measuring Broadband America project. Motivated by our findings, we present the design, implementation, and evaluation of a practical approach for improving the reliability of broadband Internet access with multihoming.}, keywords = {}, pubstate = {published}, tppubtype = {online} } In this paper, we empirically demonstrate the growing importance of reliability by measuring its effect on user behavior. We present an approach for broadband reliability characterization using data collected by the many emerging national initiatives to study broadband, and apply it to the data gathered by the Federal Communications Commission's Measuring Broadband America project. Motivated by our findings, we present the design, implementation, and evaluation of a practical approach for improving the reliability of broadband Internet access with multihoming. |
Zachary S. Bischof, Fabián E. Bustamante, Rade Stanojevic The utility argument — Making a case for broadband SLAs Conference Passive and Active Measurement (PAM), 2017. @conference{bischof:sla, title = {The utility argument — Making a case for broadband SLAs}, author = {Zachary S. Bischof and Fabián E. Bustamante and Rade Stanojevic}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/PAM17_Bischof.pdf}, year = {2017}, date = {2017-03-03}, booktitle = {Passive and Active Measurement (PAM)}, journal = {In Proc. of PAM}, abstract = {Most residential broadband services are described in terms of their maximum potential throughput rate, often advertised as having speeds "up to X Mbps". Though such promises are often met, they are fairly limited in scope and, unfortunately, there is no basis for an appeal if a customer were to receive compromised quality of service. While this 'best effort' model was sufficient in the early days, we argue that as broadband customers and their devices become more dependent on Internet connectivity, we will see an increased demand for more encompassing Service Level Agreements (SLAs). In this paper, we study the design space of broadband SLAs and explore some of the trade-offs between the level of strictness of SLAs and the cost of delivering them. We argue that certain SLAs could be offered almost immediately with minimal impact on retail prices, and that ISPs (or third parties) could accurately infer the risk of offering an SLA to individual customers – with accuracy comparable to that in the car or credit insurance industry – and price the SLA service accordingly.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Most residential broadband services are described in terms of their maximum potential throughput rate, often advertised as having speeds "up to X Mbps". Though such promises are often met, they are fairly limited in scope and, unfortunately, there is no basis for an appeal if a customer were to receive compromised quality of service. While this 'best effort' model was sufficient in the early days, we argue that as broadband customers and their devices become more dependent on Internet connectivity, we will see an increased demand for more encompassing Service Level Agreements (SLAs). In this paper, we study the design space of broadband SLAs and explore some of the trade-offs between the level of strictness of SLAs and the cost of delivering them. We argue that certain SLAs could be offered almost immediately with minimal impact on retail prices, and that ISPs (or third parties) could accurately infer the risk of offering an SLA to individual customers – with accuracy comparable to that in the car or credit insurance industry – and price the SLA service accordingly. |
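As a rough illustration of risk-based SLA pricing in the spirit of insurance, the following Python fragment (all numbers fabricated; the 80% threshold and per-violation credit are assumptions, not the paper's parameters) estimates a subscriber's violation risk from historical throughput samples and derives a fair monthly premium:

    import random

    # Hypothetical daily throughput samples for one subscriber (Mbps);
    # in practice these would come from longitudinal measurements.
    random.seed(42)
    samples = [random.gauss(95, 12) for _ in range(365)]

    # An example SLA: throughput must stay above 80% of the advertised
    # rate (here 100 Mbps). Threshold and penalty are assumed values.
    advertised, strictness = 100.0, 0.8
    violations = sum(1 for s in samples if s < advertised * strictness)
    risk = violations / len(samples)

    # A risk-based premium: expected payout is the violation probability
    # times the per-violation credit, scaled to a 30-day month.
    credit = 1.50  # dollars credited per violating day (assumed)
    premium = risk * credit * 30
    print(f"violation risk: {risk:.1%}, fair monthly premium: ${premium:.2f}")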
Fabián E. Bustamante, David Clark, Nick Feamster Workshop on Tracking Quality of Experience in the Internet: Summary and Outcomes Journal Article SIGCOMM Computer Communication Review (CCR), 47 (1), 2017. @article{bustamante:qoe, title = {Workshop on Tracking Quality of Experience in the Internet: Summary and Outcomes}, author = {Fabián E. Bustamante and David Clark and Nick Feamster}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/p55-bustamante.pdf}, year = {2017}, date = {2017-01-03}, journal = {SIGCOMM Computer Communication Review (CCR)}, volume = {47}, number = {1}, abstract = {This is a report on the Workshop on Tracking Quality of Experience in the Internet, held at Princeton, October 21–22, 2015, and jointly sponsored by the National Science Foundation and the Federal Communications Commission. The term Quality of Experience (QoE) describes a user’s subjective assessment of their experience when using a particular application. In the past, network engineers have typically focused on Quality of Service (QoS): performance metrics such as throughput, delay and jitter, packet loss, and the like. Yet performance as measured by QoS parameters only matters if it affects the experience of users as they attempt to use a particular application. Ultimately, the user’s experience is determined by QoE impairments (e.g., rebuffering). Although QoE and QoS are related—for example, a video rebuffering event may be caused by a high packet-loss rate—it is QoE metrics that ultimately capture a user’s experience. Identifying the causes of QoE impairments is complex, since the impairments may arise in one or another region of the network, in the home network, on the user’s device, in servers that are part of the application, or in supporting services such as the DNS. Additionally, metrics for QoE continue to evolve, as do the methods for relating QoE impairments to underlying causes that could be measurable using standard network measurement techniques. Finally, as the capabilities of the underlying network infrastructure continue to evolve, researchers should also consider how infrastructure and tools can best be designed to support measurements that can better identify the locations and causes of QoE impairments. The workshop's aim was to understand the current state of QoE research and to contemplate a community agenda to integrate ongoing threads of QoE research into a collaboration. This summary report describes the topics discussed and summarizes the key points of the discussion.}, keywords = {}, pubstate = {published}, tppubtype = {article} } This is a report on the Workshop on Tracking Quality of Experience in the Internet, held at Princeton, October 21–22, 2015, and jointly sponsored by the National Science Foundation and the Federal Communications Commission. The term Quality of Experience (QoE) describes a user’s subjective assessment of their experience when using a particular application. In the past, network engineers have typically focused on Quality of Service (QoS): performance metrics such as throughput, delay and jitter, packet loss, and the like. Yet performance as measured by QoS parameters only matters if it affects the experience of users as they attempt to use a particular application. Ultimately, the user’s experience is determined by QoE impairments (e.g., rebuffering). Although QoE and QoS are related—for example, a video rebuffering event may be caused by a high packet-loss rate—it is QoE metrics that ultimately capture a user’s experience. Identifying the causes of QoE impairments is complex, since the impairments may arise in one or another region of the network, in the home network, on the user’s device, in servers that are part of the application, or in supporting services such as the DNS. Additionally, metrics for QoE continue to evolve, as do the methods for relating QoE impairments to underlying causes that could be measurable using standard network measurement techniques. Finally, as the capabilities of the underlying network infrastructure continue to evolve, researchers should also consider how infrastructure and tools can best be designed to support measurements that can better identify the locations and causes of QoE impairments. The workshop's aim was to understand the current state of QoE research and to contemplate a community agenda to integrate ongoing threads of QoE research into a collaboration. This summary report describes the topics discussed and summarizes the key points of the discussion. |
2016 |
Dipendra Jha, John P. Rula, Fabián E. Bustamante eXploring Xfinity: A First Look at Provider-Enabled Community Networks Conference Passive and Active Measurement (PAM), 2016. @conference{jha:xfinity, title = {eXploring Xfinity: A First Look at Provider-Enabled Community Networks}, author = {Dipendra Jha and John P. Rula and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/pam-xfinity.pdf}, year = {2016}, date = {2016-03-03}, booktitle = {Passive and Active Measurement (PAM)}, journal = {In Proc. PAM}, abstract = {Several broadband providers have been offering community WiFi as an additional service for existing customers and paid subscribers. These community networks provide Internet connectivity on the go for mobile devices and a path to offload cellular traffic. Rather than deploying new infrastructure or relying on the resources of an organized community, these provider-enabled community WiFi services leverage the existing hardware and connections of their customers. The past few years have seen significant growth in their popularity and coverage, and some municipalities and institutions have started to consider them as the basis for public Internet access. In this paper, we present the first characterization of one such service – the Xfinity Community WiFi network. Taking the perspectives of the home-router owner and the public hotspot user, we characterize the performance and availability of this service in urban and suburban settings, at different times, between September 2014 and 2015. Our results highlight the challenges of providing these services in urban environments, considering the tensions between coverage and interference, large obstructions and high population densities. Through a series of controlled experiments, we measure the impact on hosting customers, finding that in certain cases the use of the public hotspot can degrade host network throughput by up to 67% under high traffic on the public hotspot.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Several broadband providers have been offering community WiFi as an additional service for existing customers and paid subscribers. These community networks provide Internet connectivity on the go for mobile devices and a path to offload cellular traffic. Rather than deploying new infrastructure or relying on the resources of an organized community, these provider-enabled community WiFi services leverage the existing hardware and connections of their customers. The past few years have seen significant growth in their popularity and coverage, and some municipalities and institutions have started to consider them as the basis for public Internet access. In this paper, we present the first characterization of one such service – the Xfinity Community WiFi network. Taking the perspectives of the home-router owner and the public hotspot user, we characterize the performance and availability of this service in urban and suburban settings, at different times, between September 2014 and 2015. Our results highlight the challenges of providing these services in urban environments, considering the tensions between coverage and interference, large obstructions and high population densities. Through a series of controlled experiments, we measure the impact on hosting customers, finding that in certain cases the use of the public hotspot can degrade host network throughput by up to 67% under high traffic on the public hotspot. |
John Rula, Fabián E. Bustamante, David R. Choffnes When IPs Fly: A Case for Redefining Airline Communication Workshop International Workshop on Mobile Computing Systems and Applications (HotMobile), 2016. @workshop{rula:ipsfly, title = {When IPs Fly: A Case for Redefining Airline Communication}, author = {John Rula and Fabián E. Bustamante and David R. Choffnes}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/airline.pdf}, year = {2016}, date = {2016-02-03}, booktitle = {International Workshop on Mobile Computing Systems and Applications (HotMobile)}, journal = {In Proc. HotMobile}, abstract = {The global airline industry conducted over 33 million flights in 2014 alone, carrying over 3.3 billion passengers. Surprisingly, the traffic management system handling this flight volume communicates over either VHF audio transmissions or plane transponders, exhibiting several seconds of latency and single bits per second of throughput. There is a general consensus that serving the growing demand will require significant improvements to the air traffic management system; we believe that many of these improvements can leverage the past two decades of mobile networking research. In this paper, we make the case that moving to a common IP-based data channel to support flight communication can radically change the airline industry. While there remain many challenges to achieving this vision, we believe that such a shift can greatly improve the rate of innovation and the overall efficiency of global air traffic management, enhance aircraft safety, and create new applications that leverage the capability of an advanced data channel. Through preliminary measurements of existing in-flight Internet communication systems, we show that existing in-flight connectivity achieves an order of magnitude higher throughput and lower latency than current systems, and operates as a highly reliable and available data link. This position paper takes a first look at the opportunity for IP-based flight communication, and identifies several promising research areas in this space.}, keywords = {}, pubstate = {published}, tppubtype = {workshop} } The global airline industry conducted over 33 million flights in 2014 alone, carrying over 3.3 billion passengers. Surprisingly, the traffic management system handling this flight volume communicates over either VHF audio transmissions or plane transponders, exhibiting several seconds of latency and single bits per second of throughput. There is a general consensus that serving the growing demand will require significant improvements to the air traffic management system; we believe that many of these improvements can leverage the past two decades of mobile networking research. In this paper, we make the case that moving to a common IP-based data channel to support flight communication can radically change the airline industry. While there remain many challenges to achieving this vision, we believe that such a shift can greatly improve the rate of innovation and the overall efficiency of global air traffic management, enhance aircraft safety, and create new applications that leverage the capability of an advanced data channel. Through preliminary measurements of existing in-flight Internet communication systems, we show that existing in-flight connectivity achieves an order of magnitude higher throughput and lower latency than current systems, and operates as a highly reliable and available data link. This position paper takes a first look at the opportunity for IP-based flight communication, and identifies several promising research areas in this space. |
2015 |
Mario A. Sánchez, John S. Otto, Zachary S. Bischof, David R. Choffnes, Fabián E. Bustamante, Balachander Krishnamurthy, Walter Willinger A measurement experimentation platform at the Internet’s edge Journal Article IEEE/ACM Transactions on Networking (TON), 23 (6), 2015. @article{sanchez:dasu-ton, title = {A measurement experimentation platform at the Internet’s edge}, author = {Mario A. Sánchez and John S. Otto and Zachary S. Bischof and David R. Choffnes and Fabián E. Bustamante and Balachander Krishnamurthy and Walter Willinger}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/ton-dasu.pdf}, year = {2015}, date = {2015-12-01}, journal = {IEEE/ACM Transactions on Networking (TON)}, volume = {23}, number = {6}, abstract = {Poor visibility into the network hampers progress in a number of important research areas, from network troubleshooting to Internet topology and performance mapping. This persistent, well-known problem has served as motivation for numerous proposals to build or extend existing Internet measurement platforms by recruiting larger, more diverse vantage points. Capturing the edge of the network, however, remains an elusive goal. We argue that at its root the problem is one of incentives. Today's measurement platforms build on the assumption that the goals of experimenters and those hosting the platform are the same. As much of the Internet's growth occurs in residential broadband networks, this assumption no longer holds. We present a measurement experimentation platform that reaches the network edge by explicitly aligning the objectives of the experimenters with those of the users hosting the platform. Dasu -- our current prototype -- is designed to support both network measurement experimentation and broadband characterization. Dasu has been publicly available since July 2010 and is currently in use by over 100K users with a heterogeneous set of connections spread across 2,431 networks and 166 countries. We discuss some of the challenges we faced building and using a platform for the Internet's edge, describe its design and implementation, and illustrate the unique perspective its current deployment brings to Internet measurement.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Poor visibility into the network hampers progress in a number of important research areas, from network troubleshooting to Internet topology and performance mapping. This persistent, well-known problem has served as motivation for numerous proposals to build or extend existing Internet measurement platforms by recruiting larger, more diverse vantage points. Capturing the edge of the network, however, remains an elusive goal. We argue that at its root the problem is one of incentives. Today's measurement platforms build on the assumption that the goals of experimenters and those hosting the platform are the same. As much of the Internet's growth occurs in residential broadband networks, this assumption no longer holds. We present a measurement experimentation platform that reaches the network edge by explicitly aligning the objectives of the experimenters with those of the users hosting the platform. Dasu -- our current prototype -- is designed to support both network measurement experimentation and broadband characterization. Dasu has been publicly available since July 2010 and is currently in use by over 100K users with a heterogeneous set of connections spread across 2,431 networks and 166 countries. We discuss some of the challenges we faced building and using a platform for the Internet's edge, describe its design and implementation, and illustrate the unique perspective its current deployment brings to Internet measurement. |
Zachary S. Bischof, John P. Rula, Fabián E. Bustamante In and Out of Cuba: Characterizing Cuba's Connectivity Conference Internet Measurement Conference (IMC), 2015. @conference{Cuba, title = {In and Out of Cuba: Characterizing Cuba's Connectivity}, author = {Zachary S. Bischof and John P. Rula and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/imc207s-bischofA.pdf}, year = {2015}, date = {2015-10-03}, booktitle = {Internet Measurement Conference (IMC)}, journal = {In Proc. of IMC}, abstract = {The goal of our work is to characterize the current state of Cuba's access to the wider Internet. This work is motivated by recent improvements in connectivity to the island and the growing commercial interest following the easing of restrictions on travel and trade with the US. In this paper, we profile Cuba’s networks, their connections to the rest of the world, and the routes of international traffic going to and from the island. Despite the addition of the ALBA-1 submarine cable, we find that round trip times to websites hosted off the island remain very high; pings to popular websites frequently took over 300 ms. We also find a high degree of path asymmetry in traffic to/from Cuba. Specifically, in our analysis we find that traffic going out of Cuba typically travels through the ALBA-1 cable, but, surprisingly, traffic on the reverse path often traverses high-latency satellite links, adding over 200 ms to round trip times. Lastly, we analyze queries to public DNS servers and SSL certificate requests to characterize the availability of network services in Cuba.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } The goal of our work is to characterize the current state of Cuba's access to the wider Internet. This work is motivated by recent improvements in connectivity to the island and the growing commercial interest following the easing of restrictions on travel and trade with the US. In this paper, we profile Cuba’s networks, their connections to the rest of the world, and the routes of international traffic going to and from the island. Despite the addition of the ALBA-1 submarine cable, we find that round trip times to websites hosted off the island remain very high; pings to popular websites frequently took over 300 ms. We also find a high degree of path asymmetry in traffic to/from Cuba. Specifically, in our analysis we find that traffic going out of Cuba typically travels through the ALBA-1 cable, but, surprisingly, traffic on the reverse path often traverses high-latency satellite links, adding over 200 ms to round trip times. Lastly, we analyze queries to public DNS servers and SSL certificate requests to characterize the availability of network services in Cuba. |
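A crude version of this latency analysis can be scripted as below (a minimal sketch, assuming a Unix-like ping whose summary line reports min/avg/max; the 300 ms cutoff mirrors the RTTs reported in the abstract and is otherwise arbitrary):

    import re
    import subprocess

    # Ping a few sites and flag RTTs consistent with a high-latency
    # (e.g., satellite) path; a geostationary hop alone adds roughly
    # 500 ms to a round trip.
    targets = ["example.com", "example.org"]
    for host in targets:
        out = subprocess.run(["ping", "-c", "3", host],
                             capture_output=True, text=True).stdout
        m = re.search(r"= [\d.]+/([\d.]+)/", out)  # avg RTT on Linux/macOS
        if m:
            avg = float(m.group(1))
            tag = "possible satellite path" if avg > 300 else "terrestrial-like"
            print(f"{host}: {avg:.0f} ms ({tag})")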
Mario A. Sánchez, Fabián E. Bustamante, Balachander Krishnamurthy, Walter Willinger Experiment coordination for large-scale measurement platforms Workshop ACM SIGCOMM Workshop on Crowdsourcing and crowdsharing of Big (Internet) Data (C2B(I)D), 2015. @workshop{sanchez:coordination, title = {Experiment coordination for large-scale measurement platforms}, author = {Mario A. Sánchez and Fabián E. Bustamante and Balachander Krishnamurthy and Walter Willinger}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/c2b16-sanchezAPT.pdf}, year = {2015}, date = {2015-08-06}, booktitle = {ACM SIGCOMM Workshop on Crowdsourcing and crowdsharing of Big (Internet) Data (C2B(I)D)}, journal = {In Proc. Sigcomm C2B(I)D Workshop}, abstract = {The risk of placing an undesired load on networks and networked services through probes originating from measurement platforms has always been present. While several scheduling schemes have been proposed to avoid undue loads or DDoS-like effects from uncontrolled experiments, the motivating scenarios for such schemes have generally been considered “sufficiently unlikely” and safely ignored by most existing measurement platforms. We argue that the growth of large, crowdsourced measurement systems means we cannot ignore this risk any longer. In this paper we expand on our original lease-based coordination scheme designed for measurement platforms that embrace crowdsourcing as their method-of-choice. We compare it with two alternative strategies currently implemented by some of the existing crowdsourced measurement platforms: centralized rate-limiting and individual rate-limiting. Our preliminary results show that our solution outperforms these two naive strategies for coordination according to at least two different intuitive metrics: resource utilization and bound compliance. We find that our scheme efficiently allows the scalable and effective coordination of measurements among potentially thousands of hosts while providing individual clients with enough flexibility to act on their own.}, keywords = {}, pubstate = {published}, tppubtype = {workshop} } The risk of placing an undesired load on networks and networked services through probes originating from measurement platforms has always been present. While several scheduling schemes have been proposed to avoid undue loads or DDoS-like effects from uncontrolled experiments, the motivating scenarios for such schemes have generally been considered “sufficiently unlikely” and safely ignored by most existing measurement platforms. We argue that the growth of large, crowdsourced measurement systems means we cannot ignore this risk any longer. In this paper we expand on our original lease-based coordination scheme designed for measurement platforms that embrace crowdsourcing as their method-of-choice. We compare it with two alternative strategies currently implemented by some of the existing crowdsourced measurement platforms: centralized rate-limiting and individual rate-limiting. Our preliminary results show that our solution outperforms these two naive strategies for coordination according to at least two different intuitive metrics: resource utilization and bound compliance. We find that our scheme efficiently allows the scalable and effective coordination of measurements among potentially thousands of hosts while providing individual clients with enough flexibility to act on their own. |
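The lease idea can be illustrated with a toy in-process coordinator (a minimal sketch under assumed semantics, not the paper's protocol): clients request a probe budget toward a target for a time window, and the server keeps the aggregate load on that target bounded while clients act on their own until the lease expires.

    import time

    class LeaseServer:
        """Toy lease-based coordinator (illustrative only): grants each
        client a probe budget toward a target for a fixed time window,
        keeping the aggregate rate on that target bounded."""

        def __init__(self, per_target_budget):
            self.per_target_budget = per_target_budget  # probes per window
            self.outstanding = {}  # target -> probes currently leased

        def request_lease(self, target, probes, window_s=60):
            used = self.outstanding.get(target, 0)
            granted = min(probes, self.per_target_budget - used)
            if granted > 0:
                self.outstanding[target] = used + granted
            return granted, time.time() + window_s  # budget and expiry

        def release(self, target, probes):
            self.outstanding[target] = max(
                0, self.outstanding.get(target, 0) - probes)

    # Two clients contend for the same target; the second is throttled.
    server = LeaseServer(per_target_budget=100)
    print(server.request_lease("192.0.2.0/24", 80))  # grants 80 probes
    print(server.request_lease("192.0.2.0/24", 80))  # grants only 20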
John P. Rula, Zachary S. Bischof, Fabián E. Bustamante Second Chance - Understanding diversity in broadband access network performance Workshop ACM SIGCOMM Workshop on Crowdsourcing and crowdsharing of Big (Internet) Data (C2B(I)D), 2015. @workshop{rula:bdiversity, title = {Second Chance - Understanding diversity in broadband access network performance}, author = {John P. Rula and Zachary S. Bischof and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/c2b15-rula.pdf}, year = {2015}, date = {2015-08-03}, booktitle = {ACM SIGCOMM Workshop on Crowdsourcing and crowdsharing of Big (Internet) Data (C2B(I)D)}, journal = {In Proc. Sigcomm C2B(I)D workshop}, abstract = {In recognition of the increasing importance of broadband, several governments have embarked on large-scale efforts to measure broadband services from devices within end-users’ homes. Participants for these studies were selected based on features that, a priori, were thought to be relevant to service performance, such as geographic region, access technology and subscription level. Each subsequent yearly deployment has followed the same model, ensuring that the number of measurement points remains stable despite natural churn. In this paper, we start to explore the issue of vantage point selection in residential broadband networks by leveraging the publicly available datasets collected as part of the FCC Broadband America study. We present the first analysis of the variation of performance in edge networks and the diversity of individual vantage points. We explore the underlying causes of this diversity through a factor analysis of contextual factors within an ISP, such as the geographic location of subscribers. The goal of this analysis is to inform additional deployments in ongoing studies, and to guide the design and deployment of future investigations into broadband networks.}, keywords = {}, pubstate = {published}, tppubtype = {workshop} } In recognition of the increasing importance of broadband, several governments have embarked on large-scale efforts to measure broadband services from devices within end-users’ homes. Participants for these studies were selected based on features that, a priori, were thought to be relevant to service performance, such as geographic region, access technology and subscription level. Each subsequent yearly deployment has followed the same model, ensuring that the number of measurement points remains stable despite natural churn. In this paper, we start to explore the issue of vantage point selection in residential broadband networks by leveraging the publicly available datasets collected as part of the FCC Broadband America study. We present the first analysis of the variation of performance in edge networks and the diversity of individual vantage points. We explore the underlying causes of this diversity through a factor analysis of contextual factors within an ISP, such as the geographic location of subscribers. The goal of this analysis is to inform additional deployments in ongoing studies, and to guide the design and deployment of future investigations into broadband networks. |
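The kind of per-vantage-point diversity analysis described here can be sketched in a few lines of Python (fabricated throughput values, not the FCC dataset; the statistics are generic, not the paper's factor analysis):

    from statistics import mean, pstdev

    # Illustrative only: daily throughput samples (Mbps) per vantage
    # point within one ISP.
    vantage_points = {
        "vp1": [18.2, 18.5, 17.9, 18.1],
        "vp2": [18.0, 12.4, 17.8, 9.9],
        "vp3": [18.3, 18.2, 18.4, 18.1],
    }

    # Per-vantage-point variability, plus the spread across vantage
    # points: large differences suggest a single point is a poor
    # representative of the ISP as a whole.
    per_vp_mean = {vp: mean(v) for vp, v in vantage_points.items()}
    for vp, v in vantage_points.items():
        cv = pstdev(v) / mean(v)  # coefficient of variation
        print(f"{vp}: mean {mean(v):.1f} Mbps, CV {cv:.2f}")
    print(f"spread of means across vantage points: "
          f"{pstdev(list(per_vp_mean.values())):.2f} Mbps")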
John P. Rula, Fabián E. Bustamante Crowdsensing Under (Soft) Control Conference IEEE INFOCOM, 2015. @conference{rula:softcontrol, title = {Crowdsensing Under (Soft) Control}, author = {John P. Rula and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/csc.pdf}, year = {2015}, date = {2015-04-03}, booktitle = {IEEE INFOCOM}, journal = {In Proc. of INFOCOM}, abstract = {Crowdsensing leverages the pervasiveness and power of mobile devices, such as smartphones and tablets, to enable ordinary citizens to collect, transport and verify data. Application domains range from environmental monitoring to infrastructure management and social computing. Crowdsensing services' effectiveness is a direct result of their coverage, which is driven by the recruitment and mobility patterns of participants. Due to the population distribution of most areas, and the regular mobility patterns of participants, less popular or populated areas suffer from poor coverage. In this paper, we present Crowd Soft Control (CSC), an approach to exert limited control over the actions of participants by leveraging the built-in incentives of location-based gaming and social applications. By pairing community sensing with location-based applications, CSC allows sensing services to reuse the incentives of location-based apps to steer the actions of participating users and increase the effectiveness of sensing campaigns. While there are several domains where this intentional movement is useful, such as data muling, this paper presents the design, implementation and evaluation of CSC applied to crowdsensing. We built a prototype of CSC and integrated it with two location-based applications and crowdsensing services. Experimental results demonstrate the low cost of integration and minimal overhead of CSC.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Crowdsensing leverages the pervasiveness and power of mobile devices, such as smartphones and tablets, to enable ordinary citizens to collect, transport and verify data. Application domains range from environmental monitoring to infrastructure management and social computing. Crowdsensing services' effectiveness is a direct result of their coverage, which is driven by the recruitment and mobility patterns of participants. Due to the population distribution of most areas, and the regular mobility patterns of participants, less popular or populated areas suffer from poor coverage. In this paper, we present Crowd Soft Control (CSC), an approach to exert limited control over the actions of participants by leveraging the built-in incentives of location-based gaming and social applications. By pairing community sensing with location-based applications, CSC allows sensing services to reuse the incentives of location-based apps to steer the actions of participating users and increase the effectiveness of sensing campaigns. While there are several domains where this intentional movement is useful, such as data muling, this paper presents the design, implementation and evaluation of CSC applied to crowdsensing. We built a prototype of CSC and integrated it with two location-based applications and crowdsensing services. Experimental results demonstrate the low cost of integration and minimal overhead of CSC. |
John Rula, Byungjin Jun, Fabián E. Bustamante Mobile AD(D): Estimating Mobile App Session Times for Better Ads Workshop International Workshop on Mobile Computing Systems and Applications (HotMobile), 2015. @workshop{rula:appt, title = {Mobile AD(D): Estimating Mobile App Session Times for Better Ads}, author = {John Rula and Byungjin Jun and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/hot81-rula.pdf}, year = {2015}, date = {2015-02-03}, booktitle = {International Workshop on Mobile Computing Systems and Applications (HotMobile)}, journal = {In Proc. HotMobile}, abstract = {While mobile advertisements are the dominant source of revenue for mobile apps, the usage patterns of mobile users, and thus their engagement times, may be in conflict with the effectiveness of these ads. With any given application, a user may engage for anywhere from a few seconds to several minutes depending on a number of factors such as their location and goals. Despite the resulting wide range of session times, the current nature of ad auctions dictates that ads are priced and sold prior to actual viewing, that is, regardless of the actual display time. We argue that the wealth of easy-to-gather contextual information on mobile devices is sufficient to make better choices by effectively predicting exposure time. We analyze mobile device usage patterns with a detailed two-week-long user study of 37 users in the US and South Korea. After characterizing application session times, we use factor analysis to derive a simple predictive model and show that this model offers improved accuracy compared to mean session time over 90% of the time. We make the case for including predicted ad exposure duration in the price of mobile advertisements, and posit that such information could significantly improve the effectiveness of mobile advertising, giving publishers the ability to tune campaigns for engagement length, enabling a more efficient market for ad impressions, selecting appropriate media for each impression, and lowering costs to users in network utilization and device power.}, keywords = {}, pubstate = {published}, tppubtype = {workshop} } While mobile advertisements are the dominant source of revenue for mobile apps, the usage patterns of mobile users, and thus their engagement times, may be in conflict with the effectiveness of these ads. With any given application, a user may engage for anywhere from a few seconds to several minutes depending on a number of factors such as their location and goals. Despite the resulting wide range of session times, the current nature of ad auctions dictates that ads are priced and sold prior to actual viewing, that is, regardless of the actual display time. We argue that the wealth of easy-to-gather contextual information on mobile devices is sufficient to make better choices by effectively predicting exposure time. We analyze mobile device usage patterns with a detailed two-week-long user study of 37 users in the US and South Korea. After characterizing application session times, we use factor analysis to derive a simple predictive model and show that this model offers improved accuracy compared to mean session time over 90% of the time. We make the case for including predicted ad exposure duration in the price of mobile advertisements, and posit that such information could significantly improve the effectiveness of mobile advertising, giving publishers the ability to tune campaigns for engagement length, enabling a more efficient market for ad impressions, selecting appropriate media for each impression, and lowering costs to users in network utilization and device power. |
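To illustrate the contrast with a mean-session-time baseline, the toy Python example below (fabricated data; a single contextual factor, location, standing in for the paper's richer factor analysis) predicts session length from context and compares prediction errors:

    # Illustrative only: predict app session length from one contextual
    # factor (location) versus a global-mean baseline.
    sessions = [  # (location, session length in seconds)
        ("home", 240), ("home", 300), ("commute", 45),
        ("commute", 60), ("work", 90), ("work", 120),
    ]

    global_mean = sum(s for _, s in sessions) / len(sessions)

    by_loc = {}
    for loc, s in sessions:
        by_loc.setdefault(loc, []).append(s)
    loc_mean = {loc: sum(v) / len(v) for loc, v in by_loc.items()}

    # Compare total absolute prediction errors of the two predictors.
    base_err = sum(abs(s - global_mean) for _, s in sessions)
    ctx_err = sum(abs(s - loc_mean[loc]) for loc, s in sessions)
    print(f"baseline error: {base_err:.0f}s, contextual error: {ctx_err:.0f}s")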
2014 |
Arnau Gavaldà-Miralles, John S. Otto, Fabián E. Bustamante, Luís A. N. Amaral, Jordi Duch, Roger Guimerà User behavior and change: File sharers and copyright laws Conference International Conference on emerging Networking EXperiments and Technologies (CoNEXT), 2014. @conference{gavalda:p2pbehaviour, title = {User behavior and change: File sharers and copyright laws}, author = {Arnau Gavaldà-Miralles and John S. Otto and Fabián E. Bustamante and Luís A. N. Amaral and Jordi Duch and Roger Guimerà}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/conext14.pdf}, year = {2014}, date = {2014-12-06}, booktitle = {International Conference on emerging Networking EXperiments and Technologies (CoNEXT)}, journal = {In Proc. of CoNEXT}, abstract = {Though the impact of file-sharing of copyrighted content has been discussed for over a decade, only in the past few years have countries begun to adopt legislation to criminalize this behavior. These laws impose penalties ranging from warnings and monetary fines to disconnecting Internet service. While their supporters are quick to point out trends showing the efficacy of these laws at reducing use of file-sharing sites, their analyses rely on brief snapshots of activity that cannot reveal long- and short-term trends. In this paper, we introduce an approach to model user behavior based on a hidden Markov model and apply it to analyze a two-year-long user-level trace of download activity of over 38k users from around the world. This approach allows us to quantify the true impact of file-sharing laws on user behavior, revealing behavioral trends otherwise difficult to identify. For instance, despite an initial reduction in activity in New Zealand when a three-strikes law took effect, after two months activity had returned to the level observed prior to the law being enacted. Given that punishment at best yields short-term compliance, we suggest that incentives-based approaches may be more effective at changing user behavior.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Though the impact of file-sharing of copyrighted content has been discussed for over a decade, only in the past few years have countries begun to adopt legislation to criminalize this behavior. These laws impose penalties ranging from warnings and monetary fines to disconnecting Internet service. While their supporters are quick to point out trends showing the efficacy of these laws at reducing use of file-sharing sites, their analyses rely on brief snapshots of activity that cannot reveal long- and short-term trends. In this paper, we introduce an approach to model user behavior based on a hidden Markov model and apply it to analyze a two-year-long user-level trace of download activity of over 38k users from around the world. This approach allows us to quantify the true impact of file-sharing laws on user behavior, revealing behavioral trends otherwise difficult to identify. For instance, despite an initial reduction in activity in New Zealand when a three-strikes law took effect, after two months activity had returned to the level observed prior to the law being enacted. Given that punishment at best yields short-term compliance, we suggest that incentives-based approaches may be more effective at changing user behavior. |
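As a rough sketch of the modeling approach, the toy two-state hidden Markov model below (all probabilities fabricated; not the paper's fitted parameters) decodes a user's binned daily download counts into active and inactive spells with the Viterbi algorithm:

    # Toy two-state HMM (active/inactive user) decoded with Viterbi.
    states = ("inactive", "active")
    start = {"inactive": 0.6, "active": 0.4}
    trans = {"inactive": {"inactive": 0.8, "active": 0.2},
             "active":   {"inactive": 0.3, "active": 0.7}}
    # Emission: probability of observing 0, 1, or 2+ downloads in a day.
    emit = {"inactive": {0: 0.9, 1: 0.09, 2: 0.01},
            "active":   {0: 0.2, 1: 0.5,  2: 0.3}}

    def viterbi(obs):
        """Return the most likely state sequence for the observations."""
        v = [{s: start[s] * emit[s][obs[0]] for s in states}]
        back = []
        for o in obs[1:]:
            col, ptr = {}, {}
            for s in states:
                prev, p = max(((r, v[-1][r] * trans[r][s]) for r in states),
                              key=lambda x: x[1])
                col[s], ptr[s] = p * emit[s][o], prev
            v.append(col)
            back.append(ptr)
        last = max(states, key=lambda s: v[-1][s])
        path = [last]
        for ptr in reversed(back):
            path.append(ptr[path[-1]])
        return list(reversed(path))

    print(viterbi([0, 0, 2, 1, 0, 0]))  # a burst flags an 'active' spell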
Mario A. Sánchez, Fabián E. Bustamante, Balachander Krishnamurthy, Walter Willinger, Georgios Smaragdakis, Jeffrey Erman Internet Inter-Domain Traffic Estimation for the Outsider Journal Article In Proc. of IMC, 2014. @article{Domain, title = {Internet Inter-Domain Traffic Estimation for the Outsider}, author = {Mario A. Sánchez and Fabián E. Bustamante and Balachander Krishnamurthy and Walter Willinger and Georgios Smaragdakis and Jeffrey Erman}, url = {https://www.aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/altps-camera-ready.pdf}, year = {2014}, date = {2014-11-09}, journal = {In Proc. of IMC}, abstract = {Characterizing the flow of Internet traffic is important in a wide range of contexts, from network engineering and application design to understanding the network impact of consumer demand and business relationships. Despite the growing interest, the nearly impossible task of collecting large-scale, Internet-wide traffic data has severely constrained the focus of traffic-related studies. In this paper, we introduce a novel approach to characterize inter-domain traffic by reusing large, publicly available traceroute datasets. Our approach builds on a simple insight -- the popularity of a route on the Internet can serve as an informative proxy for the volume of traffic it carries. It applies structural analysis to a dual representation of the AS-level connectivity graph derived from available traceroute datasets. Drawing analogies with city grids and traffic, it adapts data transformations and metrics of route popularity from urban planning to serve as proxies for traffic volume. We call this approach Network Syntax, highlighting the connection to urban planning Space Syntax. We apply Network Syntax in the context of a global ISP and a large Internet eXchange Point and use ground-truth data to demonstrate the strong correlation (r^2 values of up to 0.9) between inter-domain traffic volume and the different proxy metrics. Working with these two network entities, we show the potential of Network Syntax for identifying critical links and inferring missing traffic matrix measurements.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Characterizing the flow of Internet traffic is important in a wide range of contexts, from network engineering and application design to understanding the network impact of consumer demand and business relationships. Despite the growing interest, the nearly impossible task of collecting large-scale, Internet-wide traffic data has severely constrained the focus of traffic-related studies. In this paper, we introduce a novel approach to characterize inter-domain traffic by reusing large, publicly available traceroute datasets. Our approach builds on a simple insight -- the popularity of a route on the Internet can serve as an informative proxy for the volume of traffic it carries. It applies structural analysis to a dual representation of the AS-level connectivity graph derived from available traceroute datasets. Drawing analogies with city grids and traffic, it adapts data transformations and metrics of route popularity from urban planning to serve as proxies for traffic volume. We call this approach Network Syntax, highlighting the connection to urban planning Space Syntax. We apply Network Syntax in the context of a global ISP and a large Internet eXchange Point and use ground-truth data to demonstrate the strong correlation (r^2 values of up to 0.9) between inter-domain traffic volume and the different proxy metrics. Working with these two network entities, we show the potential of Network Syntax for identifying critical links and inferring missing traffic matrix measurements. |
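In the same spirit, a minimal proxy for route popularity can be computed from AS-level paths (illustrative Python with fabricated AS numbers; Network Syntax itself applies richer structural metrics than raw link frequency):

    from collections import Counter
    from itertools import pairwise  # Python 3.10+

    # AS-level paths as might be derived from public traceroute datasets.
    as_paths = [
        [64496, 64500, 64501, 64510],
        [64497, 64500, 64501, 64511],
        [64498, 64500, 64502, 64510],
    ]

    # Route popularity: how often each inter-AS link appears across paths,
    # used here as the simplest proxy for the traffic a link carries.
    link_popularity = Counter()
    for path in as_paths:
        for a, b in pairwise(path):
            link_popularity[(a, b)] += 1

    # The most popular links are candidate heavy-traffic (critical) links.
    for link, count in link_popularity.most_common(3):
        print(f"AS{link[0]} -> AS{link[1]}: seen on {count} paths")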
Zachary S. Bischof, Fabián E. Bustamante, Rade Stanojevic Need, Want, or Can Afford - Broadband Markets and the Behavior of Users Inproceedings In Proc. of ACM IMC, 2014. @inproceedings{zbichof:broadband, title = {Need, Want, or Can Afford - Broadband Markets and the Behavior of Users}, author = {Zachary S. Bischof and Fabián E. Bustamante and Rade Stanojevic}, url = {http://www.aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/imc220-bischof.pdf}, year = {2014}, date = {2014-11-06}, booktitle = {In Proc. of ACM IMC}, journal = {In Proc. of IMC}, abstract = {We present the first study of broadband services in their broader context, evaluating the interplay between service characteristics (such as capacity, latency and loss), broadband pricing, and user demand. We explore these relationships, beyond correlation, with the application of natural experiments. Most efforts on broadband service characterization have so far focused on performance and availability, yet we lack a clear understanding of how such services are being utilized and how their use is impacted by the particulars of the market. By analyzing over 23 months of data collected from 53,000 end hosts and residential gateways in 160 countries, along with a global survey of retail broadband plans, we empirically study the relationship between broadband service characteristics, pricing and demand. We show a strong correlation between capacity and demand, even though subscribers rarely fully utilize their links, but note a law of diminishing returns, with relatively smaller increases in demand at higher capacities. Despite the fourfold increase in global IP traffic, we find that user demand on the network over a three-year period remained constant for a given bandwidth capacity. We exploit natural experiments to examine the causality between these factors. The reported findings represent an important step towards understanding how user behavior, and the market features that shape it, affect broadband networks and the Internet at large.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } We present the first study of broadband services in their broader context, evaluating the interplay between service characteristics (such as capacity, latency and loss), broadband pricing, and user demand. We explore these relationships, beyond correlation, with the application of natural experiments. Most efforts on broadband service characterization have so far focused on performance and availability, yet we lack a clear understanding of how such services are being utilized and how their use is impacted by the particulars of the market. By analyzing over 23 months of data collected from 53,000 end hosts and residential gateways in 160 countries, along with a global survey of retail broadband plans, we empirically study the relationship between broadband service characteristics, pricing and demand. We show a strong correlation between capacity and demand, even though subscribers rarely fully utilize their links, but note a law of diminishing returns, with relatively smaller increases in demand at higher capacities. Despite the fourfold increase in global IP traffic, we find that user demand on the network over a three-year period remained constant for a given bandwidth capacity. We exploit natural experiments to examine the causality between these factors. The reported findings represent an important step towards understanding how user behavior, and the market features that shape it, affect broadband networks and the Internet at large. |
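A law of diminishing returns of this kind can be captured with a simple concave fit; the sketch below (fabricated capacity/demand pairs, not the paper's data) fits demand = a + b·ln(capacity) by least squares:

    import math

    # Fabricated (capacity in Mbps, median daily demand in MB) pairs,
    # shaped to show demand flattening at higher capacities.
    data = [(1, 150), (5, 400), (10, 520), (25, 640), (50, 700), (100, 740)]

    # Least-squares fit of demand = a + b * ln(capacity); a concave
    # curve is one simple way to express diminishing returns.
    xs = [math.log(c) for c, _ in data]
    ys = [d for _, d in data]
    n = len(data)
    mx, my = sum(xs) / n, sum(ys) / n
    b = (sum((x - mx) * (y - my) for x, y in zip(xs, ys))
         / sum((x - mx) ** 2 for x in xs))
    a = my - b * mx
    print(f"demand ≈ {a:.0f} + {b:.0f}·ln(capacity)")

    # Marginal gain shrinks with capacity: d(demand)/dc = b / c.
    for c in (5, 50):
        print(f"at {c} Mbps, +1 Mbps adds ≈ {b / c:.1f} MB/day")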
Publications
2025 |
The Centralization of a Decentralized Video Platform – A First Characterization Of PeerTube Journal Article Forthcoming SIGCOMM CCR, Forthcoming. |
2024 |
Of Choices and Control - A Comparative Analysis of Government Hosting Inproceedings Proc. of the ACM IMC, 2024. |
Ten years of the Venezuelan crisis - An Internet perspective Inproceedings Proc. of the ACM SIGCOMM, 2024. |
Beyond Proximity: Exploring Remote Cloud Peering Inproceedings Proc. of the ACM SIGCOMM, Poster Session, 2024. |
Towards Re-Architecting Today's Internet for Survivability: NSF Workshop Report Journal Article ACM SIGCOMM Computer Communication Review (CCR), 54 (1), pp. 36-47, 2024. |
Revealing Hidden Secrets: Decoding DNS PTR records with Large Language Models Inproceedings Proc. of the ACM SIGCOMM, Poster Session, 2024. |
A hop away from everywhere: A view of the intercontinental long-haul infrastructure Inproceedings Proc. of ACM SIGMETRICS, 2024. |
2023 |
Destination Unreachable: Characterizing Internet Outages and Shutdowns Inproceedings Proc. of ACM SIGCOMM, 2023. |
Each at its own pace: Third-party Dependency and Centralization Around the World Inproceedings Proc. of ACM SIGMETRICS, 2023. |
as2org+ : Enriching AS-to-Organization Mappings with PeeringDB Inproceedings Proc. of the Passive and Active Measurement Conference (PAM), 2023. |
2022 |
Global Mobile Network Aggregators: Taxonomy, Roaming Performance and Optimization Inproceedings Proc. of ACM International Conference on Mobile Systems, Applications, and Services (MobiSys), 2022. |
BatteryLab: A Collaborative Platform for Power Monitoring Inproceedings Proc. of the Passive and Active Measurement Conference (PAM), 2022. |
Jitterbug: A new framework for jitter-based congestion inference Inproceedings Proc. of the Passive and Active Measurement Conference (PAM), 2022. |
Quantifying Nations' Exposure to Traffic Observation and Selective Tampering Inproceedings Proc. of the Passive and Active Measurement Conference (PAM), 2022. |
Reining in Mobile Web Performance with Document and Permission Policies Inproceedings Proc. of International Workshop on Mobile Computing Systems and Applications (HotMobile), 2022. |
2021 |
Workshop on Overcoming Measurement Barriers to Internet Research (WOMBIR 2021) final report. Journal Article SIGCOMM Computer Communication Review (CCR), 51 (3), pp. 33-40, 2021. |
Identifying ASes of State-Owned Internet Operators Inproceedings Proc. of the ACM Internet Measurement Conference (IMC), 2021. |
Networked Systems as Witnesses - Association Between Content Demand, Human Mobility and an Infection Spread Inproceedings Proc. of ACM Internet Measurement Conference (IMC), 2021. |
WebTune: A Distributed Platform for Web Performance Measurements Inproceedings Proc. of the Network Traffic Measurement and Analysis Conference (TMA), 2021. |
Insights from Operating an IP Exchange Provider Inproceedings Proc. of ACM Conference on Applications, Technologies, Architectures, and Protocols for Computer Communication (SIGCOMM), 2021. |
Decentralization, privacy and performance for DNS Inproceedings Proc. of ACM Conference on Applications, Technologies, Architectures, and Protocols for Computer Communication (SIGCOMM) - Poster - Winner of the ACM SIGCOMM SRC Competition, 2021. |
2020 |
Back in control -- An extensible middle-box on your phone Technical Report arXiv (arXiv:2012.07695), 2020. |
Mind the Delay: The Adverse Effects of Delay-Based TCP on HTTP Inproceedings Proc. of ACM International Conference on emerging Networking EXperiments and Technologies (CoNEXT) , 2020. |
A first look at the IP eXchange Ecosystem Journal Article ACM SIGCOMM Computer Communication Review (CCR), 50 (4), 2020. |
Out of Sight, Not Out of Mind - A User-View on the Criticality of the Submarine Cable Network Inproceedings Proc. of ACM Internet Measurement Conference (IMC), 2020. |
Where Things Roam: Uncovering Cellular IoT/M2M Connectivity Inproceedings Proc. of ACM Internet Measurement Conference (IMC), 2020. |
SwarmTalk - Towards Benchmark Software Suites for Swarm Robotics Platforms Inproceedings Proc. of the International Conference on Autonomous Agents and Multiagent Systems (AAMAS), 2020. |
2019 |
BatteryLab, a distributed power monitoring platform for mobile devices: demo abstract Inproceedings Proc. of the ACM Conference on Embedded Networked Sensor Systems (SenSys), 2019. |
Scaling up your web experience, everywhere Workshop Proc. of the International Workshop on Mobile Computing Systems and Applications (HotMobile), 2019. |
The Value of First Impressions: The Impact of Ad-Blocking on Web QoE Inproceedings Proc. of the Passive and Active Measurement (PAM), 2019. |
AMP up your Mobile Web Experience: Characterizing the Impact of Google’s Accelerated Mobile Project Conference Proc. of the Annual International Conference on Mobile Computing and Networking (MobiCom), 2019. |
2018 |
Untangling the world-wide mesh of undersea cables Workshop ACM Workshop on Hot Topics in Networks (HotNets), 2018. |
The Growing Importance of Being Always On -- A first look at the reliability of broadband Internet access Conference Research Conference on Communication, Information and Internet Policy (TPRC), 2018. |
Anycast on the Move: A Look at Mobile Anycast Performance Conference Network Traffic Measurement and Analysis Conference (TMA), 2018. |
Mile High WiFi: A First Look At In-Flight Internet Connectivity Conference The Web Conference (WWW), 2018. |
2017 |
Cell Spotting -- Studying the Role of Cellular Networks in the Internet Conference Internet Measurement Conference (IMC), 2017. |
Characterizing and Improving the Reliability of Broadband Internet Access Online arXiv.org 2017. |
The utility argument — Making a case for broadband SLAs Conference Passive and Active Measurement (PAM), 2017. |
Workshop on Tracking Quality of Experience in the Internet: Summary and Outcomes Journal Article SIGCOMM Computer Communication Review (CCR), 47 (1), 2017. |
2016 |
eXploring Xfinity: A First Look at Provider-Enabled Community Networks Conference Passive and Active Measurement (PAM), 2016. |
When IPs Fly: A Case for Redefining Airline Communication Workshop International Workshop on Mobile Computing Systems and Applications (HotMobile), 2016. |
2015 |
A measurement experimentation platform at the Internet’s edge Journal Article IEEE/ACM Transactions on Networking (TON), 23 (6), 2015. |
In and Out of Cuba: Characterizing Cuba's Connectivity Conference Internet Measurement Conference (IMC), 2015. |
Experiment coordination for large-scale measurement platforms Workshop ACM SIGCOMM Workshop on Crowdsourcing and crowdsharing of Big (Internet) Data (C2B(I)D), 2015. |
Second Chance - Understanding diversity in broadband access network performance Workshop ACM SIGCOMM Workshop on Crowdsourcing and crowdsharing of Big (Internet) Data (C2B(I)D), 2015. |
Crowdsensing Under (Soft) Control Conference IEEE INFOCOM, 2015. |
Mobile AD(D): Estimating Mobile App Session Times for Better Ads Workshop International Workshop on Mobile Computing Systems and Applications (HotMobile), 2015. |
2014 |
User behavior and change: File sharers and copyright laws Conference International Conference on emerging Networking EXperiments and Technologies (CoNEXT), 2014. |
Internet Inter-Domain Traffic Estimation for the Outsider Journal Article In Proc. of IMC, 2014. |
Need, Want, or Can Afford - Broadband Markets and the Behavior of Users Inproceedings In Proc. of ACM IMC, 2014. |