2019
James Newman, Robert H. Belson, Fabián E. Bustamante
Scaling up your web experience, everywhere
Workshop paper: International Workshop on Mobile Computing Systems and Applications (HotMobile), 2019.
URL: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/Newman-Scaleup.pdf
We present an approach to improve users' web experience by dynamically reducing the complexity of rendered websites based on network conditions. Our approach is based on a simple insight: adjusting a browser window's scale (i.e., zooming in/out) changes the number of objects placed above the fold and thus hides the loading of objects pushed below the fold in the user's scroll time. We design ScaleUp, a browser extension that tracks network conditions and dynamically adjusts browser scale to improve users' web Quality of Experience (QoE) while preserving the design integrity of websites. Through controlled experiments, we demonstrate the impact of ScaleUp on a number of key QoE metrics over a random sample of 50 of the top 500 Alexa websites. We show that a simple adjustment in scale can result in an over 19% improvement in Above-The-Fold (ATF) time in the median case. While adjusting the scale factor can improve proxy metrics of QoE, it is unclear whether that translates into an improved web experience for users. We summarize findings from a large, crowdsourced experiment with 1,000 users showing that, indeed, improvements to QoE metrics correlate with an enhanced user experience. We have released ScaleUp as a Chrome extension that now counts over 1,000 users worldwide, and we report on some of the lessons learned from this deployment.
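The mechanism described in this abstract (mapping measured network conditions to a zoom level) can be illustrated with a minimal sketch. This is not ScaleUp's actual code; the Network Information API fields and the RTT/bandwidth thresholds below are assumptions chosen only for illustration.

// Hedged sketch, not the authors' implementation: pick a page scale from the
// (non-standard) Network Information API and apply it as a CSS zoom, so that
// on slow links fewer objects land above the fold.
type NetInfo = { rtt?: number; downlink?: number };

function chooseScale(conn?: NetInfo): number {
  if (!conn || conn.rtt === undefined) return 1.0;                 // no signal: leave the page alone
  if (conn.rtt > 300 || (conn.downlink ?? 10) < 1.5) return 1.5;   // poor link: zoom in more (illustrative threshold)
  if (conn.rtt > 100) return 1.25;                                 // moderate link: zoom in slightly
  return 1.0;                                                      // good link: native scale
}

const conn = (navigator as unknown as { connection?: NetInfo }).connection;
document.body.style.setProperty("zoom", String(chooseScale(conn)));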
James Newman, Fabián E. Bustamante
The Value of First Impressions: The Impact of Ad-Blocking on Web QoE
Conference paper: Passive and Active Measurement (PAM), 2019.
We present the first detailed analysis of ad-blocking's impact on user Web quality of experience (QoE). We use the most popular web-based ad-blocker to capture the impact of ad-blocking on QoE for the top 5,000 Alexa websites. We find that ad-blocking reduces the number of objects loaded by 15% in the median case, and that this reduction translates into a 12.5% improvement in page load time (PLT) and a slight worsening of time to first paint (TTFP) of 6.54%. We show the complex relationship between ad-blocking and quality of experience: despite the clear improvements to PLT in the average case, for the bottom 10th percentile this improvement comes at the cost of a slowdown in the initial responsiveness of websites, with a 19% increase in TTFP. To understand the relative importance of this tradeoff for user experience, we run a large, crowdsourced experiment with 1,000 users on Amazon Mechanical Turk. In this experiment, users were presented with websites for which ad-blocking results in both a reduction of PLT and a significant increase in TTFP. We find that, surprisingly, 71.5% of the time users show a clear preference for a faster first paint over faster page load times, hinting at the importance of first impressions on web QoE.
Byungjin Jun, Fabián E. Bustamante, Sung Yoon Whang, Zachary S. Bischof
AMP up your Mobile Web Experience: Characterizing the Impact of Google's Accelerated Mobile Project
Conference paper: Annual International Conference on Mobile Computing and Networking (MobiCom), 2019.
URL: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/AMP-Mobicom-2019.pdf
The rapid growth in the number of mobile devices, subscriptions, and their associated traffic has served as motivation for several projects focused on improving mobile users' quality of experience (QoE). Few have been as contentious as the Google-initiated Accelerated Mobile Project (AMP), both praised for its seemingly instant mobile web experience and criticized over concerns about the enforcement of its formats. This paper presents the first characterization of AMP's impact on users' QoE. We do this using a corpus of over 2,100 AMP webpages, and their corresponding non-AMP counterparts, based on trendy-keyword searches. We characterize AMP's impact on common web QoE metrics, including Page Load Time (PLT), Time to First Byte (TTFB) and SpeedIndex (SI). Our results show that AMP significantly improves SI, yielding on average a 60% lower SI than non-AMP pages without accounting for prefetching. Prefetching of AMP pages pushes this advantage even further, with prefetched pages loading over 2,000 ms faster than non-prefetched AMP pages. This clear boost may come, however, at a non-negligible cost for users with limited data plans, as it incurs an average of over 1.4 MB of additional data downloaded, unbeknownst to users.
2018
Zachary S. Bischof, Romain Fontugne, Fabián E. Bustamante
Untangling the world-wide mesh of undersea cables
Workshop paper: ACM Workshop on Hot Topics in Networks (HotNets), 2018.
URL: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/undersea.pdf
The growth of global Internet traffic has driven an exponential expansion of the submarine cable network, both in terms of the sheer number of links and its total capacity. Today, a complex mesh of hundreds of cables, stretched over 1 million kilometers, connects nearly every corner of the earth and is instrumental in closing the remaining connectivity gaps. Despite the scale and critical role of the submarine network to both business and society at large, our community has mostly ignored it, treating it as a black box in most studies, from connectivity to inter-domain traffic and reliability. In this paper, we make the case for a new research agenda focused on characterizing the global submarine network and the critical role it plays as a basic component of any inter-continental end-to-end connection.
Zachary S. Bischof, Fabián E. Bustamante, Nick Feamster
The Growing Importance of Being Always On -- A first look at the reliability of broadband Internet access
Conference paper: Research Conference on Communication, Information and Internet Policy (TPRC), 2018.
URL: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/tprc46-reliability.pdf
Broadband availability and performance continue to improve rapidly, spurred by both government and private investment and motivated by the recognized social and economic benefits of connectivity. A recent ITU "State of Broadband" report notes that there are over 60 countries where fixed or mobile broadband penetration is above 25% and more than 70 countries where the majority of the population is online. According to Akamai's "State of the Internet" report, over the last four years the top four countries in terms of average connection speed have nearly doubled their capacity. Although providing access and sufficient capacity remains a challenge in many parts of the world, in most developed countries broadband providers are offering sufficiently high capacities to encourage consumers to migrate services for entertainment, communication and home monitoring to over-the-top (OTT) alternatives. According to a recent survey, nearly 78% of U.S. broadband households subscribe to an OTT video service. Enterprises are following the same path, with over one-third opting to use VoIP phones instead of landline ones. The proliferation of high-capacity access and the migration to OTT services have raised users' expectations of service reliability. A recent survey on consumer experience by the UK Office of Communications (Ofcom) ranks reliability first -- higher than even connection speed -- as the main reason for customer complaints. Our empirical study of access-ISP outages and user demand corroborates these observations, showing the effects of low reliability on user behavior, as captured by their demand on the network. Researchers and regulators alike have also recognized the need for clear standards and a better understanding of the role that service reliability plays in shaping the behavior of broadband users. Despite its growing importance, both the reliability of broadband services and potential ways to improve on it have received scant attention from the research community. In this paper, we introduce an approach for characterizing broadband reliability using data collected by the many emerging national efforts to study broadband (in over 30 countries) and apply this approach to the data gathered by the Measuring Broadband America (MBA) project, which is operated by the United States Federal Communications Commission (FCC). We show, among other findings, that current broadband services deliver an average availability of at most two nines (99%), with an average annual downtime of 17.8 hours. Motivated by our findings, we quantify the potential benefits of multihomed broadband access and study its feasibility as a solution for increasing reliability. Using the FCC MBA dataset and measurements collected by over 6,000 end-host vantage points in 75 countries, we show that multihoming the access link at the home gateway with two different providers adds two nines of service availability, matching the minimum four nines (99.99%) required by the FCC for the public switched telephone network (PSTN).
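To make the "adds two nines" claim concrete, a back-of-the-envelope calculation, under the simplifying assumption (ours, not stated in the abstract) that the two access links fail independently, gives:

\[
A_{\text{multi}} = 1 - (1 - A_1)(1 - A_2) = 1 - (1 - 0.99)(1 - 0.99) = 1 - 10^{-4} = 0.9999
\]

That is, two links that each offer roughly two nines (99%) of availability would combine to roughly four nines (99.99%), the PSTN threshold cited above.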
Sarah Wassermann, John P. Rula, Fabián E. Bustamante, Pedro Casas
Anycast on the Move: A Look at Mobile Anycast Performance
Conference paper: Network Traffic Measurement and Analysis Conference (TMA), 2018.
URL: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/SWasserman-AnycastMove.pdf
The appeal and clear operational and economic benefits of anycast to service providers have motivated a number of recent experimental studies on its potential performance impact for end users. For CDNs on mobile networks, in particular, anycast provides a simpler alternative to existing request-routing systems challenged by a growing, complex, and commonly opaque cellular infrastructure. This paper presents the first analysis of anycast performance for mobile users. In particular, our evaluation focuses on two distinct anycast services, both providing part of the DNS Root zone and together covering all major geographical regions. Our results show that mobile clients are routed to suboptimal replicas in terms of geographical distance and associated latencies, more frequently while on a cellular connection than on WiFi, with a significant impact on performance. We find that this is not simply an issue of lacking better alternatives, and that the problem is not specific to particular geographic areas or autonomous systems. We close with a first analysis of the root causes of this phenomenon and describe some of the major classes of anycast anomalies revealed during our study, additionally including a systematic approach to automatically detect such anomalies without any sort of training or labeled measurements.
John P. Rula, Fabián E. Bustamante, James Newman, Arash Molavi Khaki, Dave Choffnes
Mile High WiFi: A First Look At In-Flight Internet Connectivity
Conference paper: The Web Conference (WWW), 2018.
URL: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/JRula-WWW18.pdf
In-Flight Communication (IFC), which can be purchased on a growing number of commercial flights, is often received by consumers with both awe for its mere availability and harsh criticism for its poor performance. Indeed, IFC provides Internet connectivity in some of the most challenging conditions, with aircraft traveling at speeds in excess of 500 mph at 30,000 feet above the ground. Yet, while existing services do provide basic Internet accessibility, anecdotal reports rank their quality of service as, at best, poor. In this paper, we present the first characterization of deployed IFC systems. Using over 45 flight-hours of measurements, we profile the performance of IFC across the two dominant access technologies -- direct air-to-ground communication (DA2GC) and mobile satellite service (MSS). We show that IFC QoS is in large part determined by the high latencies inherent to DA2GC and MSS, with RTTs averaging 200 ms and 750 ms, respectively, and that these high latencies directly impact the performance of common applications such as web browsing. While each IFC technology is based on well-studied wireless communication technologies, our findings reveal that IFC links experience further degraded link performance than their technological antecedents. We find median loss rates of 7%, and nearly 40% loss at the 90th percentile for MSS, an order of magnitude larger than recent characterizations of residential satellite networks. We extend our IFC study by exploring the potential of the newly released HTTP/2 and QUIC protocols in an emulated IFC environment, finding that QUIC is able to improve page load times by as much as 7.9 times. In addition, we find that HTTP/2's use of multiplexing multiple requests onto a single TCP connection performs up to 4.8x worse than HTTP/1.1 when faced with large numbers of objects. We use network emulation to explore proposed technological improvements to existing IFC systems, finding that high link losses account for the largest factor of performance degradation, and that improving link bandwidth does little to improve the quality of experience for applications such as web browsing.
2017
John P. Rula, Fabián E. Bustamante, Moritz Steiner
Cell Spotting -- Studying the Role of Cellular Networks in the Internet
Conference paper: Internet Measurement Conference (IMC), 2017.
URL: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/census.pdf
The increasingly dominant role of the mobile Internet and its economic implications have been the topic of several studies and surveys from industry and academia. Most previous work has focused on mobile devices as a whole, independently of their connectivity, and taken the limited perspectives of either a few individual handsets or a single operator. We lack a comprehensive and global view of cellular networks, their scope, configurations and usage. In this paper, we present a comprehensive analysis of global cellular networks. We describe an approach to accurately identify cellular network IP addresses using the Network Information API, a non-standard JavaScript API available in several mobile browsers, and show its effectiveness in a range of cellular network configurations. We combine this approach with the vantage point of one of the world's largest CDNs, with over 200,000 servers in 1,450 networks and clients in over 46,000 ASes across 245 countries, to characterize cellular access around the globe. We discover over 350 thousand /24 cellular IPv4 prefixes and 23 thousand /48 cellular IPv6 prefixes. We find that the majority of cellular networks exist as mixed networks (i.e., networks that serve both fixed-line and cellular devices), requiring prefix-level -- not ASN-level -- identification. By utilizing address-level traffic from the same CDN, we calculate the fraction of traffic coming from cellular addresses. Overall, we find that cellular traffic comprises 16.2% of the CDN's global traffic, and that the importance of cellular traffic ranges widely between countries, from capturing nearly 96% of all traffic in Ghana to just 12.1% in France.
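The identification step described above rests on a client-side signal. The sketch below shows one way such a signal could be gathered; only the read of the non-standard navigator.connection.type comes from the abstract, while the reporting endpoint and payload are hypothetical and not the paper's actual mechanism.

// Hedged sketch: report the client's self-declared connection type so that
// server-side logs can pair it with the requesting IP prefix.
function connectionType(): string {
  // Typical values on mobile Chrome include "cellular" and "wifi"; the API is
  // absent on most desktop browsers, hence the fallback.
  const conn = (navigator as unknown as { connection?: { type?: string } }).connection;
  return conn?.type ?? "unknown";
}

// Best-effort beacon; "/beacon/conn-type" is an illustrative endpoint name.
fetch("/beacon/conn-type", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ type: connectionType() }),
  keepalive: true,
}).catch(() => undefined);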
Zachary S. Bischof, Fabián E. Bustamante, Nick Feamster
Characterizing and Improving the Reliability of Broadband Internet Access
Online report: arXiv.org, 2017.
URL: https://arxiv.org/abs/1709.09349
In this paper, we empirically demonstrate the growing importance of reliability by measuring its effect on user behavior. We present an approach for broadband reliability characterization using data collected by many emerging national initiatives to study broadband, and apply it to the data gathered by the Federal Communications Commission's Measuring Broadband America project. Motivated by our findings, we present the design, implementation, and evaluation of a practical approach for improving the reliability of broadband Internet access with multihoming.
Zachary S. Bischof, Fabián E. Bustamante, Rade Stanojevic
The utility argument -- Making a case for broadband SLAs
Conference paper: Passive and Active Measurement (PAM), 2017.
URL: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/PAM17_Bischof.pdf
Most residential broadband services are described in terms of their maximum potential throughput rate, often advertised as having speeds "up to X Mbps". Though such promises are often met, they are fairly limited in scope and, unfortunately, there is no basis for an appeal if a customer were to receive compromised quality of service. While this "best effort" model was sufficient in the early days, we argue that as broadband customers and their devices become more dependent on Internet connectivity, we will see an increased demand for more encompassing Service Level Agreements (SLAs). In this paper, we study the design space of broadband SLAs and explore some of the trade-offs between the level of strictness of SLAs and the cost of delivering them. We argue that certain SLAs could be offered almost immediately with minimal impact on retail prices, and that ISPs (or third parties) could accurately infer the risk of offering an SLA to individual customers -- with accuracy comparable to that in the car or credit insurance industry -- and price the SLA service accordingly.
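The insurance analogy in the abstract above can be made concrete with standard expected-cost pricing; the notation and numbers below are illustrative assumptions, not figures from the paper:

\[
\text{premium} = (1 + m)\,\mathbb{E}[\text{payout}] = (1 + m)\, p_{\text{viol}} \cdot c
\]

where p_viol is the estimated probability that a given customer's service violates the SLA in a billing period, c is the compensation owed per violation, and m is a margin. For instance, a 2% estimated violation risk with a $10 credit and a 50% margin would price the SLA at about $0.30 per month.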
Fabián E. Bustamante, David Clark, Nick Feamster
Workshop on Tracking Quality of Experience in the Internet: Summary and Outcomes
Journal article: SIGCOMM Computer Communication Review (CCR), 47(1), 2017.
URL: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/p55-bustamante.pdf
This is a report on the Workshop on Tracking Quality of Experience in the Internet, held at Princeton, October 21-22, 2015, and jointly sponsored by the National Science Foundation and the Federal Communications Commission. The term Quality of Experience (QoE) describes a user's subjective assessment of their experience when using a particular application. In the past, network engineers have typically focused on Quality of Service (QoS): performance metrics such as throughput, delay and jitter, packet loss, and the like. Yet performance as measured by QoS parameters only matters if it affects the experience of users as they attempt to use a particular application. Ultimately, the user's experience is determined by QoE impairments (e.g., rebuffering). Although QoE and QoS are related -- for example, a video rebuffering event may be caused by a high packet-loss rate -- QoE metrics are what ultimately affect a user's experience. Identifying the causes of QoE impairments is complex, since the impairments may arise in one or another region of the network, in the home network, on the user's device, in servers that are part of the application, or in supporting services such as the DNS. Additionally, metrics for QoE continue to evolve, as do the methods for relating QoE impairments to underlying causes that could be measurable using standard network measurement techniques. Finally, as the capabilities of the underlying network infrastructure continue to evolve, researchers should also consider how infrastructure and tools can best be designed to support measurements that better identify the locations and causes of QoE impairments. The workshop's aim was to understand the current state of QoE research and to contemplate a community agenda to integrate ongoing threads of QoE research into a collaboration. This summary report describes the topics discussed and summarizes the key points of the discussion.
2016
Dipendra Jha, John P. Rula, Fabián E. Bustamante
eXploring Xfinity: A First Look at Provider-Enabled Community Networks
Conference paper: Passive and Active Measurement (PAM), 2016.
URL: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/pam-xfinity.pdf
Several broadband providers have been offering community WiFi as an additional service for existing customers and paid subscribers. These community networks provide Internet connectivity on the go for mobile devices and a path to offload cellular traffic. Rather than deploying new infrastructure or relying on the resources of an organized community, these provider-enabled community WiFi services leverage the existing hardware and connections of their customers. The past few years have seen significant growth in their popularity and coverage, and some municipalities and institutions have started to consider them as the basis for public Internet access. In this paper, we present the first characterization of one such service -- the Xfinity Community WiFi network. Taking the perspectives of the home-router owner and the public hotspot user, we characterize the performance and availability of this service in urban and suburban settings, at different times, between September 2014 and 2015. Our results highlight the challenges of providing these services in urban environments, considering the tensions between coverage and interference, large obstructions and high population densities. Through a series of controlled experiments, we measure the impact on hosting customers, finding that in certain cases the use of the public hotspot can degrade the host network's throughput by up to 67% under high traffic on the public hotspot.
John Rula, Fabián E. Bustamante, David R. Choffnes
When IPs Fly: A Case for Redefining Airline Communication
Workshop paper: International Workshop on Mobile Computing Systems and Applications (HotMobile), 2016.
URL: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/airline.pdf
The global airline industry conducted over 33 million flights in 2014 alone, carrying over 3.3 billion passengers. Surprisingly, the traffic management system handling this flight volume communicates over either VHF audio transmissions or plane transponders, exhibiting several seconds of latency and single bits per second of throughput. There is a general consensus that serving the growing demand will require significant improvements to the air traffic management system; we believe that many of these improvements can leverage the past two decades of mobile networking research. In this paper, we make the case that moving to a common IP-based data channel to support flight communication can radically change the airline industry. While there remain many challenges to achieving this vision, we believe that such a shift can greatly improve the rate of innovation and the overall efficiency of global air traffic management, enhance aircraft safety, and create new applications that leverage the capability of an advanced data channel. Through preliminary measurements on existing in-flight Internet communication systems, we show that existing in-flight connectivity achieves an order of magnitude higher throughput and lower latency than current systems, and operates as a highly reliable and available data link. This position paper takes a first look at the opportunity for IP-based flight communication, and identifies several promising research areas in this space.
2015
Mario A. Sánchez, John S. Otto, Zachary S. Bischof, David R. Choffnes, Fabián E. Bustamante, Balachander Krishnamurthy, Walter Willinger
A measurement experimentation platform at the Internet's edge
Journal article: IEEE/ACM Transactions on Networking (TON), 23(6), 2015.
URL: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/ton-dasu.pdf
Poor visibility into the network hampers progress in a number of important research areas, from network troubleshooting to Internet topology and performance mapping. This persistent, well-known problem has served as motivation for numerous proposals to build or extend existing Internet measurement platforms by recruiting larger, more diverse sets of vantage points. Capturing the edge of the network, however, remains an elusive goal. We argue that at its root the problem is one of incentives. Today's measurement platforms build on the assumption that the goals of experimenters and those hosting the platform are the same. As much of the Internet's growth occurs in residential broadband networks, this assumption no longer holds. We present a measurement experimentation platform that reaches the network edge by explicitly aligning the objectives of the experimenters with those of the users hosting the platform. Dasu -- our current prototype -- is designed to support both network measurement experimentation and broadband characterization. Dasu has been publicly available since July 2010 and is currently in use by over 100K users with a heterogeneous set of connections spread across 2,431 networks and 166 countries. We discuss some of the challenges we faced building and using a platform for the Internet's edge, describe its design and implementation, and illustrate the unique perspective its current deployment brings to Internet measurement.
Zachary S. Bischof, John P. Rula, Fabián E. Bustamante
In and Out of Cuba: Characterizing Cuba's Connectivity
Conference paper: Internet Measurement Conference (IMC), 2015.
URL: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/imc207s-bischofA.pdf
The goal of our work is to characterize the current state of Cuba's access to the wider Internet. This work is motivated by recent improvements in connectivity to the island and the growing commercial interest following the easing of restrictions on travel and trade with the US. In this paper, we profile Cuba's networks, their connections to the rest of the world, and the routes of international traffic going to and from the island. Despite the addition of the ALBA-1 submarine cable, we find that round-trip times to websites hosted off the island remain very high; pings to popular websites frequently took over 300 ms. We also find a high degree of path asymmetry in traffic to/from Cuba. Specifically, in our analysis we find that traffic going out of Cuba typically travels through the ALBA-1 cable but, surprisingly, traffic on the reverse path often traverses high-latency satellite links, adding over 200 ms to round-trip times. Last, we analyze queries to public DNS servers and SSL certificate requests to characterize the availability of network services in Cuba.
Mario A. Sánchez, Fabián E. Bustamante, Balachander Krishnamurthy, Walter Willinger
Experiment coordination for large-scale measurement platforms
Workshop paper: ACM SIGCOMM Workshop on Crowdsourcing and crowdsharing of Big (Internet) Data (C2B(I)D), 2015.
URL: http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/c2b16-sanchezAPT.pdf
The risk of placing an undesired load on networks and networked services through probes originating from measurement platforms has always been present. While several scheduling schemes have been proposed to avoid undue loads or DDoS-like effects from uncontrolled experiments, the motivating scenarios for such schemes have generally been considered "sufficiently unlikely" and safely ignored by most existing measurement platforms. We argue that the growth of large, crowdsourced measurement systems means we cannot ignore this risk any longer. In this paper, we expand on our original lease-based coordination scheme designed for measurement platforms that embrace crowdsourcing as their method of choice. We compare it with two alternative strategies currently implemented by some of the existing crowdsourced measurement platforms: centralized rate-limiting and individual rate-limiting. Our preliminary results show that our solution outperforms these two naive strategies for coordination according to at least two intuitive metrics: resource utilization and bound compliance. We find that our scheme efficiently allows the scalable and effective coordination of measurements among potentially thousands of hosts while providing individual clients with enough flexibility to act on their own.
John P. Rula, Zachary S. Bischof, Fabián E. Bustamante Second Chance - Understanding diversity in broadband access network performance Workshop ACM SIGCOMM Workshop on Crowdsourcing and crowdsharing of Big (Internet) Data (C2B(I)D), 2015. @workshop{rula:bdiversity, title = {Second Chance - Understanding diversity in broadband access network performance}, author = {John P. Rula and Zachary S. Bischof and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/c2b15-rula.pdf}, year = {2015}, date = {2015-08-03}, booktitle = {ACM SIGCOMM Workshop on Crowdsourcing and crowdsharing of Big (Internet) Data (C2B(I)D)}, journal = {In Proc. Sigcomm C2B(I)D workshop}, abstract = {In recognition of the increasing importance of broadband, several governments have embarked on large-scale efforts to measure broadband services from devices within end-user’s homes. Participants for these studies were selected based on features that, a priori, were thought to be relevant to service performance such as geographic region, access technology and subscription level. Every new-year deployment since has followed the same model, ensuring that the number of measurement points remains stable despite the natural churn. In this paper, we start to explore the issue of vantage point selection in residential broadband networks by lever- aging the publicly available datasets collected as part of the FCC Broadband America study. We present the first analysis of the variation of performance in edge networks and diversity of individual vantage points. We explore the underlying causes of this diversity through a factor analysis of contextual factors within an ISP such as the geographic location of subscribers. The goal of this analysis is to inform additional deployments in ongoing studies, and guide the design and deployment of future investigations into broadband networks.}, keywords = {}, pubstate = {published}, tppubtype = {workshop} } In recognition of the increasing importance of broadband, several governments have embarked on large-scale efforts to measure broadband services from devices within end-user’s homes. Participants for these studies were selected based on features that, a priori, were thought to be relevant to service performance such as geographic region, access technology and subscription level. Every new-year deployment since has followed the same model, ensuring that the number of measurement points remains stable despite the natural churn. In this paper, we start to explore the issue of vantage point selection in residential broadband networks by lever- aging the publicly available datasets collected as part of the FCC Broadband America study. We present the first analysis of the variation of performance in edge networks and diversity of individual vantage points. We explore the underlying causes of this diversity through a factor analysis of contextual factors within an ISP such as the geographic location of subscribers. The goal of this analysis is to inform additional deployments in ongoing studies, and guide the design and deployment of future investigations into broadband networks. |
John P. Rula, Fabián E. Bustamante Crowdsensing Under (Soft) Control Conference IEEE INFOCOM, 2015. @conference{rula:softcontrol, title = {Crowdsensing Under (Soft) Control}, author = {John P. Rula and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/csc.pdf}, year = {2015}, date = {2015-04-03}, booktitle = {IEEE INFOCOM}, journal = { In Proc. of INFOCOM}, abstract = {Crowdsensing leverages the pervasiveness and power of mobile devices such as smartphones and tablets, to enable ordinary citizens to collect, transport and verify data. Application domains range from environment monitoring, to infrastructure management and social computing. Crowdsensing services' effectiveness is a direct result of their coverage, which is driven by the recruitment and mobility patterns of participants. Due to the population distribution of most areas, and the regular mobility patterns of participants, less popular or populated areas suffer from poor coverage. In this paper, we present Crowd Soft Control (CSC), an approach to exert limited control over the actions of participants by leveraging the built-in incentives of location-based gaming and social applications. By pairing community sensing with location-based applications, CSC allows sensing services to reuse the incentives of location-based apps to steer the actions of participating users and increase the effectiveness of sensing campaigns. While there are several domains where this intentional movement is useful such as data muling, the paper presents the design, implementation and evaluation of CSC applied to crowdsensing. We built a prototype of CSC and integrated it with two location-based applications, and crowdsensing services. Experimental results demonstrate the low-cost of integration and minimal overhead of CSC.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Crowdsensing leverages the pervasiveness and power of mobile devices such as smartphones and tablets, to enable ordinary citizens to collect, transport and verify data. Application domains range from environment monitoring, to infrastructure management and social computing. Crowdsensing services' effectiveness is a direct result of their coverage, which is driven by the recruitment and mobility patterns of participants. Due to the population distribution of most areas, and the regular mobility patterns of participants, less popular or populated areas suffer from poor coverage. In this paper, we present Crowd Soft Control (CSC), an approach to exert limited control over the actions of participants by leveraging the built-in incentives of location-based gaming and social applications. By pairing community sensing with location-based applications, CSC allows sensing services to reuse the incentives of location-based apps to steer the actions of participating users and increase the effectiveness of sensing campaigns. While there are several domains where this intentional movement is useful such as data muling, the paper presents the design, implementation and evaluation of CSC applied to crowdsensing. We built a prototype of CSC and integrated it with two location-based applications, and crowdsensing services. Experimental results demonstrate the low-cost of integration and minimal overhead of CSC. |
John Rula, Byungjin Jun, Fabián E. Bustamante Mobile AD(D): Estimating Mobile App Session Times for Better Ads Workshop International Workshop on Mobile Computing Systems and Applications (HotMobile), 2015. @workshop{rula:appt, title = {Mobile AD(D): Estimating Mobile App Session Times for Better Ads}, author = {John Rula and Byungjin Jun and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/hot81-rula.pdf}, year = {2015}, date = {2015-02-03}, booktitle = {International Workshop on Mobile Computing Systems and Applications (HotMobile)}, journal = {In Proc. HotMobile}, abstract = {While mobile advertisements are the dominant source of revenue for mobile apps, the usage patterns of mobile users, and thus their engagement times, may be in conflict with the effectiveness of these ads. With any given application, a user may engage for anywhere between a few seconds to several minutes depending on a number of factors such as their location and goals. Despite the resulting wide-range of session times, the current nature of ad auctions dictates that ads are priced and sold prior to actual viewing, that is regardless of the actual display time. We argue that the wealth of easy-to-gather contextual information on mobile devices is sufficient to make better choices by effectively predicting exposure time. We analyze mobile device usage patterns with a detailed two-week long user study of 37 users in the US and South Korea. After characterizing application session times, we use factor analysis to derive a simple predictive model and show that this model is able to offer improved accuracy compared to mean session time over 90% of the time. We make the case for including predicted ad exposure duration in the price of mobile advertisements and posit that such information could significantly improve the effectiveness of mobile advertisement, giving publishers the ability to tune campaigns for engagement length and enabling a more efficient market for ad impressions, select appropriate media for an ad impression and lowering the cost to users including network utilization and device power.}, keywords = {}, pubstate = {published}, tppubtype = {workshop} } While mobile advertisements are the dominant source of revenue for mobile apps, the usage patterns of mobile users, and thus their engagement times, may be in conflict with the effectiveness of these ads. With any given application, a user may engage for anywhere between a few seconds to several minutes depending on a number of factors such as their location and goals. Despite the resulting wide-range of session times, the current nature of ad auctions dictates that ads are priced and sold prior to actual viewing, that is regardless of the actual display time. We argue that the wealth of easy-to-gather contextual information on mobile devices is sufficient to make better choices by effectively predicting exposure time. We analyze mobile device usage patterns with a detailed two-week long user study of 37 users in the US and South Korea. After characterizing application session times, we use factor analysis to derive a simple predictive model and show that this model is able to offer improved accuracy compared to mean session time over 90% of the time. 
We make the case for including predicted ad exposure duration in the price of mobile advertisements and posit that such information could significantly improve the effectiveness of mobile advertising, giving publishers the ability to tune campaigns for engagement length, enabling a more efficient market for ad impressions, helping select appropriate media for an ad impression, and lowering the cost to users in terms of network utilization and device power. |
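The flavor of the contextual predictor can be sketched as a per-(app, context) mean session time that falls back to the app-wide mean when a context has not been seen; the trace, features, and numbers below are invented for illustration, and the paper's actual model comes from a factor analysis over its user study.

```python
# Minimal sketch of a contextual session-time predictor (illustrative only).
from collections import defaultdict
from statistics import mean

# (app, location, hour_bucket, session_seconds) -- toy trace, not study data
trace = [
    ("maps", "commute", "morning", 45), ("maps", "commute", "morning", 60),
    ("maps", "home", "evening", 20),    ("game", "home", "evening", 300),
    ("game", "home", "evening", 420),   ("game", "commute", "morning", 90),
]

by_context = defaultdict(list)
by_app = defaultdict(list)
for app, loc, hour, secs in trace:
    by_context[(app, loc, hour)].append(secs)
    by_app[app].append(secs)

def predict(app, loc, hour):
    """Context mean if this context was seen before, else the app-wide mean."""
    samples = by_context.get((app, loc, hour)) or by_app.get(app) or [0]
    return mean(samples)

print(predict("maps", "commute", "morning"))  # 52.5
print(predict("game", "home", "night"))       # falls back to the app mean: 270
```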
2014 |
Arnau Gavaldà-Miralles, John S. Otto, Fabián E. Bustamante, Luís A. N. Amaral, Jordi Duch, Roger Guimerà User behavior and change; File sharers and copyright laws Conference International Conference on emerging Networking EXperiments and Technologies (CoNEXT), 2014. @conference{gavalda:p2pbehaviour, title = {User behavior and change; File sharers and copyright laws}, author = {Arnau Gavaldà-Miralles and John S. Otto and Fabián E. Bustamante and Luís A. N. Amaral and Jordi Duch and Roger Guimerà}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/conext14.pdf}, year = {2014}, date = {2014-12-06}, booktitle = {International Conference on emerging Networking EXperiments and Technologies (CoNEXT)}, journal = {In Proc. of CoNEXT}, abstract = {Though the impact of file-sharing of copyrighted content has been discussed for over a decade, only in the past few years have countries begun to adopt legislation to criminalize this behavior. These laws impose penalties ranging from warnings and monetary fines to disconnecting Internet service. While their supporters are quick to point out trends showing the efficacy of these laws at reducing use of file-sharing sites, their analyses rely on brief snapshots of activity that cannot reveal long- and short-term trends. In this paper, we introduce an approach to model user behavior based on a hidden Markov model and apply it to analyze a two-year-long user-level trace of download activity of over 38k users from around the world. This approach allows us to quantify the true impact of file- sharing laws on user behavior, identifying behavioral trends otherwise difficult to identify. For instance, despite an initial reduction in activity in New Zealand when a three-strikes law took effect, after two months activity had returned to the level observed prior to the law being enacted. Given that punishment results only at best on short-term compliance, we suggest that incentives-based approaches may be more effective at changing user behavior. }, keywords = {}, pubstate = {published}, tppubtype = {conference} } Though the impact of file-sharing of copyrighted content has been discussed for over a decade, only in the past few years have countries begun to adopt legislation to criminalize this behavior. These laws impose penalties ranging from warnings and monetary fines to disconnecting Internet service. While their supporters are quick to point out trends showing the efficacy of these laws at reducing use of file-sharing sites, their analyses rely on brief snapshots of activity that cannot reveal long- and short-term trends. In this paper, we introduce an approach to model user behavior based on a hidden Markov model and apply it to analyze a two-year-long user-level trace of download activity of over 38k users from around the world. This approach allows us to quantify the true impact of file- sharing laws on user behavior, identifying behavioral trends otherwise difficult to identify. For instance, despite an initial reduction in activity in New Zealand when a three-strikes law took effect, after two months activity had returned to the level observed prior to the law being enacted. Given that punishment results only at best on short-term compliance, we suggest that incentives-based approaches may be more effective at changing user behavior. |
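The hidden Markov model idea can be sketched with a tiny two-state ("active"/"dormant") Viterbi decoder over a sequence of weekly download-activity observations; the states, transition and emission probabilities, and observations below are invented for illustration and are not the fitted model from the paper.

```python
# Two-state HMM Viterbi sketch (illustrative parameters, not the fitted model).
import math

states = ["dormant", "active"]
start = {"dormant": 0.5, "active": 0.5}
trans = {"dormant": {"dormant": 0.8, "active": 0.2},
         "active":  {"dormant": 0.3, "active": 0.7}}
# Emission: probability of observing "none"/"some"/"many" downloads in a week
emit = {"dormant": {"none": 0.7, "some": 0.25, "many": 0.05},
        "active":  {"none": 0.1, "some": 0.4,  "many": 0.5}}

def viterbi(obs):
    """Most likely hidden-state path for a sequence of weekly observations."""
    V = [{s: math.log(start[s]) + math.log(emit[s][obs[0]]) for s in states}]
    back = []
    for o in obs[1:]:
        col, ptr = {}, {}
        for s in states:
            best = max(states, key=lambda p: V[-1][p] + math.log(trans[p][s]))
            col[s] = V[-1][best] + math.log(trans[best][s]) + math.log(emit[s][o])
            ptr[s] = best
        V.append(col)
        back.append(ptr)
    path = [max(states, key=lambda s: V[-1][s])]
    for ptr in reversed(back):
        path.append(ptr[path[-1]])
    return list(reversed(path))

weeks = ["many", "some", "none", "none", "some", "many"]
print(viterbi(weeks))  # e.g. a dip into 'dormant' mid-sequence
```

Decoding user traces into such state paths is what lets one separate a short-lived dip after a law takes effect from a durable change in behavior.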
Mario A. Sánchez, Fabián E. Bustamante, Balachander Krishnamurthy, Walter Willinger, Georgios Smaragdakis, Jeffrey Erman Internet Inter-Domain Traffic Estimation for the Outsider Journal Article In Proc. of IMC, 2014. @article{Domain, title = {Internet Inter-Domain Traffic Estimation for the Outsider}, author = {Mario A. Sánchez and Fabián E. Bustamante and Balachander Krishnamurthy and Walter Willinger and Georgios Smaragdakis and Jeffrey Erman}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/altps-camera-ready.pdf}, year = {2014}, date = {2014-11-09}, journal = {In Proc. of IMC}, abstract = {Characterizing the flow of Internet traffic is important in a wide range of contexts, from network engineering and application design to understanding the network impact of consumer demand and business relationships. Despite the growing interest, the nearly impossible task of collecting large-scale, Internet-wide traffic data has severely constrained the focus of traffic-related studies. In this paper, we introduce a novel approach to characterize inter-domain traffic by reusing large, publicly available traceroute datasets. Our approach builds on a simple insight -- the popularity of a route on the Internet can serve as an informative proxy for the volume of traffic it carries. It applies structural analysis to a dual-representation of the AS-level connectivity graph derived from available traceroute datasets. Drawing analogies with city grids and traffic, it adapts data transformations and metrics of route popularity from urban planning to serve as proxies for traffic volume. We call this approach Network Syntax, highlighting the connection to urban planning Space Syntax. We apply Network Syntax in the context of a global ISP and a large Internet eXchange Point and use ground-truth data to demonstrate the strong correlation (r^2 values of up to 0.9) between inter-domain traffic volume and the different proxy metrics. Working with these two network entities, we show the potential of Network Syntax for identifying critical links and inferring missing traffic matrix measurements.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Characterizing the flow of Internet traffic is important in a wide range of contexts, from network engineering and application design to understanding the network impact of consumer demand and business relationships. Despite the growing interest, the nearly impossible task of collecting large-scale, Internet-wide traffic data has severely constrained the focus of traffic-related studies. In this paper, we introduce a novel approach to characterize inter-domain traffic by reusing large, publicly available traceroute datasets. Our approach builds on a simple insight -- the popularity of a route on the Internet can serve as an informative proxy for the volume of traffic it carries. It applies structural analysis to a dual-representation of the AS-level connectivity graph derived from available traceroute datasets. Drawing analogies with city grids and traffic, it adapts data transformations and metrics of route popularity from urban planning to serve as proxies for traffic volume. We call this approach Network Syntax, highlighting the connection to urban planning Space Syntax. We apply Network Syntax in the context of a global ISP and a large Internet eXchange Point and use ground-truth data to demonstrate the strong correlation (r^2 values of up to 0.9) between inter-domain traffic volume and the different proxy metrics. 
Working with these two network entities, we show the potential of Network Syntax for identifying critical links and inferring missing traffic matrix measurements. |
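The core proxy can be sketched as: build an AS-level graph from traceroute-derived AS paths and use a structural centrality over it as a stand-in for route popularity. The snippet below uses edge betweenness on a toy graph as an illustrative stand-in only; the paper's Network Syntax metrics operate on a dual representation of the graph and are adapted from Space Syntax, so treat this as the flavor of the analysis, not the method itself.

```python
# Illustrative stand-in for a route-popularity proxy (not the paper's
# Network Syntax metrics, which work on a dual graph representation).
import networkx as nx

# Toy AS paths extracted from traceroutes (AS numbers are made up)
as_paths = [
    [100, 200, 300, 400],
    [100, 200, 300, 500],
    [600, 200, 300, 400],
    [600, 700, 500],
]

g = nx.Graph()
for path in as_paths:
    nx.add_path(g, path)

# How often each inter-AS link appears on observed paths (route popularity)
link_count = {}
for path in as_paths:
    for a, b in zip(path, path[1:]):
        key = frozenset((a, b))
        link_count[key] = link_count.get(key, 0) + 1

centrality = nx.edge_betweenness_centrality(g)
for (a, b), c in sorted(centrality.items(), key=lambda kv: -kv[1])[:3]:
    print(f"AS{a}-AS{b}: betweenness={c:.2f}, "
          f"seen on {link_count.get(frozenset((a, b)), 0)} paths")
```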
Zachary S. Bischof, Fabián E. Bustamante, Rade Stanojevic Need, Want, or Can Afford - Broadband Markets and the Behavior of Users Journal Article In Proc. of IMC, 2014. @article{Behaviour, title = {Need, Want, or Can Afford - Broadband Markets and the Behavior of Users}, author = {Zachary S. Bischof and Fabián E. Bustamante and Rade Stanojevic}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/imc220-bischof.pdf}, year = {2014}, date = {2014-11-06}, journal = {In Proc. of IMC}, abstract = {We present the first study of broadband services in their broader context, evaluating the impact of service characteristics (such as capacity, latency and loss), their broadband pricing and user demand. We explore these relationships, beyond correlation, with the application of natural experiments. Most efforts on broadband service characterization have so far focused on performance and availability, yet we lack a clear understanding of how such services are being utilized and how their use is impacted by the particulars of the market. By analyzing over 23-months of data collected from 53,000 end hosts and residential gateways in 160 countries, along with a global survey of retail broadband plans, we empirically study the relationship between broadband service characteristics, pricing and demand. We show a strong correlation between capacity and demand, even though subscribers rarely fully utilize their links, but note a law of diminishing returns with relatively smaller increases in demand at higher capacities. Despite the fourfold increase in global IP traffic, we find that user demand on the network over a three year period remained constant for a given bandwidth capacity. We exploit natural experiments to examine the causality between these factors. The reported findings represent an important step towards understanding how user behavior, and the market features that shape it, affect broadband networks and the Internet at large.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We present the first study of broadband services in their broader context, evaluating the impact of service characteristics (such as capacity, latency and loss), their broadband pricing and user demand. We explore these relationships, beyond correlation, with the application of natural experiments. Most efforts on broadband service characterization have so far focused on performance and availability, yet we lack a clear understanding of how such services are being utilized and how their use is impacted by the particulars of the market. By analyzing over 23-months of data collected from 53,000 end hosts and residential gateways in 160 countries, along with a global survey of retail broadband plans, we empirically study the relationship between broadband service characteristics, pricing and demand. We show a strong correlation between capacity and demand, even though subscribers rarely fully utilize their links, but note a law of diminishing returns with relatively smaller increases in demand at higher capacities. Despite the fourfold increase in global IP traffic, we find that user demand on the network over a three year period remained constant for a given bandwidth capacity. We exploit natural experiments to examine the causality between these factors. The reported findings represent an important step towards understanding how user behavior, and the market features that shape it, affect broadband networks and the Internet at large. |
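The diminishing-returns observation can be illustrated by grouping (capacity, demand) samples into capacity tiers and comparing how median demand grows from tier to tier; the numbers below are synthetic and only show the shape of the analysis, not data from the study.

```python
# Sketch of a capacity-vs-demand "diminishing returns" summary
# (synthetic numbers purely for illustration).
from statistics import median

# (capacity_mbps, monthly_demand_gb) samples -- synthetic
samples = [(5, 30), (5, 45), (10, 60), (10, 75), (25, 95),
           (25, 110), (50, 120), (50, 130), (100, 135), (100, 145)]

tiers = {}
for cap, demand in samples:
    tiers.setdefault(cap, []).append(demand)

prev = None
for cap in sorted(tiers):
    med = median(tiers[cap])
    growth = "" if prev is None else f" (+{100 * (med - prev) / prev:.0f}% vs previous tier)"
    print(f"{cap:>4} Mbps: median demand {med:.0f} GB{growth}")
    prev = med
```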
John Rula, Fabián E. Bustamante Behind the Curtain - Cellular DNS and Content Replica Selection Journal Article In Proc. IMC, 2014. @article{DNSb, title = {Behind the Curtain - Cellular DNS and Content Replica Selection}, author = {John Rula and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/dns.pdf}, year = {2014}, date = {2014-11-03}, journal = {In Proc. IMC}, abstract = {DNS plays a critical role in the performance of smartdevices within cellular networks. Besides name resolution, DNS is commonly relied upon for directing users to nearby content caches for better performance. In light of this, it is surprising how little is known about the structure of cellular DNS and its effectiveness as a client localization method. In this paper we take a close look at cellular network DNS and uncover several features of cellular DNS, such as cellular network opaqueness and client to resolver inconsistency, that make it unsuitable for client localization in modern cellular networks. We study these issues in two leading mobile network markets – US and South Korea – using a collection of over 340 volunteer devices to probe the DNS infrastructure of each client’s cellular provider. We show the extent of the problem with regards to replica selec- tion and compare its localization performance against public DNS alternatives. As a testament to cellular DNS’s poor localization, we find surprisingly that public DNS can render equal or better replica performance over 75% of the time.}, keywords = {}, pubstate = {published}, tppubtype = {article} } DNS plays a critical role in the performance of smartdevices within cellular networks. Besides name resolution, DNS is commonly relied upon for directing users to nearby content caches for better performance. In light of this, it is surprising how little is known about the structure of cellular DNS and its effectiveness as a client localization method. In this paper we take a close look at cellular network DNS and uncover several features of cellular DNS, such as cellular network opaqueness and client to resolver inconsistency, that make it unsuitable for client localization in modern cellular networks. We study these issues in two leading mobile network markets – US and South Korea – using a collection of over 340 volunteer devices to probe the DNS infrastructure of each client’s cellular provider. We show the extent of the problem with regards to replica selec- tion and compare its localization performance against public DNS alternatives. As a testament to cellular DNS’s poor localization, we find surprisingly that public DNS can render equal or better replica performance over 75% of the time. |
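The measurement idea, comparing the CDN replica returned by the device's default resolver against the one returned by a public resolver and then comparing latencies to each, can be sketched with dnspython and a TCP connect-time probe. The hostname and resolver list are placeholders, not the study's targets, and the sketch would need to run from a cellular-connected host to mirror the study's vantage point.

```python
# Sketch: compare replicas returned by different resolvers for one hostname
# (hostname and resolver IPs are placeholders, not the study's targets).
import socket
import time
import dns.resolver  # pip install dnspython

HOSTNAME = "example-cdn-hostname.com"                   # placeholder CDN-hosted name
RESOLVERS = {"default": None, "public-8.8.8.8": "8.8.8.8"}

def resolve(hostname, resolver_ip=None):
    res = dns.resolver.Resolver()
    if resolver_ip:
        res.nameservers = [resolver_ip]
    return [r.address for r in res.resolve(hostname, "A")]

def connect_time_ms(ip, port=80, attempts=3):
    best = float("inf")
    for _ in range(attempts):
        t0 = time.time()
        try:
            socket.create_connection((ip, port), timeout=2).close()
            best = min(best, (time.time() - t0) * 1000)
        except OSError:
            pass
    return best

for label, resolver_ip in RESOLVERS.items():
    ips = resolve(HOSTNAME, resolver_ip)
    latency = min(connect_time_ms(ip) for ip in ips)
    print(f"{label}: replicas={ips}, best connect time {latency:.1f} ms")
```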
Arnau Gavaldà-Miralles, David R. Choffnes, John S. Otto, Mario A. Sánchez, Fabián E. Bustamante, Luís A. N. Amaral, Jordi Duch, Roger Guimerà Impact of heterogeneity and socieconomic factors on individual behavior in decentralized sharing ecosystems Journal Article Proc. of the National Academy of Science (early edition), 2014. @article{Decentralized, title = {Impact of heterogeneity and socieconomic factors on individual behavior in decentralized sharing ecosystems}, author = {Arnau Gavaldà-Miralles and David R. Choffnes and John S. Otto and Mario A. Sánchez and Fabián E. Bustamante and Luís A. N. Amaral and Jordi Duch and Roger Guimerà}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/PNAS-2014-Gavaldà-Miralles-1309389111.pdf}, year = {2014}, date = {2014-09-03}, journal = {Proc. of the National Academy of Science (early edition)}, abstract = {Tens of millions of individuals around the world use decentralized content distribution systems, a fact of growing social, economic, and technological importance. These sharing systems are poorly understood because, unlike in other technosocial systems, it is difficult to gather large-scale data about user behavior. Here, we investigate user activity patterns and the socioeconomic factors that could explain the behavior. Our analysis reveals that (i) the ecosystem is heterogeneous at several levels: content types are heterogeneous, users specialize in a few content types, and countries are heterogeneous in user profiles; and (ii) there is a strong correlation between socioeconomic indicators of a country and users behavior. Our findings open a research area on the dynamics of decentralized sharing ecosystems and the socioeconomic factors affecting them, and may have implications for the design of algorithms and for policymaking.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Tens of millions of individuals around the world use decentralized content distribution systems, a fact of growing social, economic, and technological importance. These sharing systems are poorly understood because, unlike in other technosocial systems, it is difficult to gather large-scale data about user behavior. Here, we investigate user activity patterns and the socioeconomic factors that could explain the behavior. Our analysis reveals that (i) the ecosystem is heterogeneous at several levels: content types are heterogeneous, users specialize in a few content types, and countries are heterogeneous in user profiles; and (ii) there is a strong correlation between socioeconomic indicators of a country and users behavior. Our findings open a research area on the dynamics of decentralized sharing ecosystems and the socioeconomic factors affecting them, and may have implications for the design of algorithms and for policymaking. |
Zachary S. Bischof, Fabián E. Bustamante A Time for Reliability – The Growing Importance of Being Always On Journal Article Poster in Proc. of ACM SIGCOMM, 2014. @article{TRTGIBA, title = {A Time for Reliability – The Growing Importance of Being Always On}, author = {Zachary S. Bischof and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Bischof-Sigcomm-Poster-2014.pdf}, year = {2014}, date = {2014-08-09}, journal = {Poster in Proc. of ACM SIGCOMM}, keywords = {}, pubstate = {published}, tppubtype = {article} } |
Angela H. Jiang, Zachary S. Bischof, Fabián E. Bustamante A cliq of content curators Journal Article Poster in Proc. of ACM SIGCOMM, 2014. @article{CCC, title = {A cliq of content curators}, author = {Angela H. Jiang and Zachary S. Bischof and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Jiang-Sigcomm-Poster-2014-1.pdf}, year = {2014}, date = {2014-08-06}, journal = {Poster in Proc. of ACM SIGCOMM}, abstract = {A social news site presents user-curated content, ranked by popularity. Popular curators like Reddit, or Facebook have become effective way of crowdsourcing news or sharing personal opinions. Traditionally, these services require a centralized authority to aggregate data and determine what to display. However, the trust issues that arise from a centralized system are particularly damaging to the "Web democracy" that social news sites are meant to provide. We present cliq, a decentralized social news curator. cliq is a P2P based social news curator that provides private and unbiased reporting. All users in cliq share responsibility for tracking and providing popular content. Any user data that cliq needs to store is also managed across the network. We first inform our design of cliq through an analysis of Reddit. We design a way to provide content curation without a persistent moderator, or usernames. }, keywords = {}, pubstate = {published}, tppubtype = {article} } A social news site presents user-curated content, ranked by popularity. Popular curators like Reddit, or Facebook have become effective way of crowdsourcing news or sharing personal opinions. Traditionally, these services require a centralized authority to aggregate data and determine what to display. However, the trust issues that arise from a centralized system are particularly damaging to the "Web democracy" that social news sites are meant to provide. We present cliq, a decentralized social news curator. cliq is a P2P based social news curator that provides private and unbiased reporting. All users in cliq share responsibility for tracking and providing popular content. Any user data that cliq needs to store is also managed across the network. We first inform our design of cliq through an analysis of Reddit. We design a way to provide content curation without a persistent moderator, or usernames. |
John Rula, Fabián E. Bustamante Behind the Curtain: The importance of replica selection in next generation cellular networks Journal Article Poster in ACM Sigcomm, 2014. @article{Mobilec, title = {Behind the Curtain: The importance of replica selection in next generation cellular networks}, author = {John Rula and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/dns-poster.pdf}, year = {2014}, date = {2014-08-03}, journal = {Poster in ACM Sigcomm}, abstract = {Smartdevices are becoming the primary or only Internet point of access for an ever larger fraction of the population. Nearly a quarter of current web traffic is mobile, and recent industry studies have estimated a fourfold increase on global mobile data traffic by 2018, mainly driven by the content demands and growing number of smart phones and tablets [2]. The most recent CISCO VNI report estimates that by 2018, the majority of North America devices and connections will have 4G capability and, while 4G will be 15% of world-wide connections then, these connections will be responsible for 51% of traffic. Cellular networks pose a challenge to content delivery networks (CDNs) given their opaque network structure, limited number of ingress points, and obfuscated DNS infrastructure. Previously, large cellular radio latencies meant CDN replica selection had little impact on the total end-to-end latency. However, the advancement of 4G networks such as LTE has lowered mobile device access latency to make it comparable with many existing broadband services, making the choice of content replica server a significant contributor to end-to-end performance. In general, but particularly in cellular networks, CDNs have limited signals for locating clients. Mobile IPs have been shown to be dynamic for mobile end hosts [1], and external entities such as CDNs are prevented from probing their mobile clients or their infrastructure by NAT and firewall policies implemented by cellular operators. In this poster, we present preliminary work looking at the impact of replica selection in next generation cellular networks. Using a collection of over 250 mobile end-hosts over a two-month period, we explore CDN replica selection in cellular networks measuring the latency to content replicas for a selection of popular mobile websites. We find that clients in next generation radio technologies can see up to 400% differences in latency to selected replicas. We discover that, in large part, these poor selections are due to current localization approaches employed by CDNs such as DNS redirection which, while fairly effective in wired hosts, performs rather poorly within cellular networks mainly due to cellular DNS behavior. }, keywords = {}, pubstate = {published}, tppubtype = {article} } Smartdevices are becoming the primary or only Internet point of access for an ever larger fraction of the population. Nearly a quarter of current web traffic is mobile, and recent industry studies have estimated a fourfold increase on global mobile data traffic by 2018, mainly driven by the content demands and growing number of smart phones and tablets [2]. The most recent CISCO VNI report estimates that by 2018, the majority of North America devices and connections will have 4G capability and, while 4G will be 15% of world-wide connections then, these connections will be responsible for 51% of traffic. 
Cellular networks pose a challenge to content delivery networks (CDNs) given their opaque network structure, limited number of ingress points, and obfuscated DNS infrastructure. Previously, large cellular radio latencies meant CDN replica selection had little impact on the total end-to-end latency. However, the advancement of 4G networks such as LTE has lowered mobile device access latency to make it comparable with many existing broadband services, making the choice of content replica server a significant contributor to end-to-end performance. In general, but particularly in cellular networks, CDNs have limited signals for locating clients. Mobile IPs have been shown to be dynamic for mobile end hosts [1], and external entities such as CDNs are prevented from probing their mobile clients or their infrastructure by NAT and firewall policies implemented by cellular operators. In this poster, we present preliminary work looking at the impact of replica selection in next generation cellular networks. Using a collection of over 250 mobile end-hosts over a two-month period, we explore CDN replica selection in cellular networks measuring the latency to content replicas for a selection of popular mobile websites. We find that clients in next generation radio technologies can see up to 400% differences in latency to selected replicas. We discover that, in large part, these poor selections are due to current localization approaches employed by CDNs such as DNS redirection which, while fairly effective in wired hosts, performs rather poorly within cellular networks mainly due to cellular DNS behavior. |
John P. Rula, Vishnu Navda, Fabián E. Bustamante, Ranjita Bhagwan, Saikat Guha No "One-size fits all": Towards a principled approach for incentives in mobile crowdsourcing Journal Article In Proc. of the Fifteenth Workshop on Mobile Computing Systems and Applications (HotMobile), 2014. @article{Mobileb, title = {No "One-size fits all": Towards a principled approach for incentives in mobile crowdsourcing}, author = {John P. Rula and Vishnu Navda and Fabián E. Bustamante and Ranjita Bhagwan and Saikat Guha}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/incentives_hotmobile.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/hotmobile_2014.pdf}, year = {2014}, date = {2014-02-03}, journal = {In Proc. of the Fifteenth Workshop on Mobile Computing Systems and Applications (HotMobile)}, abstract = {We are becoming increasingly aware that the effectiveness of mobile crowdsourcing systems critically depends on the whims of their human participants, impacting everything from participant engagement to their compliance with the crowdsourced tasks. In response, a number of such systems have started to incorpo- rate different incentive features aimed at a wide range of goals that span from improving participation levels, to extending the systems’ coverage, and enhancing the quality of the collected data. Despite the many related efforts, the inclusion of incentives in crowdsourced systems has so far been mostly ad-hoc, treating incentives as a wild-card response fitted for any occasion and goal. Using data from a large, 2-day experiment with 96 participants at a corporate conference, we present an analysis of the impact of two incentive structures on the recruitment, compliance and user effort of a basic mobile crowdsourced service. We build on these preliminary results to argue for a principled approach for selecting incentive and incentive structures to match the variety of requirements of mobile crowdsourcing applications and discuss key issues in working toward that goal.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We are becoming increasingly aware that the effectiveness of mobile crowdsourcing systems critically depends on the whims of their human participants, impacting everything from participant engagement to their compliance with the crowdsourced tasks. In response, a number of such systems have started to incorpo- rate different incentive features aimed at a wide range of goals that span from improving participation levels, to extending the systems’ coverage, and enhancing the quality of the collected data. Despite the many related efforts, the inclusion of incentives in crowdsourced systems has so far been mostly ad-hoc, treating incentives as a wild-card response fitted for any occasion and goal. Using data from a large, 2-day experiment with 96 participants at a corporate conference, we present an analysis of the impact of two incentive structures on the recruitment, compliance and user effort of a basic mobile crowdsourced service. We build on these preliminary results to argue for a principled approach for selecting incentive and incentive structures to match the variety of requirements of mobile crowdsourcing applications and discuss key issues in working toward that goal. |
2013 |
Mario A. Sánchez, John S. Otto, Zachary S. Bischof, David R. Choffnes, Fabián E. Bustamante, Balachander Krishnamurthy, Walter Willinger Dasu: A measurement experimentation platform at the Internet’s edge Technical Report Department of Computer Science, Northwestern University ( NWU-EECS-13-09), 2013. @techreport{DASUe, title = { Dasu: A measurement experimentation platform at the Internet’s edge}, author = {Mario A. Sánchez and John S. Otto and Zachary S. Bischof and David R. Choffnes and Fabián E. Bustamante and Balachander Krishnamurthy and Walter Willinger}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/NU-EECS-13-09.pdf}, year = {2013}, date = {2013-09-06}, number = { NWU-EECS-13-09}, institution = {Department of Computer Science, Northwestern University}, abstract = {Poor visibility into the network hampers progress in a number of important research areas, from network troubleshooting to Internet topology and performance mapping. This persistent well-known problem has served as motivation for numerous proposals to build or extend existing platforms by recruiting larger, more diverse vantage points. However, capturing the edge of the network remains an elusive goal. We argue that at its root the problem is one of incentives. Today’s measurement platforms build on the assumption that the goals of experimenters and those hosting the platform are the same. As much of the Internet growth occurs in residential broadband networks, this assumption no longer holds. We present Dasu, a measurement experimentation platform built on an alternate model that explicitly aligns the objectives of the experimenters with those of the users hosting the platform. Dasu is designed to support both network measurement experimentation and broadband characterization. In this paper, we discuss some of the challenges we faced building a platform for the Internet’s edge, describe our current design and implementation, and illustrate the unique perspective our current deployment brings to Internet measurement. Dasu has been publicly available since July 2010 and is currently in use by over 95,000 users with a heterogeneous set of connections spreading across 1,802 networks and 151 countries. }, keywords = {}, pubstate = {published}, tppubtype = {techreport} } Poor visibility into the network hampers progress in a number of important research areas, from network troubleshooting to Internet topology and performance mapping. This persistent well-known problem has served as motivation for numerous proposals to build or extend existing platforms by recruiting larger, more diverse vantage points. However, capturing the edge of the network remains an elusive goal. We argue that at its root the problem is one of incentives. Today’s measurement platforms build on the assumption that the goals of experimenters and those hosting the platform are the same. As much of the Internet growth occurs in residential broadband networks, this assumption no longer holds. We present Dasu, a measurement experimentation platform built on an alternate model that explicitly aligns the objectives of the experimenters with those of the users hosting the platform. Dasu is designed to support both network measurement experimentation and broadband characterization. In this paper, we discuss some of the challenges we faced building a platform for the Internet’s edge, describe our current design and implementation, and illustrate the unique perspective our current deployment brings to Internet measurement. 
Dasu has been publicly available since July 2010 and is currently in use by over 95,000 users with a heterogeneous set of connections spread across 1,802 networks and 151 countries. |
John S. Otto, Fabián E. Bustamante The hidden locality in swarms Journal Article In Proc. of IEEE P2P, 2013. @article{Swarms, title = {The hidden locality in swarms}, author = {John S. Otto and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/P2P2013Otto.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/P2P2013Otto-slides.pptx}, year = {2013}, date = {2013-09-03}, journal = {In Proc. of IEEE P2P}, abstract = {People use P2P systems such as BitTorrent to share an unprecedented variety and amount of content with others around the world. The random connection pattern used by BitTorrent has been shown to result in reduced performance for users and costly cross-ISP traffic. Although several client-side systems have been proposed to improve the locality of BitTorrent traffic, their effectiveness is limited by the availability of local peers. We show that sufficient locality is present in swarms -- if one looks at the right time. We find that 50% of ISPs have at least five local peers online during the ISP's peak hour, typically in the evening, compared to only 20% of ISPs during the median hour. To better discover these local peers, we show how to increase the overall peer discovery rate by over two orders of magnitude using client-side techniques: leveraging additional trackers, requesting more peers per sample, and sampling more frequently. We propose an approach to predict future availability of local peers based on observed diurnal patterns. This approach enables peers to selectively apply these techniques to minimize undue load on trackers.}, keywords = {}, pubstate = {published}, tppubtype = {article} } People use P2P systems such as BitTorrent to share an unprecedented variety and amount of content with others around the world. The random connection pattern used by BitTorrent has been shown to result in reduced performance for users and costly cross-ISP traffic. Although several client-side systems have been proposed to improve the locality of BitTorrent traffic, their effectiveness is limited by the availability of local peers. We show that sufficient locality is present in swarms -- if one looks at the right time. We find that 50% of ISPs have at least five local peers online during the ISP's peak hour, typically in the evening, compared to only 20% of ISPs during the median hour. To better discover these local peers, we show how to increase the overall peer discovery rate by over two orders of magnitude using client-side techniques: leveraging additional trackers, requesting more peers per sample, and sampling more frequently. We propose an approach to predict future availability of local peers based on observed diurnal patterns. This approach enables peers to selectively apply these techniques to minimize undue load on trackers. |
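The availability-prediction idea, using the diurnal pattern of previously seen local peers to decide when it is worth applying the heavier discovery techniques, can be sketched as an hour-of-day profile with a threshold; the observations and threshold below are invented for illustration, whereas the paper builds profiles from real tracker samples.

```python
# Sketch: predict "good" hours for finding local peers from a diurnal profile
# (synthetic observations, illustrative threshold).
from collections import defaultdict
from statistics import mean

# (hour_of_day, local_peers_seen) samples for one ISP -- synthetic
observations = [(9, 1), (9, 0), (13, 2), (13, 1), (19, 6), (19, 5),
                (20, 7), (20, 6), (23, 3), (23, 2)]

profile = defaultdict(list)
for hour, peers in observations:
    profile[hour].append(peers)

THRESHOLD = 4  # assumed minimum expected local peers to justify aggressive discovery

def should_sample_aggressively(hour):
    """Apply extra trackers / larger peer requests only in promising hours."""
    return mean(profile.get(hour, [0])) >= THRESHOLD

for hour in sorted(profile):
    print(hour, mean(profile[hour]), should_sample_aggressively(hour))
```

Gating the expensive techniques this way is what keeps the added load on trackers bounded while still catching the evening peak of local peers.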
Zachary S. Bischof, Mario A. Sánchez, John S. Otto, John P. Rula, Fabián E. Bustamante Characterizing Broadband Services with Dasu Journal Article Demonstration at USENIX NSDI, 2013. @article{DASUd, title = {Characterizing Broadband Services with Dasu}, author = {Zachary S. Bischof and Mario A. Sánchez and John S. Otto and John P. Rula and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/nsdi13-broadband-demo.pdf}, year = {2013}, date = {2013-04-09}, journal = {Demonstration at USENIX NSDI}, abstract = {We present the broadband characterization functionality of Dasu, showcase its user-interface, and include side-by-side comparisons of competing broadband services. This poster complements Sánchez et al. (appearing in NSDI) and its related demo submission; both focus on the design and implementation of Dasu as an experimental platform. As mentioned in the NSDI work, Dasu partially relies on service characterization as incentive for adoption. This side of Dasu is a prototype implementation of our crowdsourced-based, end-system approach to broadband characterization. By leveraging monitoring information from local hosts and home routers, our approach can attain scalability, continuity and end-user perspective while avoiding the potential pitfalls of similar models.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We present the broadband characterization functionality of Dasu, showcase its user-interface, and include side-by-side comparisons of competing broadband services. This poster complements Sánchez et al. (appearing in NSDI) and its related demo submission; both focus on the design and implementation of Dasu as an experimental platform. As mentioned in the NSDI work, Dasu partially relies on service characterization as incentive for adoption. This side of Dasu is a prototype implementation of our crowdsourced-based, end-system approach to broadband characterization. By leveraging monitoring information from local hosts and home routers, our approach can attain scalability, continuity and end-user perspective while avoiding the potential pitfalls of similar models. |
Mario A. Sánchez, John S. Otto, Zachary S. Bischof, David R. Choffnes, Fabián E. Bustamante, Walter Willinger Experiments at the Internet's Edge with Dasu Journal Article Demonstration at USENIX NSDI, 2013. @article{DASUc, title = {Experiments at the Internet's Edge with Dasu}, author = {Mario A. Sánchez and John S. Otto and Zachary S. Bischof and David R. Choffnes and Fabián E. Bustamante and Walter Willinger}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/nsdi13-dasu-experiment-demo.pdf}, year = {2013}, date = {2013-04-06}, journal = {Demonstration at USENIX NSDI}, abstract = {Dasu is an extensible measurement experimentation platform for the Internet's edge. Dasu is composed of a distributed collection of clients, hosted by participating end hosts, and a core set of services for managing and coordinating experimentation. Dasu supports and builds on broadband characterization as an incentive for adoption to capture the network and service diversity of the commercial Internet. This demo presents Dasu in action, focusing on its experiment delegation mechanism and showing how it enables third-party experimentation and maintains security and accountability.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Dasu is an extensible measurement experimentation platform for the Internet's edge. Dasu is composed of a distributed collection of clients, hosted by participating end hosts, and a core set of services for managing and coordinating experimentation. Dasu supports and builds on broadband characterization as an incentive for adoption to capture the network and service diversity of the commercial Internet. This demo presents Dasu in action, focusing on its experiment delegation mechanism and showing how it enables third-party experimentation and maintains security and accountability. |
Mario A. Sánchez, John S. Otto, Zachary S. Bischof, David R. Choffnes, Fabián E. Bustamante, Balachander Krishnamurthy, Walter Willinger Dasu: Pushing Experiments to the Internet's Edge Journal Article In Proc. of the USENIX Symposium on Networked Systems Design and Implementation (NSDI), 2013. @article{DASUb, title = { Dasu: Pushing Experiments to the Internet's Edge}, author = {Mario A. Sánchez and John S. Otto and Zachary S. Bischof and David R. Choffnes and Fabián E. Bustamante and Balachander Krishnamurthy and Walter Willinger}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/dasu-measurement.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Sanchez2013NSDISlides.pdf}, year = {2013}, date = {2013-04-03}, journal = {In Proc. of the USENIX Symposium on Networked Systems Design and Implementation (NSDI)}, abstract = {We present Dasu, a measurement experimentation platform for the Internet’s edge. Dasu supports both controlled network experimentation and broadband char- acterization, building on public interest on the latter to gain the adoption necessary for the former. We discuss some of the challenges we faced building a platform for the Internet’s edge, describe our current design and implementation, and illustrate the unique perspective it brings to Internet measurement. Dasu has been publicly available since July 2010 and has been installed by over 90,000 users with a heterogeneous set of connections spreading across 1,802 networks and 147 countries.}, keywords = {}, pubstate = {published}, tppubtype = {article} } We present Dasu, a measurement experimentation platform for the Internet’s edge. Dasu supports both controlled network experimentation and broadband char- acterization, building on public interest on the latter to gain the adoption necessary for the former. We discuss some of the challenges we faced building a platform for the Internet’s edge, describe our current design and implementation, and illustrate the unique perspective it brings to Internet measurement. Dasu has been publicly available since July 2010 and has been installed by over 90,000 users with a heterogeneous set of connections spreading across 1,802 networks and 147 countries. |
Mario A. Sánchez, John S. Otto, Zachary S. Bischof, Fabián E. Bustamante Trying Broadband Characterization at Home Journal Article In Proc. of the Passive and Active Measurement Conference (PAM), 2013. @article{TBCH, title = {Trying Broadband Characterization at Home}, author = {Mario A. Sánchez and John S. Otto and Zachary S. Bischof and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/pam-upnp.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Slides.pdf}, year = {2013}, date = {2013-03-03}, journal = {In Proc. of the Passive and Active Measurement Conference (PAM)}, abstract = {In recent years the quantity and diversity of Internet-enabled consumer devices in the home have increased significantly. These trends complicate device usability and home resource management and have implications for crowdsourced approaches to broadband characterization. The UPnP protocol has emerged as an open standard for device and service discovery to simplify device usability and resource management in home networks. In this work, we leverage UPnP to understand the dynamics of home device usage, both at a macro and micro level, and to sketch an effective approach to broadband characterization that runs behind the last meter. Using UPnP measurements collected from over 13K end users, we show that while home networks can be quite complex, the number of devices that actively and regularly connect to the Internet is limited. Furthermore, we find a high correlation between the number of UPnP-enabled devices in home networks and the presence of UPnP-enabled gateways, and show how this can be leveraged for effective broadband characterization.}, keywords = {}, pubstate = {published}, tppubtype = {article} } In recent years the quantity and diversity of Internet-enabled consumer devices in the home have increased significantly. These trends complicate device usability and home resource management and have implications for crowdsourced approaches to broadband characterization. The UPnP protocol has emerged as an open standard for device and service discovery to simplify device usability and resource management in home networks. In this work, we leverage UPnP to understand the dynamics of home device usage, both at a macro and micro level, and to sketch an effective approach to broadband characterization that runs behind the last meter. Using UPnP measurements collected from over 13K end users, we show that while home networks can be quite complex, the number of devices that actively and regularly connect to the Internet is limited. Furthermore, we find a high correlation between the number of UPnP-enabled devices in home networks and the presence of UPnP-enabled gateways, and show how this can be leveraged for effective broadband characterization. |
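UPnP-based measurements of this kind rest on SSDP discovery: a multicast M-SEARCH to 239.255.255.250:1900, to which UPnP devices (including many home gateways) reply with the URL of their description document. A minimal sketch of that discovery step, run from inside a home network; it only lists responders and does not fetch or parse the device descriptions the study relies on.

```python
# Minimal SSDP (UPnP discovery) sketch -- lists devices that answer on the LAN.
import socket

MSEARCH = "\r\n".join([
    "M-SEARCH * HTTP/1.1",
    "HOST: 239.255.255.250:1900",
    'MAN: "ssdp:discover"',
    "MX: 2",
    "ST: ssdp:all",
    "", "",
]).encode()

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.settimeout(3)
sock.sendto(MSEARCH, ("239.255.255.250", 1900))

devices = set()
try:
    while True:
        data, addr = sock.recvfrom(65507)
        # Each response carries a LOCATION header pointing at the description URL
        for line in data.decode(errors="replace").split("\r\n"):
            if line.lower().startswith("location:"):
                devices.add((addr[0], line.split(":", 1)[1].strip()))
except socket.timeout:
    pass

for ip, location in sorted(devices):
    print(ip, location)
```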
2012 |
John S. Otto, Mario A. Sánchez, John P. Rula, Fabián E. Bustamante Content delivery and the natural evolution of DNS - Remote DNS Trends, Performance Issues and Alternative Solutions Journal Article In Proc. of IMC, 2012. @article{CDNEDNS, title = {Content delivery and the natural evolution of DNS - Remote DNS Trends, Performance Issues and Alternative Solutions}, author = {John S. Otto and Mario A. Sánchez and John P. Rula and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/OttoIMC2012.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/Otto2012IMCSlides.pptx}, year = {2012}, date = {2012-11-03}, journal = {In Proc. of IMC}, abstract = {Content Delivery Networks (CDNs) rely on the Domain Name System (DNS) for replica server selection. DNS-based server selection builds on the assumption that, in the absence of information about the client's actual network location, the location of a client's DNS resolver provides a good approximation. The recent growth of remote DNS services breaks this assumption and can negatively impact client's web performance. In this paper, we assess the end-to-end impact of using remote DNS services on CDN performance and present the first evaluation of an industry-proposed solution to the problem. We find that remote DNS usage can indeed significantly impact client's web performance and that the proposed solution, if available, can effectively address the problem for most clients. Considering the performance cost of remote DNS usage and the limited adoption base of the industry-proposed solution, we present and evaluate an alternative approach, Direct Resolution, to readily obtain comparable performance improvements without requiring CDN or DNS participation. }, keywords = {}, pubstate = {published}, tppubtype = {article} } Content Delivery Networks (CDNs) rely on the Domain Name System (DNS) for replica server selection. DNS-based server selection builds on the assumption that, in the absence of information about the client's actual network location, the location of a client's DNS resolver provides a good approximation. The recent growth of remote DNS services breaks this assumption and can negatively impact client's web performance. In this paper, we assess the end-to-end impact of using remote DNS services on CDN performance and present the first evaluation of an industry-proposed solution to the problem. We find that remote DNS usage can indeed significantly impact client's web performance and that the proposed solution, if available, can effectively address the problem for most clients. Considering the performance cost of remote DNS usage and the limited adoption base of the industry-proposed solution, we present and evaluate an alternative approach, Direct Resolution, to readily obtain comparable performance improvements without requiring CDN or DNS participation. |
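The Direct Resolution idea, skipping the recursive resolver for CDN-hosted names and querying the relevant authoritative server directly so the answer reflects the client's own network location, can be sketched with dnspython. The hostname is a placeholder, the zone guess is naive, and the sketch ignores caching, multi-level CNAME chains, and the fallbacks a real client-side tool must handle.

```python
# Sketch of "direct resolution" for a CDN-hosted name (placeholder hostname;
# a real implementation must handle CNAME chains, caching, and fallbacks).
import dns.resolver  # pip install dnspython

def direct_resolve(hostname):
    # Follow one CNAME hop with the default resolver first
    target = hostname
    try:
        cname = dns.resolver.resolve(hostname, "CNAME")
        target = str(cname[0].target).rstrip(".")
    except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
        pass

    # Naive guess at the target's zone, then find an authoritative nameserver
    zone = ".".join(target.split(".")[-2:])
    ns_name = str(dns.resolver.resolve(zone, "NS")[0].target)
    ns_ip = dns.resolver.resolve(ns_name, "A")[0].address

    # Ask the authority directly, so the answer is localized to *this* client
    res = dns.resolver.Resolver(configure=False)
    res.nameservers = [ns_ip]
    return [r.address for r in res.resolve(target, "A")]

print(direct_resolve("www.example.com"))  # placeholder name
```

The localization benefit comes from the authoritative server seeing the client's own source address rather than that of a possibly distant recursive resolver.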
Zachary S. Bischof, John S. Otto, Fabián E. Bustamante Up, Down and Around the Stack: ISP Characterization from Network Intensive Applications Journal Article In Proc. of ACM SIGCOMM Workshop on Measurements Up the STack (W-MUST), 2012. @article{UDS, title = {Up, Down and Around the Stack: ISP Characterization from Network Intensive Applications}, author = {Zachary S. Bischof and John S. Otto and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/ZBischof12WMUST.pdf}, year = {2012}, date = {2012-10-06}, journal = {In Proc. of ACM SIGCOMM Workshop on Measurements Up the STack (W-MUST)}, abstract = {Broadband characterization has recently attracted much attention from the research community and the general public. Given the important business and policy implications of residential Internet service characterization, recent years have brought a variety of approaches to profiling Internet services, ranging from Web-based platforms to dedicated infrastructure inside home networks. We have previously argued that network-intensive applications provide an almost ideal vantage point for broadband characterization at sufficient scale, nearly continuously and from end users. While we have shown that the approach is indeed effective at service characterization and can enable performance comparisons between service providers and geographic regions, a key unanswered question is how well the performance characteristics captured by these systems can predict the overall user experience with different applications. In this paper, using BitTorrent as an example host application, we present initial results that demonstrate how to obtain estimates of bandwidth and latency of a network connection by leveraging passive monitoring and limited active measurements from network intensive applications. We then analyze user experienced web performance under a variety of network conditions and show how estimates from a network intensive application can serve as good web performance predictors.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Broadband characterization has recently attracted much attention from the research community and the general public. Given the important business and policy implications of residential Internet service characterization, recent years have brought a variety of approaches to profiling Internet services, ranging from Web-based platforms to dedicated infrastructure inside home networks. We have previously argued that network-intensive applications provide an almost ideal vantage point for broadband characterization at sufficient scale, nearly continuously and from end users. While we have shown that the approach is indeed effective at service characterization and can enable performance comparisons between service providers and geographic regions, a key unanswered question is how well the performance characteristics captured by these systems can predict the overall user experience with different applications. In this paper, using BitTorrent as an example host application, we present initial results that demonstrate how to obtain estimates of bandwidth and latency of a network connection by leveraging passive monitoring and limited active measurements from network intensive applications. We then analyze user experienced web performance under a variety of network conditions and show how estimates from a network intensive application can serve as good web performance predictors. |
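The estimation step described above can be sketched, under strong simplifying assumptions, as taking a high percentile of busy-interval throughput samples as a capacity estimate and the minimum observed peer RTT as a latency baseline; the samples below are synthetic, whereas the paper obtains them by instrumenting BitTorrent.

```python
# Sketch: bandwidth/latency estimates from passive per-interval samples
# (synthetic numbers; real signal comes from instrumenting the host application).
throughput_kbps = [850, 2400, 9800, 10250, 9900, 300, 10100, 9750]  # 1-s samples
peer_rtts_ms = [48, 41, 120, 39, 300, 44]

busy = sorted(t for t in throughput_kbps if t > 1000)   # drop idle intervals
capacity_estimate = busy[int(0.9 * (len(busy) - 1))]    # ~90th percentile
latency_estimate = min(peer_rtts_ms)

print(f"estimated downlink capacity ~{capacity_estimate / 1000:.1f} Mbps")
print(f"estimated baseline latency ~{latency_estimate} ms")
```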
John S. Otto, Mario A. Sánchez, John P. Rula, Ted Stein, Fabián E. Bustamante namehelp: intelligent client-side DNS resolution Journal Article In ACM SIGCOMM CCR Special Issue, 42 (4), 2012. @article{namehelp, title = {namehelp: intelligent client-side DNS resolution}, author = {John S. Otto and Mario A. Sánchez and John P. Rula and Ted Stein and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/OttoSigcommPoster2012.pdf}, year = {2012}, date = {2012-10-03}, journal = {In ACM SIGCOMM CCR Special Issue}, volume = {42}, number = {4}, abstract = {The Domain Name System (DNS) is a fundamental component of today’s Internet. Recent years have seen radical changes to DNS with increases in usage of remote DNS and public DNS services such as OpenDNS. Given the close relationship between DNS and Content Delivery Networks (CDNs) and the pervasive use of CDNs by many popular applications including web browsing and real-time entertainment services, it is important to understand the impact of remote and public DNS services on users’ overall experience on the Web. This work presents a tool, namehelp, which comparatively evaluates DNS services in terms of the web performance they provide, and implements an end-host solution to address the performance impact of remote DNS on CDNs. The demonstration will show the functionality of namehelp with online results for its performance improvements.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The Domain Name System (DNS) is a fundamental component of today’s Internet. Recent years have seen radical changes to DNS with increases in usage of remote DNS and public DNS services such as OpenDNS. Given the close relationship between DNS and Content Delivery Networks (CDNs) and the pervasive use of CDNs by many popular applications including web browsing and real-time entertainment services, it is important to understand the impact of remote and public DNS services on users’ overall experience on the Web. This work presents a tool, namehelp, which comparatively evaluates DNS services in terms of the web performance they provide, and implements an end-host solution to address the performance impact of remote DNS on CDNs. The demonstration will show the functionality of namehelp with online results for its performance improvements. |
John Rula, Fabián E. Bustamante Crowd (Soft) Control: Moving Beyond the Opportunistic Journal Article In Proc. of the Thirteenth Workshop on Mobile Computing Systems and Applications (HotMobile), 2012. @article{CCMBO, title = {Crowd (Soft) Control: Moving Beyond the Opportunistic}, author = {John Rula and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/JRula12HotMobile.pdf http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/JRula12HotMobileSlides.pdf }, year = {2012}, date = {2012-02-03}, journal = {In Proc. of the Thirteenth Workshop on Mobile Computing Systems and Applications (HotMobile)}, abstract = {A number of novel wireless networked services, ranging from participatory sensing to social networking, leverage the increasing capabilities of mobile devices and the movement of the individuals carrying them. For many of these systems, their effectiveness fundamentally depends on coverage and the particular mobility patterns of the participants. Given the strong spatial and temporal regularity of human mobility, the needed coverage can typically only be attained through a large participant base. In this paper we explore an alternative approach to attain coverage without scale -- (soft) controlling the movement of participants. We present Crowd Soft Control (CSC), an approach to exert limited control over the temporal and spatial movements of mobile users by leveraging the built-in incentives of location-based gaming and social applications. By pairing network services with these location-based apps, CSC allows researchers to use an application's incentives (e.g. games objectives) to control the movement of participating users, increasing the effectiveness and efficiency of the associated network service. After outlining the case for Crowd Soft Control, we present an initial prototype of our ideas and discuss potential benefits and costs in the context of two case studies. }, keywords = {}, pubstate = {published}, tppubtype = {article} } A number of novel wireless networked services, ranging from participatory sensing to social networking, leverage the increasing capabilities of mobile devices and the movement of the individuals carrying them. For many of these systems, their effectiveness fundamentally depends on coverage and the particular mobility patterns of the participants. Given the strong spatial and temporal regularity of human mobility, the needed coverage can typically only be attained through a large participant base. In this paper we explore an alternative approach to attain coverage without scale -- (soft) controlling the movement of participants. We present Crowd Soft Control (CSC), an approach to exert limited control over the temporal and spatial movements of mobile users by leveraging the built-in incentives of location-based gaming and social applications. By pairing network services with these location-based apps, CSC allows researchers to use an application's incentives (e.g. games objectives) to control the movement of participating users, increasing the effectiveness and efficiency of the associated network service. After outlining the case for Crowd Soft Control, we present an initial prototype of our ideas and discuss potential benefits and costs in the context of two case studies. |
2011 |
Zachary S. Bischof, John S. Otto, Fabián E. Bustamante Distributed Systems and Natural Disasters -- BitTorrent as a Global Witness Journal Article In Proc. of CoNEXT Special Workshop on the Internet and Disasters (SWID), 2011. @article{DSND, title = {Distributed Systems and Natural Disasters -- BitTorrent as a Global Witness}, author = {Zachary S. Bischof and John S. Otto and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/ZBischof11SWID.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/ZBischof11SWID_Slides.pdf}, year = {2011}, date = {2011-12-03}, journal = {In Proc. of CoNEXT Special Workshop on the Internet and Disasters (SWID)}, abstract = {Peer-to-peer (P2P) systems represent some of the largest distributed systems in today's Internet. Among P2P systems, BitTorrent is the most popular, potentially accounting for 20-50% of P2P file-sharing traffic. In this paper, we argue that this popularity can be leveraged to monitor the impact of natural disasters and political unrest on the Internet. We focus our analysis on the 2011 Tohoku earthquake and tsunami and use a view from BitTorrent to show that it is possible to identify specific regions and network links where Internet usage and connectivity were most affected.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Peer-to-peer (P2P) systems represent some of the largest distributed systems in today's Internet. Among P2P systems, BitTorrent is the most popular, potentially accounting for 20-50% of P2P file-sharing traffic. In this paper, we argue that this popularity can be leveraged to monitor the impact of natural disasters and political unrest on the Internet. We focus our analysis on the 2011 Tohoku earthquake and tsunami and use a view from BitTorrent to show that it is possible to identify specific regions and network links where Internet usage and connectivity were most affected. |
Zachary S. Bischof, John S. Otto, Mario A. Sánchez, John P. Rula, David R. Choffnes, Fabián E. Bustamante Crowdsourcing ISP Characterization to the Network Edge Journal Article In Proc. of ACM SIGCOMM Workshop on Measurements Up the STack (W-MUST), 2011. @article{CISPCNE, title = {Crowdsourcing ISP Characterization to the Network Edge}, author = {Zachary S. Bischof and John S. Otto and Mario A. Sánchez and John P. Rula and David R. Choffnes and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/ZBischof11WMUST.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/ZBischof11WMUST_Slides.pdf}, year = {2011}, date = {2011-08-09}, journal = {In Proc. of ACM SIGCOMM Workshop on Measurements Up the STack (W-MUST)}, abstract = {Evaluating and characterizing Internet Service Providers (ISPs) is critical to subscribers shopping for alternative ISPs, companies providing reliable Internet services, and governments surveying the coverage of broadband services to its citizens. Ideally, ISP characterization should be done at scale, continuously, and from end users. While there has been significant progress toward this end, current approaches exhibit apparently unavoidable tradeoffs between coverage, continuous monitoring and capturing user- perceived performance. In this paper, we argue that network-intensive applications running on end systems avoid these tradeoffs, thereby offering an ideal platform for ISP characterization. Based on data collected from 500,000 peer-to-peer BitTorrent users across 3,150 networks, together with the reported results from the U.K. Ofcom/SamKnows studies, we show the feasibility of this approach to characterize the service that subscribers can expect from a particular ISP. We discuss remaining research challenges and design requirements for a solution that enables efficient and accurate ISP characterization at an Internet scale. }, keywords = {}, pubstate = {published}, tppubtype = {article} } Evaluating and characterizing Internet Service Providers (ISPs) is critical to subscribers shopping for alternative ISPs, companies providing reliable Internet services, and governments surveying the coverage of broadband services to its citizens. Ideally, ISP characterization should be done at scale, continuously, and from end users. While there has been significant progress toward this end, current approaches exhibit apparently unavoidable tradeoffs between coverage, continuous monitoring and capturing user- perceived performance. In this paper, we argue that network-intensive applications running on end systems avoid these tradeoffs, thereby offering an ideal platform for ISP characterization. Based on data collected from 500,000 peer-to-peer BitTorrent users across 3,150 networks, together with the reported results from the U.K. Ofcom/SamKnows studies, we show the feasibility of this approach to characterize the service that subscribers can expect from a particular ISP. We discuss remaining research challenges and design requirements for a solution that enables efficient and accurate ISP characterization at an Internet scale. |
John S. Otto, Mario A. Sánchez, David R. Choffnes, Fabián E. Bustamante, Georgos Siganos On Blind Mice and the Elephant -- Understanding the Network Impact of a Large Distributed System Journal Article In Proc. of ACM SIGCOMM, 2011. @article{BME, title = {On Blind Mice and the Elephant -- Understanding the Network Impact of a Large Distributed System}, author = {John S. Otto and Mario A. Sánchez and David R. Choffnes and Fabián E. Bustamante and Georgos Siganos}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/JOtto11SIGCOMM.pdf http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/OttoSigcomm2011.pptx}, year = {2011}, date = {2011-08-06}, journal = {In Proc. of ACM SIGCOMM}, abstract = {A thorough understanding of the network impact of emerging large-scale distributed systems -- where traffic flows and what it costs -- must encompass users' behavior, the traffic they generate and the topology over which that traffic flows. In the case of BitTorrent, however, previous studies have been limited by narrow perspectives that restrict such analysis. This paper presents a comprehensive view of BitTorrent, using data from a representative set of 500,000 users sampled over a two year period, located in 169 countries and 3,150 networks. This unique perspective captures unseen trends and reveals several unexpected features of the largest peer-to-peer system. For instance, over the past year total BitTorrent traffic has increased by 12%, driven by 25% increases in per-peer hourly download volume despite a 10% decrease in the average number of online peers. We also observe stronger diurnal usage patterns and, surprisingly given the bandwidth-intensive nature of the application, a close alignment between these patterns and overall traffic. Considering the aggregated traffic across access links, this has potential implications on BitTorrent-associated costs for Internet Service Providers (ISPs). Using data from a transit ISP, we find a disproportionately large impact under a commonly used burstable (95th-percentile) billing model. Last, when examining BitTorrent traffic's paths, we find that for over half its users, most network traffic never reaches large transit networks, but is instead carried by small transit ISPs. This raises questions on the effectiveness of most in-network monitoring systems to capture trends on peer-to-peer traffic and further motivates our approach.}, keywords = {}, pubstate = {published}, tppubtype = {article} } A thorough understanding of the network impact of emerging large-scale distributed systems -- where traffic flows and what it costs -- must encompass users' behavior, the traffic they generate and the topology over which that traffic flows. In the case of BitTorrent, however, previous studies have been limited by narrow perspectives that restrict such analysis. This paper presents a comprehensive view of BitTorrent, using data from a representative set of 500,000 users sampled over a two year period, located in 169 countries and 3,150 networks. This unique perspective captures unseen trends and reveals several unexpected features of the largest peer-to-peer system. For instance, over the past year total BitTorrent traffic has increased by 12%, driven by 25% increases in per-peer hourly download volume despite a 10% decrease in the average number of online peers. 
We also observe stronger diurnal usage patterns and, surprisingly given the bandwidth-intensive nature of the application, a close alignment between these patterns and overall traffic. Considering the aggregated traffic across access links, this has potential implications on BitTorrent-associated costs for Internet Service Providers (ISPs). Using data from a transit ISP, we find a disproportionately large impact under a commonly used burstable (95th-percentile) billing model. Last, when examining BitTorrent traffic's paths, we find that for over half its users, most network traffic never reaches large transit networks, but is instead carried by small transit ISPs. This raises questions on the effectiveness of most in-network monitoring systems to capture trends on peer-to-peer traffic and further motivates our approach. |
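The burstable (95th-percentile) billing model mentioned in this abstract is straightforward to make concrete. The sketch below, using invented 5-minute samples, applies the billing rule by discarding the top 5% of samples and charging on the highest remaining rate; it is a textbook illustration of the rule, not the paper's analysis code, and the nearest-rank percentile used here is a simplification.

# Illustration of burstable (95th-percentile) billing; not the paper's code.
# Input: per-interval traffic rates (e.g., 5-minute averages, in Mbps) over a
# billing period. The provider ignores the top 5% of samples and bills on the
# highest remaining rate, so short bursts above that threshold cost nothing.
def billable_rate_mbps(samples_mbps):
    ordered = sorted(samples_mbps)
    idx = max(int(0.95 * len(ordered)) - 1, 0)  # simplified nearest-rank
    return ordered[idx]


if __name__ == "__main__":
    # Hypothetical day: steady ~40 Mbps with a one-hour peak at 90 Mbps
    # (12 of 288 five-minute samples). The burst falls inside the ignored
    # top 5%, so the billable rate stays at 40 Mbps.
    samples = [40.0] * 276 + [90.0] * 12
    print(f"Billable rate: {billable_rate_mbps(samples):.1f} Mbps")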
Mario A. Sánchez, John S. Otto, Zachary S. Bischof, Fabián E. Bustamante Dasu - ISP Characterization from the Edge: A BitTorrent Implementation Journal Article Demo in Proc. of ACM SIGCOMM, 2011. @article{DASU, title = {Dasu - ISP Characterization from the Edge: A BitTorrent Implementation}, author = {Mario A. Sánchez and John S. Otto and Zachary S. Bischof and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/MSanchez11SIGCOMM.pdf}, year = {2011}, date = {2011-08-03}, journal = {Demo in Proc. of ACM SIGCOMM}, abstract = {Evaluating and characterizing access ISPs is critical to consumers shopping for alternative services and governments surveying the availability of broadband services to their citizens. We present Dasu, a service for crowdsourcing ISP characterization to the edge of the network. Dasu is implemented as an extension to a popular BitTorrent client and has been available since July 2010. While the prototype uses BitTorrent as its host application, its design is agnostic to the particular host application. The demo showcases our current implementation using both a prerecorded execution trace and a live run.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Evaluating and characterizing access ISPs is critical to consumers shopping for alternative services and governments surveying the availability of broadband services to their citizens. We present Dasu, a service for crowdsourcing ISP characterization to the edge of the network. Dasu is implemented as an extension to a popular BitTorrent client and has been available since July 2010. While the prototype uses BitTorrent as its host application, its design is agnostic to the particular host application. The demo showcases our current implementation using both a prerecorded execution trace and a live run. |
2010 |
Gareth Bennett, Eoin A. King, Jan Curn, Vinny Cahill, Fabián E. Bustamante, Henry J. Rice Environmental Noise Mapping Using Measurements in Transit Journal Article In Proc. of the International Conference on Noise and Vibration Engineering (ISMA), 2010. @article{ENMUMT, title = {Environmental Noise Mapping Using Measurements in Transit}, author = {Gareth Bennett and Eoin A. King and Jan Curn and Vinny Cahill and Fabián E. Bustamante and Henry J. Rice}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/ISMA10-Bennett.pdf}, year = {2010}, date = {2010-09-03}, journal = {In Proc. of the International Conference on Noise and Vibration Engineering (ISMA)}, abstract = {Due to the ever increasing level of environmental noise that the EU population is exposed to, all countries are directed to disseminate community noise level exposures to the public in accordance with EU Directive 2002/49/EC. Environmental noise maps are used for this purpose and as a means to avoid, prevent or reduce the harmful effects caused by exposure to environmental noise. There is no common standard to which these maps are generated in the EU and indeed these maps are in most cases inaccurate due to poorly informed predictive models. This paper develops a novel environmental noise monitoring methodology which will allow accurate road noise measurements to replace erroneous source model approximations in the generation of noise maps. The approach proposes the acquisition of sound levels and position coordinates by instrumented vehicles such as bicycles or cars or by pedestrians equipped with a Smartphone. The accumulation of large amounts of data over time will result in extremely high spatial and temporal resolution resulting in an accurate measurement of environmental noise.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Due to the ever increasing level of environmental noise that the EU population is exposed to, all countries are directed to disseminate community noise level exposures to the public in accordance with EU Directive 2002/49/EC. Environmental noise maps are used for this purpose and as a means to avoid, prevent or reduce the harmful effects caused by exposure to environmental noise. There is no common standard to which these maps are generated in the EU and indeed these maps are in most cases inaccurate due to poorly informed predictive models. This paper develops a novel environmental noise monitoring methodology which will allow accurate road noise measurements to replace erroneous source model approximations in the generation of noise maps. The approach proposes the acquisition of sound levels and position coordinates by instrumented vehicles such as bicycles or cars or by pedestrians equipped with a Smartphone. The accumulation of large amounts of data over time will result in extremely high spatial and temporal resolution resulting in an accurate measurement of environmental noise. |
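As a toy illustration of the data-aggregation step such an approach implies, the sketch below buckets GPS-tagged sound-level samples into coarse grid cells and averages them in the energy domain (decibel values cannot be averaged arithmetically). It is not the methodology of this paper; the cell size, coordinates, and readings are all invented.

# Toy aggregation of GPS-tagged sound-level samples into grid cells; not the
# paper's method. Decibel readings are averaged in the energy domain:
# convert to linear power, average, convert back.
import math
from collections import defaultdict


def grid_cell(lat, lon, cell_deg=0.001):
    # Roughly 100 m x 70 m cells at mid-latitudes; purely illustrative.
    return (round(lat / cell_deg), round(lon / cell_deg))


def aggregate(samples, cell_deg=0.001):
    # samples: iterable of (lat, lon, level_db); returns {cell: mean level in dB}.
    powers = defaultdict(list)
    for lat, lon, level_db in samples:
        powers[grid_cell(lat, lon, cell_deg)].append(10 ** (level_db / 10.0))
    return {cell: 10.0 * math.log10(sum(p) / len(p))
            for cell, p in powers.items()}


if __name__ == "__main__":
    readings = [(53.3438, -6.2546, 68.0),   # invented street-level samples
                (53.3438, -6.2547, 71.0),
                (53.3452, -6.2546, 55.0)]
    for cell, level in sorted(aggregate(readings).items()):
        print(cell, f"{level:.1f} dB")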
David R. Choffnes, Fabián E. Bustamante, Zihui Ge Crowdsourcing Service-Level Network Event Detection Journal Article In Proc. of ACM SIGCOMM, 2010. @article{CSLNED, title = {Crowdsourcing Service-Level Network Event Detection}, author = {David R. Choffnes and Fabián E. Bustamante and Zihui Ge}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes10SIGCOMM.pdf}, year = {2010}, date = {2010-08-03}, journal = {In Proc. of ACM SIGCOMM}, abstract = {The user experience for networked applications is becoming a key benchmark for customers and network providers. Perceived user experience is largely determined by the frequency, duration and severity of network events that impact a service. While today’s networks implement sophisticated infrastructure that issues alarms for most failures, there remains a class of silent outages (e.g., caused by configuration errors) that are not detected. Further, existing alarms provide little information to help operators understand the impact of network events on services. Attempts to address this through infrastructure that monitors end-to-end performance for customers have been hampered by the cost of deployment and by the volume of data generated by these solutions. We present an alternative approach that pushes monitoring to applications on end systems and uses their collective view to detect network events and their impact on services - an approach we call Crowdsourcing Event Monitoring (CEM). This paper presents a general framework for CEM systems and demonstrates its effectiveness for a P2P application using a large dataset gathered from BitTorrent users and confirmed network events from two ISPs. We discuss how we designed and deployed a prototype CEM implementation as an extension to BitTorrent. This system performs online service-level network event detection through passive monitoring and correlation of performance in end-users’ applications.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The user experience for networked applications is becoming a key benchmark for customers and network providers. Perceived user experience is largely determined by the frequency, duration and severity of network events that impact a service. While today’s networks implement sophisticated infrastructure that issues alarms for most failures, there remains a class of silent outages (e.g., caused by configuration errors) that are not detected. Further, existing alarms provide little information to help operators understand the impact of network events on services. Attempts to address this through infrastructure that monitors end-to-end performance for customers have been hampered by the cost of deployment and by the volume of data generated by these solutions. We present an alternative approach that pushes monitoring to applications on end systems and uses their collective view to detect network events and their impact on services - an approach we call Crowdsourcing Event Monitoring (CEM). This paper presents a general framework for CEM systems and demonstrates its effectiveness for a P2P application using a large dataset gathered from BitTorrent users and confirmed network events from two ISPs. We discuss how we designed and deployed a prototype CEM implementation as an extension to BitTorrent. This system performs online service-level network event detection through passive monitoring and correlation of performance in end-users’ applications. |
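The corroboration idea at the heart of this approach can be sketched in a few lines: each host flags when its own performance falls well below its recent history, and an event is declared only when enough hosts in the same network flag simultaneously. The code below is a heavily simplified stand-in for CEM's likelihood-based detection, with invented window sizes and thresholds.

# Heavily simplified crowd-corroboration sketch; not CEM's actual detector.
# Each host compares new throughput samples against its own moving average;
# an ISP-level event is declared only if enough hosts flag a drop at once.
from collections import deque


class HostMonitor:
    def __init__(self, window=12, drop_factor=0.5):
        self.history = deque(maxlen=window)
        self.drop_factor = drop_factor

    def observe(self, throughput_kbps):
        # True if this sample is far below the host's recent average.
        baseline = (sum(self.history) / len(self.history)) if self.history else None
        self.history.append(throughput_kbps)
        return baseline is not None and throughput_kbps < self.drop_factor * baseline


def isp_event(drop_flags, min_fraction=0.3, min_hosts=5):
    # Corroborate local detections across hosts in the same ISP and interval.
    if len(drop_flags) < min_hosts:
        return False
    return sum(drop_flags) / len(drop_flags) >= min_fraction


if __name__ == "__main__":
    monitors = [HostMonitor() for _ in range(10)]
    for m in monitors:                                 # warm up with normal throughput
        for _ in range(12):
            m.observe(800.0)
    flags = [m.observe(200.0) for m in monitors]       # shared drop in one interval
    print("ISP-level event detected:", isp_event(flags))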
David R. Choffnes, Jordi Duch, Dean Malmgren, Roger Guimera, Fabián E. Bustamante, Luis Amaral Strange Bedfellows: Communities in BitTorrent Journal Article In Proc. of the 9th International Workshop on Peer-to-Peer Systems (IPTPS), 2010. @article{SB, title = {Strange Bedfellows: Communities in BitTorrent}, author = {David R. Choffnes and Jordi Duch and Dean Malmgren and Roger Guimera and Fabián E. Bustamante and Luis Amaral}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes10IPTPS.pdf}, year = {2010}, date = {2010-04-06}, journal = {In Proc. of the 9th International Workshop on Peer-to-Peer Systems (IPTPS)}, abstract = {While P2P systems benefit from large numbers of interconnected nodes, each of these connections provides an opportunity for eavesdropping. Using only the connection patterns gathered from 10,000 BitTorrent (BT) users during a one-month period, we determine whether randomized connection patterns give rise to communities of users. Even though connections in BT require not only shared interest in content, but also concurrent sessions, we find that strong communities naturally form -- users inside a typical community are 5 to 25 times more likely to connect to each other than with users outside. These strong communities enable guilt by association, where the behavior of an entire community of users can be inferred by monitoring one of its members.Our study shows that through a single observation point, an attacker trying to identify such communities can uncover 50% of the network within a distance of two hops. Finally, we propose and evaluate a practical solution that mitigates this threat.}, keywords = {}, pubstate = {published}, tppubtype = {article} } While P2P systems benefit from large numbers of interconnected nodes, each of these connections provides an opportunity for eavesdropping. Using only the connection patterns gathered from 10,000 BitTorrent (BT) users during a one-month period, we determine whether randomized connection patterns give rise to communities of users. Even though connections in BT require not only shared interest in content, but also concurrent sessions, we find that strong communities naturally form -- users inside a typical community are 5 to 25 times more likely to connect to each other than with users outside. These strong communities enable guilt by association, where the behavior of an entire community of users can be inferred by monitoring one of its members.Our study shows that through a single observation point, an attacker trying to identify such communities can uncover 50% of the network within a distance of two hops. Finally, we propose and evaluate a practical solution that mitigates this threat. |
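The "5 to 25 times more likely to connect" figure can be made concrete with a back-of-the-envelope calculation: given some community assignment and an observed connection graph, compare the density of within-community links to that of cross-community links. The sketch below does exactly that on an invented toy graph; it is not the community-detection or inference method used in the paper.

# Back-of-the-envelope affinity ratio for a given community assignment;
# not the paper's community-detection method. Compares within-community
# link density to cross-community link density.
from itertools import combinations


def affinity_ratio(edges, community_of):
    # edges: undirected (u, v) pairs, each listed once.
    # community_of: dict mapping node -> community id.
    within_possible = cross_possible = 0
    for u, v in combinations(community_of, 2):
        if community_of[u] == community_of[v]:
            within_possible += 1
        else:
            cross_possible += 1

    within = cross = 0
    for u, v in edges:
        if community_of[u] == community_of[v]:
            within += 1
        else:
            cross += 1

    within_density = within / within_possible if within_possible else 0.0
    cross_density = cross / cross_possible if cross_possible else 0.0
    return within_density / cross_density if cross_density else float("inf")


if __name__ == "__main__":
    community_of = {"a1": "A", "a2": "A", "a3": "A",
                    "b1": "B", "b2": "B", "b3": "B"}
    edges = [("a1", "a2"), ("a1", "a3"), ("a2", "a3"),
             ("b1", "b2"), ("b1", "b3"), ("b2", "b3"),
             ("a1", "b1")]                     # single cross-community link
    print(f"within-community links are {affinity_ratio(edges, community_of):.1f}x denser")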
David R. Choffnes, Fabián E. Bustamante Pitfalls for Testbed Evaluations of Internet Systems Journal Article In ACM SIGCOMM CCR, 2010. @article{PTEIS, title = { Pitfalls for Testbed Evaluations of Internet Systems}, author = {David R. Choffnes and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes10CCR.pdf}, year = {2010}, date = {2010-04-03}, journal = {In ACM SIGCOMM CCR}, abstract = {Today's open platforms for network measurement and distributed system research, which we collectively refer to as testbeds in this article, provide opportunities for controllable experimentation and evaluations of systems at the scale of hundreds or thousands of hosts. In this article, we identify several issues with extending results from such platforms to Internet wide perspectives. Specifically, we try to quantify the level of inaccuracy and incompleteness of testbed results when applied to the context of a large-scale peer-to-peer (P2P) system. Based on our results, we emphasize the importance of measurements in the appropriate environment when evaluating Internet-scale systems.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Today's open platforms for network measurement and distributed system research, which we collectively refer to as testbeds in this article, provide opportunities for controllable experimentation and evaluations of systems at the scale of hundreds or thousands of hosts. In this article, we identify several issues with extending results from such platforms to Internet wide perspectives. Specifically, we try to quantify the level of inaccuracy and incompleteness of testbed results when applied to the context of a large-scale peer-to-peer (P2P) system. Based on our results, we emphasize the importance of measurements in the appropriate environment when evaluating Internet-scale systems. |
David R. Choffnes, Mario A. Sánchez, Fabián E. Bustamante Network positioning from the edge: An empirical study of the effectiveness of network positioning in P2P systems Journal Article In Proc. of IEEE INFOCOM, 2010. @article{NPE, title = {Network positioning from the edge: An empirical study of the effectiveness of network positioning in P2P systems}, author = {David R. Choffnes and Mario A. Sánchez and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes10Infocom.pdf http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes10InfocomFinal.pdf}, year = {2010}, date = {2010-03-03}, journal = {In Proc. of IEEE INFOCOM}, abstract = {Network positioning systems provide an important service to large-scale P2P systems, potentially enabling clients to achieve higher performance, reduce cross-ISP traffic and improve the robustness of the system to failures. Because traces representative of this environment are generally unavailable, and there is no platform suited for experimentation at the appropriate scale, network positioning systems have been commonly implemented and evaluated in simulation and on research testbeds. The performance of network positioning remains an open question for large deployments at the edges of the network. This paper evaluates how four key classes of network po- sitioning systems fare when deployed at scale and measured in P2P systems where they are used. Using 2 billion network measurements gathered from more than 43,000 IP addresses probing over 8 million other IPs worldwide, we show that network positioning exhibits noticeably worse performance than previously reported in studies conducted on research testbeds. To explain this result, we identify several key properties of this environment that call into question fundamental assumptions driving network positioning research.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Network positioning systems provide an important service to large-scale P2P systems, potentially enabling clients to achieve higher performance, reduce cross-ISP traffic and improve the robustness of the system to failures. Because traces representative of this environment are generally unavailable, and there is no platform suited for experimentation at the appropriate scale, network positioning systems have been commonly implemented and evaluated in simulation and on research testbeds. The performance of network positioning remains an open question for large deployments at the edges of the network. This paper evaluates how four key classes of network po- sitioning systems fare when deployed at scale and measured in P2P systems where they are used. Using 2 billion network measurements gathered from more than 43,000 IP addresses probing over 8 million other IPs worldwide, we show that network positioning exhibits noticeably worse performance than previously reported in studies conducted on research testbeds. To explain this result, we identify several key properties of this environment that call into question fundamental assumptions driving network positioning research. |
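One common way to summarize how well a positioning system predicts latencies, and the kind of comparison the abstract reports, is the relative error between estimated and measured RTTs. The short sketch below computes per-pair relative errors and their median for arbitrary example values; the error definition is a common convention and is not tied to any specific system evaluated in the paper.

# Relative error of latency estimates against measured RTTs; example values
# are arbitrary and the |est - meas| / min(est, meas) definition is a common
# convention, not necessarily the one used in the paper.
from statistics import median


def relative_errors(pairs):
    # pairs: iterable of (estimated_ms, measured_ms), both positive.
    return [abs(est - meas) / min(est, meas)
            for est, meas in pairs if est > 0 and meas > 0]


if __name__ == "__main__":
    samples = [(80.0, 100.0), (45.0, 40.0), (150.0, 60.0)]
    errs = relative_errors(samples)
    print([round(e, 2) for e in errs], "median:", round(median(errs), 2))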
David R. Choffnes, Fabián E. Bustamante Taming the Torrent Journal Article In USENIX, 2010. @article{TTc, title = {Taming the Torrent}, author = {David R. Choffnes and Fabián E. Bustamante}, url = {http://typica.cs.northwestern.edu/wp-content/uploads/2019/02/DChoffnes10login.pdf}, year = {2010}, date = {2010-02-06}, journal = {In USENIX}, keywords = {}, pubstate = {published}, tppubtype = {article} } |
Guohan Lu, Yan Chen, Stefan Birrer, Fabian E. Bustamante, Xing Li POPI: A User-level Tool for Inferring Router Packet Forwarding Priority Journal Article In IEEE/ACM Transactions on Networking (ToN), 2010. @article{POPI, title = {POPI: A User-level Tool for Inferring Router Packet Forwarding Priority}, author = {Guohan Lu and Yan Chen and Stefan Birrer and Fabian E. Bustamante and Xing Li}, url = {https://ieeexplore.ieee.org/document/5233840}, year = {2010}, date = {2010-02-03}, journal = {In IEEE/ACM Transactions on Networking (ToN)}, abstract = {Packet forwarding prioritization (PFP) in routers is one of the mechanisms commonly available to network operators. PFP can have a significant impact on the accuracy of network measurements, the performance of applications and the effectiveness of network troubleshooting procedures. Despite its potential impacts, no information on PFP settings is readily available to end users. In this paper, we present an end-to-end approach for PFP inference and its associated tool, POPI. This is the first attempt to infer router packet forwarding priority through end-to-end measurement. POPI enables users to discover such network policies through measurements of packet losses of different packet types. We evaluated our approach via statistical analysis, simulation and wide-area experimentation in PlanetLab. We employed POPI to analyze 156 paths among 162 PlanetLab sites. POPI flagged 15 paths with multiple priorities, 13 of which were further validated through hop-by-hop loss rates measurements. In addition, we surveyed all related network operators and received responses for about half of them all confirming our inferences. Besides, we compared POPI with the inference mechanisms through other metrics such as packet reordering [called out-of-order (OOO)]. OOO is unable to find many priority paths such as those implemented via traffic policing. On the other hand, interestingly, we found it can detect existence of the mechanisms which induce delay differences among packet types such as slow processing path in the router and port-based load sharing. }, keywords = {}, pubstate = {published}, tppubtype = {article} } Packet forwarding prioritization (PFP) in routers is one of the mechanisms commonly available to network operators. PFP can have a significant impact on the accuracy of network measurements, the performance of applications and the effectiveness of network troubleshooting procedures. Despite its potential impacts, no information on PFP settings is readily available to end users. In this paper, we present an end-to-end approach for PFP inference and its associated tool, POPI. This is the first attempt to infer router packet forwarding priority through end-to-end measurement. POPI enables users to discover such network policies through measurements of packet losses of different packet types. We evaluated our approach via statistical analysis, simulation and wide-area experimentation in PlanetLab. We employed POPI to analyze 156 paths among 162 PlanetLab sites. POPI flagged 15 paths with multiple priorities, 13 of which were further validated through hop-by-hop loss rates measurements. In addition, we surveyed all related network operators and received responses for about half of them all confirming our inferences. Besides, we compared POPI with the inference mechanisms through other metrics such as packet reordering [called out-of-order (OOO)]. OOO is unable to find many priority paths such as those implemented via traffic policing. 
On the other hand, interestingly, we found it can detect existence of the mechanisms which induce delay differences among packet types such as slow processing path in the router and port-based load sharing. |
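The inference idea, sending probes of different packet types over the same path and asking whether their loss rates differ by more than chance, can be illustrated with a standard two-proportion test. The sketch below uses made-up probe counts and is a generic statistical check, not the POPI tool or its exact decision procedure.

# Generic two-proportion z-test on the loss rates of two packet types probed
# over the same path; an illustration of the idea, not the POPI tool itself.
import math


def loss_rate_z(lost_a, sent_a, lost_b, sent_b):
    # z-statistic for the difference between two observed loss rates.
    p_a, p_b = lost_a / sent_a, lost_b / sent_b
    pooled = (lost_a + lost_b) / (sent_a + sent_b)
    se = math.sqrt(pooled * (1 - pooled) * (1 / sent_a + 1 / sent_b))
    return (p_a - p_b) / se if se else 0.0


if __name__ == "__main__":
    # Made-up counts: 2,000 probes per type; type A (say, traffic to a
    # deprioritized port) loses 120 packets, type B loses 40.
    z = loss_rate_z(120, 2000, 40, 2000)
    print(f"z = {z:.2f}; |z| > 1.96 suggests the two types are treated differently")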
2009 |
Kai Chen, David R. Choffnes, Rahul Potharaju, Yan Chen, Fabián E. Bustamante Where the Sidewalk Ends: Extending the Internet AS Graph Using Traceroutes From P2P Users Journal Article In Proc. of CoNEXT, 2009. @article{WSEEI, title = {Where the Sidewalk Ends: Extending the Internet AS Graph Using Traceroutes From P2P Users}, author = {Kai Chen and David R. Choffnes and Rahul Potharaju and Yan Chen and Fabián E. Bustamante}, url = {http://aqualab.cs.northwestern.edu/wp-content/uploads/2019/02/KChen09Conext.pdf}, year = {2009}, date = {2009-12-06}, journal = {In Proc. of CoNEXT}, abstract = {An accurate Internet topology graph is important in many areas of networking, from deciding ISP business relationships to diagnosing network anomalies. Most Internet mapping efforts have derived the network structure, at the level of interconnected autonomous systems (ASes), from a limited number of either BGP- or traceroute-based data sources. While techniques for charting the topology continue to improve, the growth of the number of vantage points is significantly outpaced by the rapid growth of the Internet. In this paper, we argue that a promising approach to revealing the hidden areas of the Internet topology is through active measurement from an observation platform that scales with the growing Internet. By leveraging measurements performed by an extension to a popular P2P system, we show that this approach indeed exposes significant new topological information. Based on traceroute measurements from more than 992,000 IPs in over 3,700 ASes distributed across the Internet hierarchy, our proposed heuristics identify 23,914 new AS links not visible in the publicly-available BGP data -- 12.86% more customer-provider links and 40.99% more peering links, than previously reported. We validate our heuristics using data from a tier-1 ISP and show that they correctly filter out all false links introduced by public IP-to-AS mapping. We have made the identified set of links and their inferred relationships publically available.}, keywords = {}, pubstate = {published}, tppubtype = {article} } An accurate Internet topology graph is important in many areas of networking, from deciding ISP business relationships to diagnosing network anomalies. Most Internet mapping efforts have derived the network structure, at the level of interconnected autonomous systems (ASes), from a limited number of either BGP- or traceroute-based data sources. While techniques for charting the topology continue to improve, the growth of the number of vantage points is significantly outpaced by the rapid growth of the Internet. In this paper, we argue that a promising approach to revealing the hidden areas of the Internet topology is through active measurement from an observation platform that scales with the growing Internet. By leveraging measurements performed by an extension to a popular P2P system, we show that this approach indeed exposes significant new topological information. Based on traceroute measurements from more than 992,000 IPs in over 3,700 ASes distributed across the Internet hierarchy, our proposed heuristics identify 23,914 new AS links not visible in the publicly-available BGP data -- 12.86% more customer-provider links and 40.99% more peering links, than previously reported. We validate our heuristics using data from a tier-1 ISP and show that they correctly filter out all false links introduced by public IP-to-AS mapping. We have made the identified set of links and their inferred relationships publically available. |
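The link-extraction step behind these numbers reduces, at its core, to collapsing traceroute IP paths into AS paths and keeping the adjacencies that BGP-derived data does not contain. The sketch below shows that set-difference skeleton; the ip_to_as mapping, the traceroutes, and the BGP link set are caller-supplied placeholders, and the paper's heuristics for filtering false links (IXPs, sibling ASes, unmapped hops, and so on) are deliberately omitted.

# Minimal skeleton for extracting candidate new AS links from traceroutes;
# ip_to_as, traceroutes and bgp_links are placeholders, and the paper's
# filtering heuristics for false links are omitted.
def as_path(ip_path, ip_to_as):
    # Collapse an IP-level path to an AS-level path, dropping unmapped hops
    # and consecutive duplicates.
    path = []
    for ip in ip_path:
        asn = ip_to_as.get(ip)
        if asn is not None and (not path or path[-1] != asn):
            path.append(asn)
    return path


def new_links(traceroutes, ip_to_as, bgp_links):
    # Return AS adjacencies seen in traceroutes but absent from BGP data.
    seen = set()
    for ip_path in traceroutes:
        path = as_path(ip_path, ip_to_as)
        seen.update(frozenset(pair) for pair in zip(path, path[1:]))
    known = {frozenset(link) for link in bgp_links}
    return seen - known


if __name__ == "__main__":
    ip_to_as = {"198.51.100.1": 64500, "203.0.113.1": 64501, "192.0.2.1": 64502}
    traceroutes = [["198.51.100.1", "203.0.113.1", "192.0.2.1"]]
    bgp_links = {(64500, 64501)}
    print(new_links(traceroutes, ip_to_as, bgp_links))   # {frozenset({64501, 64502})}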