@article{WiegmannKerstenSenaratneetal.,
  author    = {Wiegmann, Matti and Kersten, Jens and Senaratne, Hansi and Potthast, Martin and Klan, Friederike and Stein, Benno},
  title     = {Opportunities and risks of disaster data from social media: a systematic review of incident information},
  journal   = {Natural Hazards and Earth System Sciences},
  volume    = {21},
  number    = {5},
  year      = {2021},
  publisher = {European Geophysical Society},
  address   = {Katlenburg-Lindau},
  doi       = {10.5194/nhess-21-1431-2021},
  url       = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210804-44634},
  pages     = {1431--1444},
  abstract  = {Compiling and disseminating information about incidents and disasters are key to disaster management and relief. But due to inherent limitations of the acquisition process, the required information is often incomplete or missing altogether. To fill these gaps, citizen observations spread through social media are widely considered to be a promising source of relevant information, and many studies propose new methods to tap this resource. Yet, the overarching question of whether and under which circumstances social media can supply relevant information (both qualitatively and quantitatively) still remains unanswered. To shed some light on this question, we review 37 disaster and incident databases covering 27 incident types, compile a unified overview of the contained data and their collection processes, and identify the missing or incomplete information. The resulting data collection reveals six major use cases for social media analysis in incident data collection: (1) impact assessment and verification of model predictions, (2) narrative generation, (3) recruiting citizen volunteers, (4) supporting weakly institutionalized areas, (5) narrowing surveillance areas, and (6) reporting triggers for periodical surveillance. Furthermore, we discuss the benefits and shortcomings of using social media data for closing information gaps related to incidents and disasters.},
  subject   = {Katastrophe},
  language  = {en},
}

@article{VakkariVoelskePotthastetal.,
  author    = {Vakkari, Pertti and V{\"o}lske, Michael and Potthast, Martin and Hagen, Matthias and Stein, Benno},
  title     = {Predicting essay quality from search and writing behavior},
  journal   = {Journal of the Association for Information Science and Technology},
  volume    = {72},
  number    = {7},
  year      = {2021},
  publisher = {Wiley},
  address   = {Hoboken, NJ},
  doi       = {10.1002/asi.24451},
  url       = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210804-44692},
  pages     = {839--852},
  abstract  = {Few studies have investigated how search behavior affects complex writing tasks. We analyze a dataset of 150 long essays whose authors searched the ClueWeb09 corpus for source material, while all querying, clicking, and writing activity was meticulously recorded. We model the effect of search and writing behavior on essay quality using path analysis. Since the boil-down and build-up writing strategies identified in previous research have been found to affect search behavior, we model each writing strategy separately. Our analysis shows that the search process contributes significantly to essay quality through both direct and mediated effects, while the author's writing strategy moderates this relationship. Our models explain 25-35\% of the variation in essay quality through rather simple search and writing process characteristics alone, a fact that has implications on how search engines could personalize result pages for writing tasks. Authors' writing strategies and associated searching patterns differ, producing differences in essay quality. In a nutshell: essay quality improves if search and writing strategies harmonize---build-up writers benefit from focused, in-depth querying, while boil-down writers fare better with a broader and shallower querying strategy.},
  subject   = {Information Retrieval},
  language  = {en},
}

@phdthesis{Potthast,
  author        = {Potthast, Martin},
  title         = {Technologies for Reusing Text from the Web},
  school        = {Bauhaus-Universit{\"a}t Weimar},
  year          = {2012},
  internal-note = {year inferred from URN date stamp 20120217 -- verify against the printed thesis},
  doi           = {10.25643/bauhaus-universitaet.1566},
  url           = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20120217-15663},
  pages         = {237},
  abstract      = {Texts from the web can be reused individually or in large quantities. The former is called text reuse and the latter language reuse. We first present a comprehensive overview of the different ways in which text and language is reused today, and how exactly information retrieval technologies can be applied in this respect. The remainder of the thesis then deals with specific retrieval tasks. In general, our contributions consist of models and algorithms, their evaluation, and for that purpose, large-scale corpus construction. The thesis divides into two parts. The first part introduces technologies for text reuse detection, and our contributions are as follows: (1) A unified view of projecting-based and embedding-based fingerprinting for near-duplicate detection and the first time evaluation of fingerprint algorithms on Wikipedia revision histories as a new, large-scale corpus of near-duplicates. (2) A new retrieval model for the quantification of cross-language text similarity, which gets by without parallel corpora. We have evaluated the model in comparison to other models on many different pairs of languages. (3) An evaluation framework for text reuse and particularly plagiarism detectors, which consists of tailored detection performance measures and a large-scale corpus of automatically generated and manually written plagiarism cases. The latter have been obtained via crowdsourcing. This framework has been successfully applied to evaluate many different state-of-the-art plagiarism detection approaches within three international evaluation competitions. The second part introduces technologies that solve three retrieval tasks based on language reuse, and our contributions are as follows: (4) A new model for the comparison of textual and non-textual web items across media, which exploits web comments as a source of information about the topic of an item. In this connection, we identify web comments as a largely neglected information source and introduce the rationale of comment retrieval. (5) Two new algorithms for query segmentation, which exploit web n-grams and Wikipedia as a means of discerning the user intent of a keyword query. Moreover, we crowdsource a new corpus for the evaluation of query segmentation which surpasses existing corpora by two orders of magnitude. (6) A new writing assistance tool called Netspeak, which is a search engine for commonly used language. Netspeak indexes the web in the form of web n-grams as a source of writing examples and implements a wildcard query processor on top of it.},
  subject       = {Information Retrieval},
  language      = {en},
}

@article{FrommholzHaiderMPotthastetal.,
  author   = {Frommholz, Ingo and al-Khateeb, Haider M. and Potthast, Martin and Ghasem, Zinnar and Shukla, Mitul and Short, Emma},
  title    = {On Textual Analysis and Machine Learning for Cyberstalking Detection},
  journal  = {Datenbank-Spektrum},
  year     = {2016},
  doi      = {10.1007/s13222-016-0221-x},
  url      = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170418-31352},
  pages    = {127--135},
  abstract = {Cyber security has become a major concern for users and businesses alike. Cyberstalking and harassment have been identified as a growing anti-social problem. Besides detecting cyberstalking and harassment, there is the need to gather digital evidence, often by the victim. To this end, we provide an overview of and discuss relevant technological means, in particular coming from text analytics as well as machine learning, that are capable to address the above challenges. We present a framework for the detection of text-based cyberstalking and the role and challenges of some core techniques such as author identification, text classification and personalisation. We then discuss PAN, a network and evaluation initiative that focusses on digital text forensics, in particular author identification.},
  subject  = {Text Mining},
  language = {en},
}