@misc{Theiler,
  type = {Master Thesis},
  author = {Theiler, Michael},
  title = {Interaktive Visualisierung von Qualit{\"a}tsdefiziten komplexer Bauwerksinformationsmodelle auf Basis der Industry Foundation Classes (IFC) in einer webbasierten Umgebung},
  doi = {10.25643/bauhaus-universitaet.1786},
  url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20121214-17869},
  school = {Bauhaus-Universit{\"a}t Weimar},
  pages = {93},
  abstract = {As building information models (BIM) are used for an ever-growing range of applications, quality assurance of their content is of great importance. It has to be carried out for every software application involved in the data exchange, in accordance with the project goals. With the Industry Foundation Classes (IFC), an established format for describing and exchanging such models is available. For the quality assurance process, a server-based test environment will become part of the new IFC certification procedure. To this end, the "iabi - Institut f{\"u}r angewandte Bauinformatik", in cooperation with "buildingSMART e.V." (http://www.buildingsmart.de), has implemented a Global Testing Documentation Server (GTDS). The GTDS is a database-backed web application that pursues the following goals: • providing a tool for the qualitative testing of IFC-based models • supporting the communication between IFC developers and users • documenting the quality of IFC-based software applications • providing a platform for the certification of IFC applications. The subject of this thesis is the design and exemplary implementation of a tool for the interactive visualization of quality deficiencies that the GTDS has detected in a model. The exemplary implementation is to build on the OPEN IFC TOOLS (http://www.openifctools.org).},
  subject = {BIM},
  language = {de}
}

@article{TonnTatarin,
  author = {Tonn, Christian and Tatarin, Ren{\'e}},
  title = {Volumen Rendering in der Architektur: {\"U}berlagerung und Kombination von 3D Voxel Volumendaten mit 3D Geb{\"a}udemodellen},
  doi = {10.25643/bauhaus-universitaet.2671},
  url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160822-26718},
  abstract = {Volume rendering is a visualization technique for presenting various kinds of spatial measurement and simulation data in a descriptive, interactive graphical form. This paper presents a method for overlaying multiple volume data sets with an architectural surface model. This complex rendering computation is carried out with hardware-accelerated shaders on the graphics card. To this end, the paper introduces the implemented software prototype "VolumeRendering". Besides the interactive computation method, emphasis was placed on user-friendly operation, the goal being to enable planning specialists to evaluate the volume data easily. Overlaying, for example, the results of different measurement methods with a surface model yields synergies and new evaluation possibilities.
Finally, the application of the software prototype is illustrated with examples from an interdisciplinary research project.},
  subject = {Multiple Volume Rendering},
  language = {de}
}

@phdthesis{Potthast,
  author = {Potthast, Martin},
  title = {Technologies for Reusing Text from the Web},
  doi = {10.25643/bauhaus-universitaet.1566},
  url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20120217-15663},
  school = {Bauhaus-Universit{\"a}t Weimar},
  pages = {237},
  abstract = {Texts from the web can be reused individually or in large quantities. The former is called text reuse and the latter language reuse. We first present a comprehensive overview of the different ways in which text and language are reused today, and how exactly information retrieval technologies can be applied in this respect. The remainder of the thesis then deals with specific retrieval tasks. In general, our contributions consist of models and algorithms, their evaluation, and, for that purpose, large-scale corpus construction. The thesis is divided into two parts. The first part introduces technologies for text reuse detection, and our contributions are as follows: (1) A unified view of projecting-based and embedding-based fingerprinting for near-duplicate detection, and the first evaluation of fingerprint algorithms on Wikipedia revision histories as a new, large-scale corpus of near-duplicates. (2) A new retrieval model for the quantification of cross-language text similarity, which gets by without parallel corpora. We have evaluated the model in comparison to other models on many different pairs of languages. (3) An evaluation framework for text reuse and particularly plagiarism detectors, which consists of tailored detection performance measures and a large-scale corpus of automatically generated and manually written plagiarism cases. The latter have been obtained via crowdsourcing. This framework has been successfully applied to evaluate many different state-of-the-art plagiarism detection approaches within three international evaluation competitions. The second part introduces technologies that solve three retrieval tasks based on language reuse, and our contributions are as follows: (4) A new model for the comparison of textual and non-textual web items across media, which exploits web comments as a source of information about the topic of an item. In this connection, we identify web comments as a largely neglected information source and introduce the rationale of comment retrieval. (5) Two new algorithms for query segmentation, which exploit web n-grams and Wikipedia as a means of discerning the user intent of a keyword query. Moreover, we crowdsource a new corpus for the evaluation of query segmentation which surpasses existing corpora by two orders of magnitude. (6) A new writing assistance tool called Netspeak, which is a search engine for commonly used language. Netspeak indexes the web in the form of web n-grams as a source of writing examples and implements a wildcard query processor on top of it.},
  subject = {Information Retrieval},
  language = {en}
}

@mastersthesis{Mueller,
  type = {Bachelor Thesis},
  author = {M{\"u}ller, Naira},
  title = {Erweiterung von Fliplife mit bauphysikalischen Inhalten},
  doi = {10.25643/bauhaus-universitaet.1676},
  url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20120704-16763},
  school = {Bauhaus-Universit{\"a}t Weimar},
  pages = {105},
  abstract = {This thesis develops a concept that extends Fliplife with a building physics career path.
By way of example, building physics content as well as game mechanics that are compatible with the game concept and convey knowledge were implemented in the game.},
  subject = {Social Game},
  language = {de}
}

@mastersthesis{Held2011,
  type = {Bachelor Thesis},
  author = {Held, Janina},
  title = {Entwurf eines Spieler-Modells f{\"u}r eine erweiterbare Spielplattform zur Ausbildung in der Bauphysik},
  doi = {10.25643/bauhaus-universitaet.1524},
  url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20120117-15249},
  school = {Bauhaus-Universit{\"a}t Weimar},
  pages = {65},
  year = {2011},
  abstract = {In the project Intelligentes Lernen (Intelligent Learning), the chairs of Content Management and Web Technologies, Virtual Reality Systems, and Building Physics at the Bauhaus-Universit{\"a}t Weimar work on the development of innovative information technologies for eLearning environments. In the subareas of retrieval, extraction, and visualization of large document collections, as well as simulation-based and plan-based knowledge transfer, algorithms and tools are investigated that make eLearning systems more powerful and thus optimize learning outcomes. In the area of simulation-based knowledge transfer, the goal of the project is to develop a multiplayer online game (MOG) to support education in building physics. Within this bachelor thesis, a player model for managing the player-specific data is designed for this educational software and integrated into the existing framework. The focus of the thesis lies in organizing the skills a player has acquired and in selecting suitable game tasks adapted to the player's state of knowledge. For use in eLearning, the extensibility of the model with new learning complexes is an essential requirement.},
  subject = {Skill},
  language = {de}
}

@phdthesis{Anderka,
  author = {Anderka, Maik},
  title = {Analyzing and Predicting Quality Flaws in User-generated Content: The Case of Wikipedia},
  doi = {10.25643/bauhaus-universitaet.1977},
  url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20130709-19778},
  school = {Bauhaus-Universit{\"a}t Weimar},
  abstract = {Web applications that are based on user-generated content are often criticized for containing low-quality information; a popular example is the online encyclopedia Wikipedia. The major points of criticism pertain to the accuracy, neutrality, and reliability of information. The identification of low-quality information is an important task, since for a huge number of people around the world it has become a habit to first visit Wikipedia in case of an information need. Existing research on quality assessment in Wikipedia either investigates only small samples of articles or else deals with the classification of content into high-quality or low-quality. This thesis goes further: it targets the investigation of quality flaws, thus providing specific indications of the respects in which low-quality content needs improvement. The original contributions of this thesis, which relate to the fields of user-generated content analysis, data mining, and machine learning, can be summarized as follows: (1) We propose the investigation of quality flaws in Wikipedia based on user-defined cleanup tags. Cleanup tags are commonly used in the Wikipedia community to tag content that has some shortcomings. Our approach is based on the hypothesis that each cleanup tag defines a particular quality flaw.
(2) We provide the first comprehensive breakdown of Wikipedia's quality flaw structure. We present a flaw organization schema, and we conduct an extensive exploratory data analysis which reveals (a) the flaws that actually exist, (b) the distribution of flaws in Wikipedia, and (c) the extent of flawed content. (3) We present the first breakdown of Wikipedia's quality flaw evolution. We consider the entire history of the English Wikipedia from 2001 to 2012, which comprises more than 508 million page revisions, summing up to 7.9 TB. Our analysis reveals (a) how the incidence and the extent of flaws have evolved and (b) how the handling and the perception of flaws have changed over time. (4) We are the first to operationalize an algorithmic prediction of quality flaws in Wikipedia. We cast quality flaw prediction as a one-class classification problem, develop a tailored quality flaw model, and employ a dedicated one-class machine learning approach. A comprehensive evaluation based on human-labeled Wikipedia articles underlines the practical applicability of our approach.},
  subject = {Data Mining},
  language = {en}
}

@phdthesis{Lipka,
  author = {Lipka, Nedim},
  title = {Modeling Non-Standard Text Classification Tasks},
  doi = {10.25643/bauhaus-universitaet.1862},
  url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20130307-18626},
  school = {Bauhaus-Universit{\"a}t Weimar},
  abstract = {Text classification deals with discovering knowledge in texts and is used for extracting, filtering, or retrieving information in streams and collections. The discovery of knowledge is operationalized by modeling text classification tasks, which is mainly a human-driven engineering process. The outcome of this process, a text classification model, is used to inductively learn a text classification solution from a priori classified examples. The building blocks of modeling text classification tasks cover four aspects: (1) the way examples are represented, (2) the way examples are selected, (3) the way classifiers learn from examples, and (4) the way models are selected. This thesis proposes methods that improve the prediction quality of text classification solutions for unseen examples, especially for non-standard tasks where standard models do not fit. The original contributions are related to the aforementioned building blocks: (1) Several topic-orthogonal text representations are studied in the context of non-standard tasks, and a new representation, namely co-stems, is introduced. (2) A new active learning strategy that goes beyond standard sampling is examined. (3) A new one-class ensemble for improving the effectiveness of one-class classification is proposed.
(4) A new model selection framework to cope with subclass distribution shifts that occur in dynamic environments is introduced.},
  subject = {Text Classification},
  language = {en}
}

@inproceedings{TreyerKleinKoenigetal.,
  author = {Treyer, Lukas and Klein, Bernhard and K{\"o}nig, Reinhard and Meixner, Christine},
  title = {Lightweight urban computation interchange (LUCI) system},
  series = {FOSS4G 2015 Conference},
  booktitle = {FOSS4G 2015 Conference},
  publisher = {FOSS4G},
  address = {Seoul, South Korea},
  doi = {10.25643/bauhaus-universitaet.2504},
  url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160118-25042},
  pages = {12},
  abstract = {In this paper we introduce LUCI, a Lightweight Urban Calculation Interchange system, designed to bring the advantages of a calculation and content coordination system to small planning and design groups by means of open-source middleware. The middleware focuses on problems typical of urban planning and therefore features a geo-data repository as well as a job runtime administration to coordinate simulation models and their multiple views. The described system architecture is accompanied by two exemplary use cases that have been used to test and further develop our concepts and implementations.},
  subject = {Architecture},
  language = {en}
}

@inproceedings{ChirkinKoenig,
  author = {Chirkin, Artem and K{\"o}nig, Reinhard},
  title = {Concept of Interactive Machine Learning in Urban Design Problems},
  publisher = {ACM New York, NY, USA},
  address = {San Jose, CA, USA},
  doi = {10.25643/bauhaus-universitaet.2600},
  url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160622-26000},
  pages = {10--13},
  abstract = {This work presents a concept of interactive machine learning in a human design process. An urban design problem is viewed as a multiple-criteria optimization problem. A defining feature of an urban design problem is the dependence of the design goal on the context of the problem. We model the design goal as a randomized fitness measure that depends on the context. In terms of multiple-criteria decision analysis (MCDA), the defined measure corresponds to a subjective expected utility of the user. In the first stage of the proposed approach, we let the algorithm explore the design space using clustering techniques. The second stage is an interactive design loop: the user makes a proposal, then the program optimizes it, gets the user's feedback, and returns control over the application interface.},
  subject = {Urban Design},
  language = {en}
}

@article{TreyerKleinKoenigetal.,
  author = {Treyer, Lukas and Klein, Bernhard and K{\"o}nig, Reinhard and Meixner, Christine},
  title = {Lightweight Urban Computation Interchange (LUCI): A System to Couple Heterogeneous Simulations and Views},
  series = {Spatial Information Research},
  journal = {Spatial Information Research},
  doi = {10.25643/bauhaus-universitaet.2603},
  url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160622-26037},
  pages = {1--12},
  abstract = {In this paper we introduce LUCI, a Lightweight Urban Calculation Interchange system, designed to bring the advantages of a calculation and content coordination system to small planning and design groups by means of open-source middleware. The middleware focuses on problems typical of urban planning and therefore features a geo-data repository as well as a job runtime administration to coordinate simulation models and their multiple views.
The described system architecture is accompanied by two exemplary use cases that have been used to test and further develop our concepts and implementations.},
  language = {en}
}