@phdthesis{Gerold, author = {Gerold, Fabian}, title = {Konzepte zur interaktiven Entwurfsraum-Exploration im Tragwerksentwurf}, doi = {10.25643/bauhaus-universitaet.2153}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20140408-21532}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {101}, abstract = {The design space for the design of a load-bearing structure is an n-dimensional space spanned by all free parameters of the model. Traditionally, only a few points of this space are evaluated by numerical (computer-based) simulation, mostly on the basis of the finite element method. Several factors mean that a simulation model today often passes through many revisions: on the one hand, planning changes frequently occur; on the other hand, the examination of design alternatives and the search for an optimum are often desirable. In this work, the sequential file input interface of an existing finite element framework is replaced by a network interface that meets the requirements of an interactive way of working. The interface designed here thus allows interactive, incremental model modifications as well as queries of status and analysis results via a bidirectional interface. The goal of this dissertation is the combination of interactive numerical simulation and interoperability through the application of building information modelling concepts in structural design. The written thesis describes the concept and its prototypical implementation.}, subject = {Interaktive numerische Simulation}, language = {de} } @inproceedings{KoenigTreyerSchmitt, author = {K{\"o}nig, Reinhard and Treyer, Lukas and Schmitt, Gerhard}, title = {Graphical smalltalk with my optimization system for urban planning tasks}, series = {31st eCAADe Conference - Volume 2}, booktitle = {31st eCAADe Conference - Volume 2}, publisher = {TU Delft}, address = {Delft, Netherlands}, doi = {10.25643/bauhaus-universitaet.2517}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160121-25171}, pages = {195 -- 203}, abstract = {Based on the description of a conceptual framework for the representation of planning problems on various scales, we introduce an evolutionary design optimization system. This system is exemplified by means of the generation of street networks with locally defined properties for centrality. We show three different scenarios for planning requirements and evaluate the resulting structures with respect to the requirements of our framework. 
Finally, the potentials and challenges of the presented approach are discussed in detail.}, subject = {St{\"a}dtebau}, language = {en} } @techreport{KoenigTapiasSchmitt, author = {K{\"o}nig, Reinhard and Tapias, Estefania and Schmitt, Gerhard}, title = {New Methods in Urban Analysis and Simulation: Documentation of teaching results from the autumn semester 2013}, doi = {10.25643/bauhaus-universitaet.2516}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160121-25168}, pages = {60}, abstract = {Documentation of teaching results from the autumn semester 2013 at ETH Zurich}, subject = {St{\"a}dtebau}, language = {en} } @phdthesis{Moehring, author = {Moehring, Mathias}, title = {Realistic Interaction with Virtual Objects within Arm's Reach}, doi = {10.25643/bauhaus-universitaet.1859}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20130301-18592}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {124}, abstract = {The automotive industry requires realistic virtual reality applications more than other domains to increase the efficiency of product development. Currently, the visual quality of virtual environments resembles reality, but interaction within these environments is usually far from what is known in everyday life. Several realistic research approaches exist; however, they are still not all-encompassing enough to be usable in industrial processes. This thesis realizes lifelike direct multi-hand and multi-finger interaction with arbitrary objects, and proposes algorithmic and technical improvements that also approach lifelike usability. In addition, the thesis proposes methods to measure the effectiveness and usability of such interaction techniques and discusses different types of grasping feedback that support the user during interaction. Realistic and reliable interaction is reached through the combination of robust grasping heuristics and plausible pseudophysical object reactions. The easy-to-compute grasping rules use the objects' surface normals, and mimic human grasping behavior. The novel concept of Normal Proxies increases grasping stability and diminishes challenges induced by adverse normals. The intricate act of picking up thin and tiny objects remains challenging for some users. These cases are further supported by the consideration of finger pinches, which are measured with a specialized finger tracking device. With regard to typical object constraints, realistic object motion is geometrically calculated as a plausible reaction to user input. The resulting direct finger-based interaction technique enables realistic and intuitive manipulation of arbitrary objects. The thesis proposes two methods that prove and compare effectiveness and usability. An expert review indicates that experienced users quickly familiarize themselves with the technique. A quantitative and qualitative user study shows that direct finger-based interaction is preferred over indirect interaction in the context of functional car assessments. While controller-based interaction is more robust, the direct finger-based interaction provides greater realism, and becomes nearly as reliable when the pinch-sensitive mechanism is used. At present, the haptic channel is not used in industrial virtual reality applications; it is therefore available for grasping feedback, which improves the users' understanding of the grasping situation. This thesis realizes a novel pressure-based tactile feedback at the fingertips. 
As an alternative, vibro-tactile feedback at the same location is realized, as is visual feedback through the coloring of grasp-involved finger segments. The feedback approaches are also compared within the user study, which reveals that grasping feedback is a requirement to judge grasp status and that tactile feedback improves interaction independently of the display system used. The considerably stronger vibrational tactile feedback can quickly become annoying during interaction. The interaction improvements and hardware enhancements make it possible to interact with virtual objects in a realistic and reliable manner. By addressing realism and reliability, this thesis paves the way for the virtual evaluation of human-object interaction, which is necessary for a broader application of virtual environments in the automotive industry and other domains.}, subject = {Virtuelle Realit{\"a}t}, language = {en} } @phdthesis{Lux, author = {Lux, Christopher}, title = {A Data-Virtualization System for Large Model Visualization}, doi = {10.25643/bauhaus-universitaet.1985}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20130725-19855}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {211}, abstract = {Interactive scientific visualizations are widely used for the visual exploration and examination of physical data resulting from measurements or simulations. Driven by technical advancements in data acquisition and simulation technologies, especially in the geo-scientific domain, large amounts of highly detailed subsurface data are generated. The oil and gas industry is particularly pushing such developments, as hydrocarbon reservoirs are increasingly difficult to discover and exploit. Suitable visualization techniques are vital for the discovery of the reservoirs as well as their development and production. However, the ever-growing scale and complexity of geo-scientific data sets result in an expanding disparity between the size of the data and the capabilities of current computer systems with regard to limited memory and computing resources. In this thesis, we present a unified out-of-core data-virtualization system supporting geo-scientific data sets consisting of multiple large seismic volumes and height-field surfaces, wherein each data set may exceed the size of the graphics memory or possibly even the main memory. Current data sets fall within the range of hundreds of gigabytes up to terabytes in size. Through the mutual utilization of memory and bandwidth resources by multiple data sets, our data-management system is able to share and balance limited system resources among different data sets. We employ multi-resolution methods based on hierarchical octree and quadtree data structures to generate level-of-detail working sets of the data stored in main memory and graphics memory for rendering. The working set generation in our system is based on a common feedback mechanism with inherent support for translucent geometric and volumetric data sets. This feedback mechanism collects information about required levels of detail during the rendering process and is capable of directly resolving data visibility without the application of any costly occlusion culling approaches. A central goal of the proposed out-of-core data-management system is an effective virtualization of large data sets. 
Through an abstraction of the level-of-detail working sets, our system allows developers to work with extremely large data sets independent of their complex internal data representations and physical memory layouts. Based on this out-of-core data virtualization infrastructure, we present distinct rendering approaches for specific visualization problems of large geo-scientific data sets. We demonstrate the application of our data virtualization system and show how multi-resolution data can be treated exactly the same way as regular data sets during the rendering process. An efficient volume ray casting system is presented for the rendering of multiple arbitrarily overlapping multi-resolution volume data sets. Binary space-partitioning volume decomposition of the bounding boxes of the cube-shaped volumes is used to identify the overlapping and non-overlapping volume regions in order to optimize the rendering process. We further propose a ray casting-based rendering system for the visualization of geological subsurface models consisting of multiple very detailed height fields. The rendering of an entire stack of height-field surfaces is accomplished in a single rendering pass using a two-level acceleration structure, which combines a minimum-maximum quadtree for empty-space skipping and sorted lists of depth intervals to restrict ray intersection searches to relevant height fields and depth ranges. Ultimately, we present a unified rendering system for the visualization of entire geological models consisting of highly detailed stacked horizon surfaces and massive volume data. We demonstrate a single-pass ray casting approach facilitating correct visual interaction between distinct translucent model components, while increasing the rendering efficiency by reducing the processing overhead of potentially invisible parts of the model. The combination of image-order rendering approaches and the level-of-detail feedback mechanism used by our out-of-core data-management system inherently accounts for occlusions of different data types without the application of costly culling techniques. The unified out-of-core data-management and virtualization infrastructure considerably facilitates the implementation of complex visualization systems. We demonstrate its applicability for the visualization of large geo-scientific data sets using output-sensitive rendering techniques. As a result, the magnitude and multitude of data sets that can be interactively visualized are significantly increased compared to existing approaches.}, subject = {Computer Graphics}, language = {en} } @phdthesis{Fleischmann, author = {Fleischmann, Ewan}, title = {Analysis and Design of Blockcipher Based Cryptographic Algorithms}, doi = {10.25643/bauhaus-universitaet.1983}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20130722-19835}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {272}, abstract = {This thesis focuses on the analysis and design of hash functions and authenticated encryption schemes that are blockcipher based. We give an introduction to these fields of research - taking a blockcipher-based point of view - with special emphasis on double-length, double-call blockcipher-based compression functions. The first main topic (thesis parts I - III) is the analysis and design of hash functions. We start with a collision security analysis of some well-known double-length blockcipher-based compression functions and hash functions: Abreast-DM, Tandem-DM, and MDC-4. 
We also propose new double-length compression functions that have elevated collision security guarantees. We complement the collision analysis with a preimage analysis by stating (near) optimal security results for Abreast-DM, Tandem-DM, and Hirose-DM. Also, some generalizations are discussed. These are the first preimage security results for blockcipher-based double-length hash functions that go beyond the birthday barrier. We then raise the abstraction level and analyze the notion of 'hash function indifferentiability from a random oracle'. We thus no longer focus on how to obtain a good compression function but, instead, on how to obtain a good hash function using (other) cryptographic primitives. In particular, we give some examples where this strong notion of hash function security might give questionable advice for building a practical hash function. In the second main topic (thesis part IV), which is on authenticated encryption schemes, we present an on-line authenticated encryption scheme, McOEx, that simultaneously achieves privacy and authenticity and is secure against nonce misuse. It is the first dedicated scheme that achieves high standards of security and - at the same time - is on-line computable.}, subject = {Kryptologie}, language = {en} } @phdthesis{Anderka, author = {Anderka, Maik}, title = {Analyzing and Predicting Quality Flaws in User-generated Content: The Case of Wikipedia}, doi = {10.25643/bauhaus-universitaet.1977}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20130709-19778}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {Web applications that are based on user-generated content are often criticized for containing low-quality information; a popular example is the online encyclopedia Wikipedia. The major points of criticism pertain to the accuracy, neutrality, and reliability of information. The identification of low-quality information is an important task, since for a huge number of people around the world it has become a habit to visit Wikipedia first in case of an information need. Existing research on quality assessment in Wikipedia either investigates only small samples of articles, or else deals with the classification of content as high-quality or low-quality. This thesis goes further: it targets the investigation of quality flaws, thus providing specific indications of the respects in which low-quality content needs improvement. The original contributions of this thesis, which relate to the fields of user-generated content analysis, data mining, and machine learning, can be summarized as follows: (1) We propose the investigation of quality flaws in Wikipedia based on user-defined cleanup tags. Cleanup tags are commonly used in the Wikipedia community to tag content that has some shortcomings. Our approach is based on the hypothesis that each cleanup tag defines a particular quality flaw. (2) We provide the first comprehensive breakdown of Wikipedia's quality flaw structure. We present a flaw organization schema, and we conduct an extensive exploratory data analysis which reveals (a) the flaws that actually exist, (b) the distribution of flaws in Wikipedia, and (c) the extent of flawed content. (3) We present the first breakdown of Wikipedia's quality flaw evolution. We consider the entire history of the English Wikipedia from 2001 to 2012, which comprises more than 508 million page revisions, summing up to 7.9 TB. 
Our analysis reveals (a) how the incidence and the extent of flaws have evolved, and (b) how the handling and the perception of flaws have changed over time. (4) We are the first to operationalize an algorithmic prediction of quality flaws in Wikipedia. We cast quality flaw prediction as a one-class classification problem, develop a tailored quality flaw model, and employ a dedicated one-class machine learning approach. A comprehensive evaluation based on human-labeled Wikipedia articles underlines the practical applicability of our approach.}, subject = {Data Mining}, language = {en} }