@techreport{Schaber,
  author        = {Schaber, Carsten},
  title         = {The Flow of People as an Indicator for the Appraisal of {HST} related Strategies and Interventions into Urban Space},
  institution   = {Bauhaus-Universit{\"a}t Weimar},
  doi           = {10.25643/bauhaus-universitaet.2115},
  url           = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20140205-21159},
  pages         = {69},
  abstract      = {This paper divides into a theoretical and a practical part. The former describes the relevance of the flow of people for urban development and the appraisal of HST related issues. Further Space Syntax and its main ideas and measures are introduced, like the role of axial maps and their preparation for example. Part one also contains background information about the collection of data on site in Leipzig. The second part exemplifies the case of the City-Tunnel Project in Leipzig, Germany and the practical use of the Space Syntax method. The project stands for the implementation of a future regional train network in the wider metropolitan area of Leipzig and Halle},
  subject       = {Urbanistik, St{\"a}dtebau},
  language      = {en},
  internal-note = {review: required year field missing; URN timestamp suggests 2014 -- verify},
}

@techreport{GrossFetterSeifert2007,
  author      = {Gross, Tom and Fetter, Mirko and Seifert, Julian},
  title       = {{CoDaMine}: Supporting Privacy and Trust Management in Ubiquitous Environments Through Communication Data Mining},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  doi         = {10.25643/bauhaus-universitaet.815},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-8154},
  year        = {2007},
  abstract    = {In ubiquitous environments an increasing number of sensors capture information on users and at the same time an increasing number of actuators are available to present information to users. This vast capturing of information potentially enables the system to adapt to the users. At the same time the system might violate the users' privacy by capturing information that the users do not want to share, and the system might disrupt the users by being too obtrusive in its adaptation or information supply. In this paper we present CoDaMine - a novel approach for providing users with system - generated feedback and control in ubiquitous environments giving them the freedom they need while reducing their effort. Basically, CoDaMine captures and analyses the users' online communication to learn about their social relationships in order to provide them with recommendations for inter-personal privacy and trust management.},
  subject     = {Angewandte Informatik},
  language    = {en},
}

@techreport{GrossFetterLiebsch2007,
  author      = {Gross, Tom and Fetter, Mirko and Liebsch, Sascha},
  title       = {The {cueTable} Cooperative Multi-Touch Interactive Tabletop: Implementation and User Feedback},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  doi         = {10.25643/bauhaus-universitaet.633},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-6331},
  year        = {2007},
  abstract    = {Es wurde ein multi-touch interaktives Tabletop als Basistechnologie zur Exploration neuer Interaktionskonzepte f{\"u}r kooperative multi-touch Anwendungen entwickelt. In dieser Publikation stellen wir vor, wie ein kooperatives multi-touch interaktives Tabletop basierend auf g{\"u}nstiger Standard-Hardware mit geringem Realisierungsaufwand gebaut werden kann. Wir pr{\"a}sentieren eine Software-Anwendung, die wir daf{\"u}r entwickelt haben. Und wir berichten {\"u}ber Benutzerkommentare zum Tabletop und der Anwendung.},
  subject     = {Angewandte Informatik},
  language    = {de},
}

@techreport{KurzHaentschGrosseetal.2007,
  author      = {Kurz, Daniel and H{\"a}ntsch, Ferry and Grosse, Max and Schiewe, Alexander and Bimber, Oliver},
  title       = {Laser Pointer Tracking in Projector-Augmented Architectural Environments},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  doi         = {10.25643/bauhaus-universitaet.818},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-8183},
  year        = {2007},
  abstract    = {We present a system that applies a custom-built pan-tilt-zoom camera for laser-pointer tracking in arbitrary real environments. Once placed in a building environment, it carries out a fully automatic self-registration, registrations of projectors, and sampling of surface parameters, such as geometry and reflectivity. After these steps, it can be used for tracking a laser spot on the surface as well as an LED marker in 3D space, using inter-playing fisheye context and controllable detail cameras. The captured surface information can be used for masking out areas that are critical to laser-pointer tracking, and for guiding geometric and radiometric image correction techniques that enable a projector-based augmentation on arbitrary surfaces. We describe a distributed software framework that couples laser-pointer tracking for interaction, projector-based AR as well as video see-through AR for visualizations with the domain specific functionality of existing desktop tools for architectural planning, simulation and building surveying.},
  subject     = {Association for Computing Machinery / Special Interest Group on Graphics},
  language    = {en},
}

@techreport{BrunsBrombachBimber2007,
  author      = {Bruns, Erich and Brombach, Benjamin and Bimber, Oliver},
  title       = {Mobile Phone Enabled Museum Guidance with Adaptive Classification},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  doi         = {10.25643/bauhaus-universitaet.940},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-9406},
  year        = {2007},
  abstract    = {Although audio guides are widely established in many museums, they suffer from several drawbacks compared to state-of-the-art multimedia technologies: First, they provide only audible information to museum visitors, while other forms of media presentation, such as reading text or video could be beneficial for museum guidance tasks. Second, they are not very intuitive. Reference numbers have to be manually keyed in by the visitor before information about the exhibit is provided. These numbers are either displayed on visible tags that are located near the exhibited objects, or are printed in brochures that have to be carried. Third, offering mobile guidance equipment to visitors leads to acquisition and maintenance costs that have to be covered by the museum. With our project PhoneGuide we aim at solving these problems by enabling the application of conventional camera-equipped mobile phones for museum guidance purposes. The advantages are obvious: First, today's off-the-shelf mobile phones offer a rich pallet of multimedia functionalities ---ranging from audio (over speaker or head-set) and video (graphics, images, movies) to simple tactile feedback (vibration). Second, integrated cameras, improvements in processor performance and more memory space enable supporting advanced computer vision algorithms. Instead of keying in reference numbers, objects can be recognized automatically by taking non-persistent photographs of them. This is more intuitive and saves museum curators from distributing and maintaining a large number of physical (visible or invisible) tags. Together with a few sensor-equipped reference tags only, computer vision based object recognition allows for the classification of single objects; whereas overlapping signal ranges of object-distinct active tags (such as RFID) would prevent the identification of individuals that are grouped closely together. Third, since we assume that museum visitors will be able to use their own devices, the acquisition and maintenance cost for museum-owned devices decreases.},
  subject     = {Objektverfolgung},
  language    = {en},
}

@techreport{BrunsBimber2007,
  author      = {Bruns, Erich and Bimber, Oliver},
  title       = {Adaptive Training of Video Sets for Image Recognition on Mobile Phones},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  doi         = {10.25643/bauhaus-universitaet.822},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-8223},
  year        = {2007},
  abstract    = {We present an enhancement towards adaptive video training for PhoneGuide, a digital museum guidance system for ordinary camera-equipped mobile phones. It enables museum visitors to identify exhibits by capturing photos of them. In this article, a combined solution of object recognition and pervasive tracking is extended to a client-server-system for improving data acquisition and for supporting scale-invariant object recognition.},
  subject     = {Objektverfolgung},
  language    = {en},
}

@techreport{GrundhoeferSeegerHaentschetal.2007,
  author      = {Grundh{\"o}fer, Anselm and Seeger, Manja and H{\"a}ntsch, Ferry and Bimber, Oliver},
  title       = {Dynamic Adaptation of Projected Imperceptible Codes},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  doi         = {10.25643/bauhaus-universitaet.816},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-8168},
  year        = {2007},
  abstract    = {In this paper we present a novel adaptive imperceptible pattern projection technique that considers parameters of human visual perception. A coded image that is invisible for human observers is temporally integrated into the projected image, but can be reconstructed by a synchronized camera. The embedded code is dynamically adjusted on the fly to guarantee its non-perceivability and to adapt it to the current camera pose. Linked with real-time flash keying, for instance, this enables in-shot optical tracking using a dynamic multi-resolution marker technique. A sample prototype is realized that demonstrates the application of our method in the context of augmentations in television studios.},
  subject     = {Association for Computing Machinery / Special Interest Group on Graphics},
  language    = {en},
}

@techreport{WetzsteinBimber2007,
  author      = {Wetzstein, Gordon and Bimber, Oliver},
  title       = {Radiometric Compensation through Inverse Light Transport},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  doi         = {10.25643/bauhaus-universitaet.812},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-8126},
  year        = {2007},
  abstract    = {Radiometric compensation techniques allow seamless projections onto complex everyday surfaces. Implemented with projector-camera systems they support the presentation of visual content in situations where projection-optimized screens are not available or not desired - as in museums, historic sites, air-plane cabins, or stage performances. We propose a novel approach that employs the full light transport between a projector and a camera to account for many illumination aspects, such as interreflections, refractions and defocus. Precomputing the inverse light transport in combination with an efficient implementation on the GPU makes the real-time compensation of captured local and global light modulations possible.},
  subject     = {Association for Computing Machinery / Special Interest Group on Graphics},
  language    = {en},
}