@comment{
  Review notes (not rendered by BibTeX):
  (1) BimberIwai2008 and Bimber2008 share the same title but carry distinct
      DOIs/URNs -- verify these are genuinely two separate deposits and not a
      duplicated record.
  (2) The institution fields below were inferred from the repository DOI
      prefix 10.25643/bauhaus-universitaet and the wim2 URNs -- confirm.
  (3) KlemensTetzner2011: the author given as "Klemens, Laub" looks like a
      swapped given/family name (probably "Laub, Klemens") -- verify before
      changing; its abstract is German although language is set to "en".
  (4) ExnerBrunsKurzetal.2009: restored "efficient", "identities", "image"
      from PDF-extraction garbles and fixed the "AUgmented" typo; the field
      was exported as the non-standard "organization" and has been renamed
      to the @techreport-standard "institution".
}

@techreport{BimberIwai2008,
  author      = {Bimber, Oliver and Iwai, Daisuke},
  title       = {Superimposing Dynamic Range},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  year        = {2008},
  doi         = {10.25643/bauhaus-universitaet.1287},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20080422-13585},
  abstract    = {We present a simple and cost-efficient way of extending contrast, perceived tonal resolution, and the color space of static hardcopy images, beyond the capabilities of hardcopy devices or low-dynamic range displays alone. A calibrated projector-camera system is applied for automatic registration, scanning and superimposition of hardcopies. We explain how high-dynamic range content can be split for linear devices with different capabilities, how luminance quantization can be optimized with respect to the non-linear response of the human visual system as well as for the discrete nature of the applied modulation devices; and how inverse tone-mapping can be adapted in case only untreated hardcopies and softcopies (such as regular photographs) are available. We believe that our approach has the potential to complement hardcopy-based technologies, such as X-ray prints for filmless imaging, in domains that operate with high quality static image content, like radiology and other medical fields, or astronomy.},
  subject     = {Bildverarbeitung},
  language    = {en},
}

@techreport{Bimber2008,
  author      = {Bimber, Oliver},
  title       = {Superimposing Dynamic Range},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  year        = {2008},
  doi         = {10.25643/bauhaus-universitaet.1379},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20090303-14662},
  abstract    = {Replacing a uniform illumination by a high-frequent illumination enhances the contrast of observed and captured images. We modulate spatially and temporally multiplexed (projected) light with reflective or transmissive matter to achieve high dynamic range visualizations of radiological images on printed paper or ePaper, and to boost the optical contrast of images viewed or imaged with light microscopes.},
  subject     = {Bildverarbeitung},
  language    = {en},
}

@techreport{KlemensTetzner2011,
  author      = {Klemens, Laub and Tetzner, Thomas},
  title       = {Social Game Environmental Management},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  year        = {2011},
  doi         = {10.25643/bauhaus-universitaet.1459},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20110905-15587},
  abstract    = {Was ist soziales spielen? Warum ist es heutzutage so beliebt? Welche Mechaniken stecken hinter dem Erfolg? Weiterhin behandelt der Bericht eine neue Spielidee, die eine umwelttechnische Komponente f{\"u}r Bildungszwecke beinhaltet.},
  subject     = {Facebook},
  language    = {en},
}

@techreport{GrossEglaMarquardt2006,
  author      = {Gross, Tom and Egla, Tareg and Marquardt, Nicolai},
  title       = {Sens-ation: A Service-Oriented Platform for Developing Sensor-Based Infrastructures},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  year        = {2006},
  doi         = {10.25643/bauhaus-universitaet.744},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-7447},
  abstract    = {In today's information society the vast technical progress and the sinking cost of information and communication technology provide new opportunities for information supply, and new technical support for communication and cooperation over distance. These trends also entail challenges such as supplying information that is adequate for a particular person in a specific situation as well as managing communication among geographically distributed parties efficiently. Context-aware systems that use sensors in order to analyse their environment and to adapt their behaviour. Yet, adequate tools for developing sensor-based infrastructures are missing. We have designed and developed Sens-ation, an open and generic service-oriented platform, which provides powerful, yet easy-to-use, tools to software developers who want to develop context-aware, sensor-based infrastructures. The service-oriented paradigm of Sens-ation enables standardised communication within individual infrastructures, between infrastructures and their sensors, but also among distributed infrastructures. On a whole, Sens-ation facilitates the development allowing developers to concentrate on the semantics of their infrastructures, and to develop innovative concepts and implementations of context-aware systems.},
  subject     = {Angewandte Informatik},
  language    = {en},
}

@techreport{GrossOemig2008,
  author      = {Gross, Tom and Oemig, Christoph},
  title       = {Presence, Privacy, and {PRIMIFaces}: Towards Selective Information Disclosure in Instant Messaging},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  year        = {2008},
  doi         = {10.25643/bauhaus-universitaet.1275},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20080314-13452},
  abstract    = {Efficient distant cooperation often requires spontaneous ad-hoc social interaction, which is only possible with adequate information on the prospective communication partner. This often requires disclosing and sharing personal information via tools such as instant messaging systems and can conflict with the users' wishes for privacy. In this paper we present an initial study investigating this trade-off and discuss implications for the design of instant messaging systems. We present the functionality and design of the PRIMIFaces instant messaging prototype supporting flexible identity management and selective information disclosure.},
  subject     = {Angewandte Informatik},
  language    = {en},
}

@techreport{FoecklerZeidlerBimber2005,
  author      = {F{\"o}ckler, Paul and Zeidler, Thomas and Bimber, Oliver},
  title       = {{PhoneGuide}: Museum Guidance Supported by On-Device Object Recognition on Mobile Phones},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  year        = {2005},
  doi         = {10.25643/bauhaus-universitaet.650},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-6500},
  abstract    = {We present PhoneGuide - an enhanced museum guidance approach that uses camera-equipped mobile phones and on-device object recognition. Our main technical achievement is a simple and light-weight object recognition approach that is realized with single-layer perceptron neuronal networks. In contrast to related systems which perform computational intensive image processing tasks on remote servers, our intention is to carry out all computations directly on the phone. This ensures little or even no network traffic and consequently decreases cost for online times. Our laboratory experiments and field surveys have shown that photographed museum exhibits can be recognized with a probability of over 90\%. We have evaluated different feature sets to optimize the recognition rate and performance. Our experiments revealed that normalized color features are most effective for our method. Choosing such a feature set allows recognizing an object below one second on up-to-date phones. The amount of data that is required for differentiating 50 objects from multiple perspectives is less than 6KBytes.},
  subject     = {Neuronales Netz},
  language    = {en},
}

@techreport{ExnerBrunsKurzetal.2009,
  author      = {Exner, David and Bruns, Erich and Kurz, Daniel and Grundh{\"o}fer, Anselm and Bimber, Oliver},
  title       = {Fast and Reliable {CAMShift} Tracking},
  institution = {JP Augmented Reality, Bauhaus-Universit{\"a}t Weimar},
  year        = {2009},
  doi         = {10.25643/bauhaus-universitaet.1410},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20091217-14962},
  abstract    = {CAMShift is a well-established and fundamental algorithm for kernel-based visual object tracking. While it performs well with objects that have a simple and constant appearance, it is not robust in more complex cases. As it solely relies on back projected probabilities it can fail in cases when the object's appearance changes (e.g. due to object or camera movement, or due to lighting changes), when similarly colored objects have to be re-detected or when they cross their trajectories. We propose extensions to CAMShift that address and resolve all of these problems. They allow the accumulation of multiple histograms to model more complex object appearance and the continuous monitoring of object identities to handle ambiguous cases of partial or full occlusion. Most steps of our method are carried out on the GPU for achieving real-time tracking of multiple targets simultaneously. We explain efficient GPU implementations of histogram generation, probability back projection, image moments computations, and histogram intersection. All of these techniques make full use of a GPU's high parallelization.},
  subject     = {Bildverarbeitung},
  language    = {en},
}

@techreport{GrossBeckmann2008,
  author      = {Gross, Tom and Beckmann, Christoph},
  title       = {{CoLocScribe}: A Media Space for Information Disclosure in Storytelling},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  year        = {2008},
  doi         = {10.25643/bauhaus-universitaet.1361},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20081103-14361},
  abstract    = {Digital storytelling of remote social interaction, where the situation of a remote group distributed over two locations is captured and a story is generated for later retrieval, can provide valuable insight into the structure and processes in a group. Yet, capturing these situations is a challenge---both from a technical perspective, and from a social perspective. In this paper we present CoLocScribe: a concept and prototype of an advanced media space featuring ubiquitous computing technology for capturing remote social interaction as well as a study of its use providing valuable feedback for the captured persons as well as input for the authors.},
  subject     = {Angewandte Informatik},
  language    = {en},
}

@techreport{GrossMarquardt2006,
  author      = {Gross, Tom and Marquardt, Nicolai},
  title       = {{CollaborationBus}: An Editor for the Easy Configuration of Complex Ubiquitous Environment},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  year        = {2006},
  doi         = {10.25643/bauhaus-universitaet.746},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-7463},
  abstract    = {Early sensor-based infrastructures were often developed by experts with a thorough knowledge of base technology for sensing information, for processing the captured data, and for adapting the system's behaviour accordingly. In this paper we argue that also end-users should be able to configure Ubiquitous Computing environments. We introduce the CollaborationBus application: a graphical editor that provides abstractions from base technology and thereby allows multifarious users to configure Ubiquitous Computing environments. By composing pipelines users can easily specify the information flows from selected sensors via optional filters for processing the sensor data to actuators changing the system behaviour according to the users' wishes. Users can compose pipelines for both home and work environments. An integrated sharing mechanism allows them to share their own compositions, and to reuse and build upon others' compositions. Real-time visualisations help them understand how the information flows through their pipelines. In this paper we present the concept, implementation, and early user feedback of the CollaborationBus application.},
  subject     = {Angewandte Informatik},
  language    = {en},
}

@techreport{GrossSchirmer2008,
  author      = {Gross, Tom and Schirmer, Maximilian},
  title       = {{CollaborationBus Aqua}: An Editor for Storytelling in Mixed Reality Settings},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  year        = {2008},
  doi         = {10.25643/bauhaus-universitaet.1360},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20081103-14359},
  abstract    = {Capturing the interaction of users in a room based on real-world and electronic sensors provides valuable input for their interactive stories. However, in such complex scenarios there is a gap between the huge amount of rather fine-grained data that is captured and the story summarising and representing the most significant aspects of the interaction. In this paper we present the CollaborationBus Aqua editor that provides an easy to use graphical editor for capturing, authoring, and sharing stories based on mixed-reality scenarios.},
  subject     = {Angewandte Informatik},
  language    = {en},
}