@incollection{Manning, author = {Manning, Erin}, title = {10 Propositionen f{\"u}r eine radikale P{\"a}dagogik, oder: Wie den Wert neu denken?}, series = {Experimente lernen, Techniken tauschen. Ein spekulatives Handbuch}, booktitle = {Experimente lernen, Techniken tauschen. Ein spekulatives Handbuch}, publisher = {Nocturne}, address = {Berlin und Weimar}, doi = {10.25643/bauhaus-universitaet.4266}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201008-42660}, pages = {121 -- 138}, abstract = {Radikale P{\"a}dagogik richtet ihre Aufmerksamkeit sowohl auf die allt{\"a}glichen Techniken p{\"a}dagogischer Praxis - Techniken zur Aktivierung eines Begegnungsraumes, Techniken, sich um die Arbeit und einander zu k{\"u}mmern, Techniken des kultur{\"u}bergreifenden Zuh{\"o}rens, Techniken, sich dem Mehr-als zuzuwenden - als auch auf Techniken zum »{\"U}berschreiten der Schwelle«. Das {\"U}berschreiten der Schwelle h{\"a}ngt mit der Art und Weise der Anpassung (accommodation) zusammen, die es erm{\"o}glicht, das Lernen in all seinen Erscheinungsformen wertzusch{\"a}tzen.}, subject = {P{\"a}dagogik}, language = {de} }
@phdthesis{Kunert, author = {Kunert, Andr{\'e}}, title = {3D Interaction Techniques in Multi-User Virtual Reality : towards scalable templates and implementation patterns for cooperative interfaces}, doi = {10.25643/bauhaus-universitaet.4296}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201204-42962}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {147}, abstract = {Multi-user projection systems provide a coherent 3D interaction space for multiple co-located users that facilitates mutual awareness, full-body interaction, and the coordination of activities. The users perceive the shared scene from their respective viewpoints and can directly interact with the 3D content. This thesis reports on novel interaction patterns for collaborative 3D interaction for local and distributed user groups based on such multi-user projection environments. A particular focus of our developments lies in the provision of multiple independent interaction territories in our workspaces and their tight integration into collaborative workflows. The motivation for such multi-focus workspaces is grounded in research on social cooperation patterns, specifically in the requirement for supporting phases of loose and tight collaboration and the emergence of dedicated working territories for private usage and public exchange. We realized independent interaction territories in the form of handheld virtual viewing windows and multiple co-located hardware displays in a joint workspace. They provide independent views of a shared virtual environment and serve as access points for the exploration and manipulation of the 3D content. Their tight integration into our workspace supports fluent transitions between individual work and joint user engagement. The different affordances of various displays in an exemplary workspace consisting of a large 3D wall, a 3D tabletop, and handheld virtual viewing windows promote different usage scenarios, for instance for views from an egocentric perspective, miniature scene representations, close-up views, or storage and transfer areas. This work shows that this versatile workspace can make the cooperation of multiple people in joint tasks more effective, e.g. by parallelizing activities, distributing subtasks, and providing mutual support.
In order to create, manage, and share virtual viewing windows, this thesis presents the interaction technique of Photoportals, a tangible interface based on the metaphor of digital photography. Photoportals serve as configurable viewing territories and enable the individual examination of scene details as well as the immediate sharing of the prepared views. They are specifically designed to complement other interface facets and provide extended functionality for scene navigation, object manipulation, and the creation of temporal recordings of activities in the virtual scene. A further objective of this work is the realization of a coherent interaction space for direct 3D input across the independent interaction territories in multi-display setups. This requires the simultaneous consideration of user input in several potential interaction windows as well as configurable disambiguation schemes for the implicit selection of distinct interaction contexts. We generalized the required implementation structures into a high-level software pattern and demonstrated its versatility by means of various multi-context 3D interaction tools. Additionally, this work tackles specific problems related to group navigation in multi-user projection systems. Joint navigation of a co-located group of users can lead to unintentional collisions when passing narrow scene sections. In this context, we suggest various solutions that prevent individual collisions during group navigation and discuss their effect on the perceived integrity of the travel group and the 3D scene. For collaboration scenarios involving distributed user groups, we furthermore explored different configurations for joint and individual travel. Last but not least, this thesis provides detailed information and implementation templates for the realization of the proposed interaction techniques and collaborative workspaces in scenegraph-based VR systems. These contributions to the abstraction of specific interaction patterns, such as group navigation and multi-window interaction, facilitate their reuse in other virtual reality systems and their adaptation to further collaborative scenarios.}, subject = {Virtuelle Realit{\"a}t}, language = {en} }
@article{AbbaspourGilandehMolaeeSabzietal., author = {Abbaspour-Gilandeh, Yousef and Molaee, Amir and Sabzi, Sajad and Nabipour, Narjes and Shamshirband, Shahaboddin and Mosavi, Amir}, title = {A Combined Method of Image Processing and Artificial Neural Network for the Identification of 13 Iranian Rice Cultivars}, series = {agronomy}, volume = {2020}, journal = {agronomy}, number = {Volume 10, Issue 1, 117}, publisher = {MDPI}, doi = {10.3390/agronomy10010117}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200123-40695}, pages = {21}, abstract = {Due to the importance of identifying crop cultivars, the advancement of accurate assessment of cultivars is considered essential. The existing methods for identifying rice cultivars are mainly time-consuming, costly, and destructive. Therefore, the development of novel methods is highly beneficial. The aim of the present research is to classify common rice cultivars in Iran based on color, morphologic, and texture properties using artificial intelligence (AI) methods. In doing so, digital images of 13 rice cultivars in Iran in three forms of paddy, brown, and white are analyzed through pre-processing and segmentation using MATLAB.
Ninety-two properties, comprising 60 color, 14 morphologic, and 18 texture features, were identified for each rice cultivar. In the next step, the normal distribution of the data was evaluated, and the possibility of observing a significant difference between all properties of the cultivars was studied using variance analysis. In addition, the least significant difference (LSD) test was performed to obtain a more accurate comparison between cultivars. To reduce data dimensions and focus on the most effective components, principal component analysis (PCA) was employed. Accordingly, the accuracy of rice cultivar separation was calculated for paddy, brown rice, and white rice using discriminant analysis (DA) as 89.2\%, 87.7\%, and 83.1\%, respectively. To identify and classify the desired cultivars, a multilayer perceptron neural network was implemented based on the most effective components. The results showed 100\% accuracy of the network in identifying and classifying all mentioned rice cultivars. Hence, it is concluded that the integration of image processing and pattern recognition methods, such as statistical classification and artificial neural networks, can be used for the identification and classification of rice cultivars.}, subject = {Maschinelles Lernen}, language = {en} }
@article{HarirchianJadhavMohammadetal., author = {Harirchian, Ehsan and Jadhav, Kirti and Mohammad, Kifaytullah and Aghakouchaki Hosseini, Seyed Ehsan and Lahmer, Tom}, title = {A Comparative Study of MCDM Methods Integrated with Rapid Visual Seismic Vulnerability Assessment of Existing RC Structures}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 18, article 6411}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10186411}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200918-42360}, pages = {24}, abstract = {Recently, the demand for residence and usage of urban infrastructure has increased, thereby elevating the risk to human lives from natural calamities. The occupancy demand has rapidly increased the construction rate, whereas inadequately designed structures remain more vulnerable. Buildings constructed before the development of seismic codes have an additional susceptibility to earthquake vibrations. Structural collapse causes economic loss as well as setbacks for human lives. Applying different theoretical methods to analyze structural behavior is expensive and time-consuming. Therefore, introducing a rapid vulnerability assessment method to check structural performance is necessary for future developments. This process is known as Rapid Visual Screening (RVS). The technique has been developed to identify, inventory, and screen structures that are potentially hazardous. Sometimes, poor construction quality does not provide some of the required parameters; in this case, the RVS process turns into a tedious scenario. Hence, to tackle such a situation, multiple-criteria decision-making (MCDM) methods for seismic vulnerability assessment open a new gateway. The different parameters required by RVS can be taken into account in MCDM. MCDM evaluates multiple conflicting criteria in decision making in several fields. This paper aims to bridge the gap between RVS and MCDM.
Furthermore, to define the correlation between these techniques, the methodologies from the Indian, Turkish, and Federal Emergency Management Agency (FEMA) codes have been implemented. The effects on the seismic vulnerability of structures have been observed and compared.}, subject = {Erdbebensicherheit}, language = {en} }
@article{HarirchianKumariJadhavetal., author = {Harirchian, Ehsan and Kumari, Vandana and Jadhav, Kirti and Raj Das, Rohan and Rasulzade, Shahla and Lahmer, Tom}, title = {A Machine Learning Framework for Assessing Seismic Hazard Safety of Reinforced Concrete Buildings}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 20, article 7153}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10207153}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201022-42744}, pages = {18}, abstract = {Although averting a seismic disturbance and its physical, social, and economic disruption is practically impossible, the advancements in computational science and numerical modeling can equip humanity to predict its severity, understand the outcomes, and prepare for post-disaster management. Many buildings in developed metropolitan areas are aging and still in service. These buildings were also designed before national seismic codes were established or without the introduction of construction regulations. In that case, risk reduction is significant for developing alternatives and designing suitable models to enhance the existing structures' performance. Such models will be able to classify risks and casualties related to possible earthquakes through emergency preparation. Thus, it is crucial to recognize structures that are susceptible to earthquake vibrations and need to be prioritized for retrofitting. However, each building's behavior under seismic actions cannot be studied through full structural analysis, as this is unrealistic because of the rigorous computations, long duration, and substantial expenditure. Therefore, a simple, reliable, and accurate process known as Rapid Visual Screening (RVS) is called for, which serves as a primary screening platform, including an optimum number of seismic parameters and predetermined performance damage conditions for structures. In this study, the damage classification technique was studied, and the efficacy of the Machine Learning (ML) method in damage prediction via a Support Vector Machine (SVM) model was explored. The ML model is trained and tested separately on damage data from four different earthquakes, namely Ecuador, Haiti, Nepal, and South Korea. Each dataset consists of varying numbers of input data and eight performance modifiers.
Based on the study and the results, the ML model using SVM classifies the given input data into the corresponding classes and performs the hazard safety evaluation of buildings.}, subject = {Erdbeben}, language = {en} }
@article{KarimimoshaverHajivalieiShokrietal., author = {Karimimoshaver, Mehrdad and Hajivaliei, Hatameh and Shokri, Manouchehr and Khalesro, Shakila and Aram, Farshid and Shamshirband, Shahaboddin}, title = {A Model for Locating Tall Buildings through a Visual Analysis Approach}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 17, article 6072}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10176072}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43350}, pages = {1 -- 25}, abstract = {Tall buildings have become an integral part of cities despite all their pros and cons. Some current tall buildings have several problems because of their unsuitable location; the problems include increasing density, imposing traffic on urban thoroughfares, blocking view corridors, etc. Some of these buildings have destroyed desirable views of the city. In this research, different criteria have been chosen, such as environment, access, socio-economic factors, land use, and physical context. These criteria and sub-criteria are prioritized and weighted by the analytic network process (ANP) based on experts' opinions, using Super Decisions V2.8 software. In parallel, layers corresponding to the sub-criteria were created in ArcGIS 10.3, and a locating plan was then derived via a weighted overlay (map algebra). In the next step, seven hypothetical tall buildings (20 stories), placed in the best part of the locating plan, were considered to evaluate how much of these hypothetical buildings would be visible (fuzzy visibility) from the streets and open spaces throughout the city. These processes have been modeled in MATLAB, and the final fuzzy visibility plan was created in ArcGIS. The fuzzy visibility results can help city managers and planners to choose which location is suitable for a tall building and how much visibility may be appropriate. The proposed model can locate tall buildings based on technical and visual criteria in the future development of the city, and it can be widely used in any city as long as the criteria and weights are localized.}, subject = {Geb{\"a}ude}, language = {en} }
@article{SaadatfarKhosraviHassannatajJoloudarietal., author = {Saadatfar, Hamid and Khosravi, Samiyeh and Hassannataj Joloudari, Javad and Mosavi, Amir and Shamshirband, Shahaboddin}, title = {A New K-Nearest Neighbors Classifier for Big Data Based on Efficient Data Pruning}, series = {Mathematics}, volume = {2020}, journal = {Mathematics}, number = {volume 8, issue 2, article 286}, publisher = {MDPI}, doi = {10.3390/math8020286}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200225-40996}, pages = {12}, abstract = {The K-nearest neighbors (KNN) machine learning algorithm is a well-known non-parametric classification method. However, like other traditional data mining methods, applying it to big data comes with computational challenges. Indeed, KNN determines the class of a new sample based on the class of its nearest neighbors; however, identifying the neighbors in a large amount of data imposes a large computational cost, so that it is no longer feasible on a single computing machine. One of the proposed techniques to make classification methods applicable to large datasets is pruning.
LC-KNN is an improved KNN method which first clusters the data into smaller partitions using the K-means clustering method and then applies KNN to each new sample within the partition whose center is nearest. However, because the clusters have different shapes and densities, selecting the appropriate cluster is a challenge. In this paper, an approach is proposed to improve the pruning phase of the LC-KNN method by taking these factors into account. The proposed approach helps to choose a more appropriate cluster of data in which to look for the neighbors, thus increasing the classification accuracy. The performance of the proposed approach is evaluated on different real datasets. The experimental results show the effectiveness of the proposed approach and its higher classification accuracy and lower time cost in comparison to other recent relevant methods.}, subject = {Maschinelles Lernen}, language = {en} }
@article{SchirmerOsburg, author = {Schirmer, Ulrike and Osburg, Andrea}, title = {A new method for the quantification of adsorbed styrene acrylate copolymer particles on cementitious surfaces: a critical comparative study}, series = {SN Applied Sciences}, volume = {2020}, journal = {SN Applied Sciences}, number = {Volume 2, article 2061}, publisher = {Springer}, address = {Heidelberg}, doi = {10.1007/s42452-020-03825-5}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210804-44729}, pages = {1 -- 11}, abstract = {The amount of adsorbed styrene acrylate copolymer (SA) particles on cementitious surfaces at the early stage of hydration was quantitatively determined using three different methodological approaches: the depletion method, visible spectrophotometry (VIS) and thermogravimetry coupled with mass spectrometry (TG-MS). Considering the advantages and disadvantages of each method, including the respectively required sample preparation, the results for four polymer-modified cement pastes, varying in polymer content and cement fineness, were evaluated. To some extent, significant discrepancies in the adsorption degrees were observed. Significantly lower amounts of adsorbed polymers tended to be identified using TG-MS compared to values determined with the depletion method. Spectrophotometrically determined values lay between these extremes. This tendency was found for three of the four cement pastes examined and originates in sample preparation and methodological limitations. The main influencing factor is the distortion of the polymer concentration in the liquid phase during centrifugation, caused by interactions at the interface between sediment and supernatant. The newly developed method, using TG-MS for the quantification of SA particles, proved to be suitable for dealing with these issues. Here, instead of the fluid phase, the sediment is examined with regard to the polymer content, on which the influence of centrifugation is considerably lower.}, subject = {Zement}, language = {en} }
@phdthesis{Winkel, author = {Winkel, Benjamin}, title = {A three-dimensional model of skeletal muscle for physiological, pathological and experimental mechanical simulations}, doi = {10.25643/bauhaus-universitaet.4300}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201211-43002}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {In recent decades, a multitude of concepts and models were developed to understand, assess and predict muscular mechanics in the context of physiological and pathological events.
Most of these models are highly specialized and designed to selectively address fields in, e.g., medicine, sports science, forensics, product design or CGI; their data are often not transferable to other ranges of application. A single universal model, which covers the details of biochemical and neural processes as well as the development of internal and external force and motion patterns and appearance, would not be practical with regard to the diversity of the questions to be investigated and the task of finding answers efficiently. With reasonable limitations though, a generalized approach is feasible. The objective of the work at hand was to develop a model for muscle simulation which covers the phenomenological aspects, and thus is universally applicable in domains where until now specialized models were utilized. This includes investigations on active and passive motion, structural interaction of muscles within the body and with external elements, for example in crash scenarios, but also research topics like the verification of in vivo experiments and parameter identification. For this purpose, elements for the simulation of incompressible deformations were studied, adapted and implemented into the finite element code SLang. Various anisotropic, visco-elastic muscle models were developed or enhanced. The applicability was demonstrated on the basis of several examples, and a general basis for the implementation of further material models was developed and elaborated.}, subject = {Biomechanik}, language = {en} }
@article{HomaeiSoleimaniShamshirbandetal., author = {Homaei, Mohammad Hossein and Soleimani, Faezeh and Shamshirband, Shahaboddin and Mosavi, Amir and Nabipour, Narjes and Varkonyi-Koczy, Annamaria R.}, title = {An Enhanced Distributed Congestion Control Method for Classical 6LowPAN Protocols Using Fuzzy Decision System}, series = {IEEE Access}, journal = {IEEE Access}, number = {volume 8}, publisher = {IEEE}, doi = {10.1109/ACCESS.2020.2968524}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40805}, pages = {20628 -- 20645}, abstract = {Classical Internet of Things routing and wireless sensor networks can provide more precise monitoring of the covered area due to the higher number of utilized nodes. Because of the limitations in shared transfer media, many nodes in the network are prone to collisions during simultaneous transmissions. Medium access control protocols are usually more practical in networks with low traffic, which are not subjected to external noise from adjacent frequencies. There are preventive, detection, and control solutions to congestion management in the network, all of which are the focus of this study. In the congestion prevention phase, the proposed method chooses the next hop of the path using a fuzzy decision-making system to distribute network traffic via optimal paths. In the congestion detection phase, a dynamic approach to queue management was designed to detect congestion in the least amount of time and prevent collisions. In the congestion control phase, the back-pressure method was used based on the quality of the queue to decrease the probability of linking in the pathway from the pre-congested node. The main goals of this study are to balance energy consumption in network nodes, reduce the rate of lost packets, and increase the quality of service in routing.
Simulation results showed that the proposed Congestion Control Fuzzy Decision Making (CCFDM) method was more capable of improving routing parameters than recent algorithms.}, subject = {Internet der Dinge}, language = {en} }