@masterthesis{Lang, type = {Bachelor Thesis}, author = {Lang, Kevin}, title = {Worteinbettung als semantisches Feature in der argumentativen Analyse}, doi = {10.25643/bauhaus-universitaet.3934}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20190617-39343}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {54}, abstract = {Diese Arbeit besch{\"a}ftigt sich mit der Nutzung von Worteinbettungen in der automatischen Analyse von argumentativen Texten. Die Arbeit diskutiert wichtige Einstellungen des Einbettungsverfahrens sowie diverse Anwendungsmethoden der eingebetteten Wortvektoren f{\"u}r drei Aufgaben der automatischen argumentativen Analyse: Textsegmentierung, Argumentativit{\"a}ts-Klassifikation und Relationenfindung. Meine Experimente auf zwei Standard-Argumentationsdatens{\"a}tzen zeigen die folgenden Haupterkenntnisse: Bei der Textsegmentierung konnten keine Verbesserungen erzielt werden, w{\"a}hrend sich in der Argumentativit{\"a}ts-Klassifikation und der Relationenfindung kleine Erfolge gezeigt haben und weitere bestimmte Forschungsthesen bewahrheitet werden konnten. In der Diskussion wird darauf eingegangen, warum sich bei der einfachen Worteinbettung in der argumentativen Analyse kaum nutzbare Ergebnisse erzielen ließen, diese sich aber in Zukunft durch erweiterte Worteinbettungsverfahren verbessern k{\"o}nnen.}, subject = {Argumentation}, language = {de} } @article{TonnTatarin, author = {Tonn, Christian and Tatarin, Ren{\'e}}, title = {Volumen Rendering in der Architektur: {\"U}berlagerung und Kombination von 3D Voxel Volumendaten mit 3D Geb{\"a}udemodellen}, doi = {10.25643/bauhaus-universitaet.2671}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160822-26718}, abstract = {Volumerendering ist eine Darstellungstechnik, um verschiedene r{\"a}umliche Mess- und Simulationsdaten anschaulich, interaktiv grafisch darzustellen. Im folgenden Beitrag wird ein Verfahren vorgestellt, mehrere Volumendaten mit einem Architekturfl{\"a}chenmodell zu {\"u}berlagern. Diese komplexe Darstellungsberechnung findet mit hardwarebeschleunigten Shadern auf der Grafikkarte statt. Im Beitrag wird hierzu der implementierte Softwareprototyp "VolumeRendering" vorgestellt. Neben dem interaktiven Berechnungsverfahren wurde ebenso Wert auf eine nutzerfreundliche Bedienung gelegt. Das Ziel bestand darin, eine einfache Bewertung der Volumendaten durch Fachplaner zu erm{\"o}glichen. Durch die {\"U}berlagerung, z. B. verschiedener Messverfahren mit einem Fl{\"a}chenmodell, ergeben sich Synergien und neue Auswertungsm{\"o}glichkeiten. Abschließend wird anhand von Beispielen aus einem interdisziplin{\"a}ren Forschungsprojekt die Anwendung des Softwareprototyps illustriert.}, subject = {Multiple Volume Rendering}, language = {de} } @article{FaizollahzadehArdabiliNajafiAlizamiretal., author = {Faizollahzadeh Ardabili, Sina and Najafi, Bahman and Alizamir, Meysam and Mosavi, Amir and Shamshirband, Shahaboddin and Rabczuk, Timon}, title = {Using SVM-RSM and ELM-RSM Approaches for Optimizing the Production Process of Methyl and Ethyl Esters}, series = {Energies}, journal = {Energies}, number = {11, 2889}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en11112889}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20181025-38170}, pages = {1 -- 20}, abstract = {The production of a desired product needs an effective use of the experimental model.
The present study proposes an extreme learning machine (ELM) and a support vector machine (SVM) integrated with the response surface methodology (RSM) to solve the complexity in optimization and prediction of the ethyl ester and methyl ester production process. The novel hybrid models of ELM-RSM and ELM-SVM are further used as a case study to estimate the yield of methyl and ethyl esters through a trans-esterification process from waste cooking oil (WCO) based on American Society for Testing and Materials (ASTM) standards. The results of the prediction phase were also compared with artificial neural networks (ANNs) and adaptive neuro-fuzzy inference system (ANFIS), which were recently developed by the second author of this study. Based on the results, an ELM with a correlation coefficient of 0.9815 and 0.9863 for methyl and ethyl esters, respectively, had a high estimation capability compared with that for SVM, ANNs, and ANFIS. Accordingly, the maximum production yield was obtained in the case of using ELM-RSM of 96.86\% for ethyl ester at a temperature of 68.48 °C, a catalyst value of 1.15 wt. \%, mixing intensity of 650.07 rpm, and an alcohol to oil molar ratio (A/O) of 5.77; for methyl ester, the production yield was 98.46\% at a temperature of 67.62 °C, a catalyst value of 1.1 wt. \%, mixing intensity of 709.42 rpm, and an A/O of 6.09. Therefore, ELM-RSM increased the production yield by 3.6\% for ethyl ester and 3.1\% for methyl ester, compared with those for the experimental data.}, subject = {Biodiesel}, language = {en} } @phdthesis{Kulik, author = {Kulik, Alexander}, title = {User Interfaces for Cooperation}, doi = {10.25643/bauhaus-universitaet.2720}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20161202-27207}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {261}, abstract = {This thesis suggests cooperation as a design paradigm for human-computer interaction. The basic idea is that the synergistic co-operation of interfaces through concurrent user activities enables increased interaction fluency and expressiveness. This applies to bimanual interaction and multi-finger input, e.g., touch typing, as well as the collaboration of multiple users. Cooperative user interfaces offer more interaction flexibility and expressivity for single and multiple users. Part I of this thesis analyzes the state of the art in user interface design. It explores limitations of common approaches and reveals the crucial role of cooperative action in several established user interfaces and research prototypes. A review of related research in psychology and human-computer interaction offers insights into the cognitive, behavioral, and ergonomic foundations of cooperative user interfaces. Moreover, this thesis suggests a broad applicability of generic cooperation patterns and contributes three high-level design principles. Part II presents three experiments towards cooperative user interfaces in detail. A study on desktop-based 3D input devices explores fundamental benefits of cooperative bimanual input and the impact of interface design on bimanual cooperative behavior. A novel interaction technique for multitouch devices is presented that follows the paradigm of cooperative user interfaces and demonstrates advantages over the status quo. Finally, this thesis introduces a fundamentally new display technology that provides up to six users with their individual perspectives of a shared 3D environment. The system creates new possibilities for the cooperative interaction of multiple users.
Part III of this thesis builds on the research results described in Part II, in particular, the multi-user 3D display system. A series of case studies in the field of collaborative virtual reality provides exemplary evidence for the relevance and applicability of the suggested design principles.}, subject = {Human-Computer Interaction (HCI)}, language = {en} } @phdthesis{Potthast, author = {Potthast, Martin}, title = {Technologies for Reusing Text from the Web}, doi = {10.25643/bauhaus-universitaet.1566}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20120217-15663}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {237}, abstract = {Texts from the web can be reused individually or in large quantities. The former is called text reuse and the latter language reuse. We first present a comprehensive overview of the different ways in which text and language are reused today, and how exactly information retrieval technologies can be applied in this respect. The remainder of the thesis then deals with specific retrieval tasks. In general, our contributions consist of models and algorithms, their evaluation, and for that purpose, large-scale corpus construction. The thesis divides into two parts. The first part introduces technologies for text reuse detection, and our contributions are as follows: (1) A unified view of projecting-based and embedding-based fingerprinting for near-duplicate detection and the first-time evaluation of fingerprint algorithms on Wikipedia revision histories as a new, large-scale corpus of near-duplicates. (2) A new retrieval model for the quantification of cross-language text similarity, which gets by without parallel corpora. We have evaluated the model in comparison to other models on many different pairs of languages. (3) An evaluation framework for text reuse and particularly plagiarism detectors, which consists of tailored detection performance measures and a large-scale corpus of automatically generated and manually written plagiarism cases. The latter have been obtained via crowdsourcing. This framework has been successfully applied to evaluate many different state-of-the-art plagiarism detection approaches within three international evaluation competitions. The second part introduces technologies that solve three retrieval tasks based on language reuse, and our contributions are as follows: (4) A new model for the comparison of textual and non-textual web items across media, which exploits web comments as a source of information about the topic of an item. In this connection, we identify web comments as a largely neglected information source and introduce the rationale of comment retrieval. (5) Two new algorithms for query segmentation, which exploit web n-grams and Wikipedia as a means of discerning the user intent of a keyword query. Moreover, we crowdsource a new corpus for the evaluation of query segmentation which surpasses existing corpora by two orders of magnitude. (6) A new writing assistance tool called Netspeak, which is a search engine for commonly used language.
Netspeak indexes the web in the form of web n-grams as a source of writing examples and implements a wildcard query processor on top of it.}, subject = {Information Retrieval}, language = {en} } @article{GhazvineiDarvishiMosavietal., author = {Ghazvinei, Pezhman Taherei and Darvishi, Hossein Hassanpour and Mosavi, Amir and Yusof, Khamaruzaman bin Wan and Alizamir, Meysam and Shamshirband, Shahaboddin and Chau, Kwok-Wing}, title = {Sugarcane growth prediction based on meteorological parameters using extreme learning machine and artificial neural network}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2018}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {12,1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2018.1526119}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20181017-38129}, pages = {738 -- 749}, abstract = {Management strategies for sustainable sugarcane production need to deal with the increasing complexity and variability of the whole sugar system. Moreover, they need to accommodate the multiple goals of different industry sectors and the wider community. Traditional disciplinary approaches are unable to provide integrated management solutions, and an approach based on whole systems analysis is essential to bring about beneficial change to industry and the community. The application of this approach to water management, environmental management and cane supply management is outlined, where the literature indicates that the application of extreme learning machine (ELM) has never been explored in this realm. Consequently, the leading objective of the current research was set to fill this gap by applying ELM to establish a swift and accurate data-driven model for crop production. The key learning has been the need for innovation in the technical aspects of system function, underpinned by modelling of sugarcane growth. Therefore, the current study is an attempt to establish an integrated model using ELM to predict the concluding growth amount of sugarcane. Prediction results were evaluated and further compared with artificial neural network (ANN) and genetic programming models. The accuracy of the ELM model is calculated using the statistical indicators of Root Mean Square Error (RMSE), Pearson Coefficient (r), and Coefficient of Determination (R2), with promising results of 0.8, 0.47, and 0.89, respectively. The results also show better generalization ability in addition to a faster learning curve. Thus, the proficiency of the ELM for further work on advancing prediction models for sugarcane growth was confirmed with promising results.}, subject = {K{\"u}nstliche Intelligenz}, language = {en} } @article{NabipourDehghaniMosavietal., author = {Nabipour, Narjes and Dehghani, Majid and Mosavi, Amir and Shamshirband, Shahaboddin}, title = {Short-Term Hydrological Drought Forecasting Based on Different Nature-Inspired Optimization Algorithms Hybridized With Artificial Neural Networks}, series = {IEEE Access}, volume = {2020}, journal = {IEEE Access}, number = {volume 8}, publisher = {IEEE}, doi = {10.1109/ACCESS.2020.2964584}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40796}, pages = {15210 -- 15222}, abstract = {Hydrological drought forecasting plays a substantial role in water resources management. Hydrological drought highly affects the water allocation and hydropower generation.
In this research, short-term hydrological drought was forecasted based on the hybridization of novel nature-inspired optimization algorithms and Artificial Neural Networks (ANN). For this purpose, the Standardized Hydrological Drought Index (SHDI) and the Standardized Precipitation Index (SPI) were calculated in one, three, and six aggregated months. Then, three states were proposed for SHDI forecasting, and 36 input-output combinations were extracted based on the cross-correlation analysis. In the next step, newly proposed optimization algorithms, including Grasshopper Optimization Algorithm (GOA), Salp Swarm algorithm (SSA), Biogeography-based optimization (BBO), and Particle Swarm Optimization (PSO) hybridized with the ANN were utilized for SHDI forecasting, and the results were compared to the conventional ANN. Results indicated that the hybridized model outperformed the conventional ANN. PSO performed better than the other optimization algorithms. The best models forecasted SHDI1 with R2 = 0.68 and RMSE = 0.58, SHDI3 with R2 = 0.81 and RMSE = 0.45, and SHDI6 with R2 = 0.82 and RMSE = 0.40.}, subject = {Maschinelles Lernen}, language = {en} } @article{OuaerHosseiniAmaretal., author = {Ouaer, Hocine and Hosseini, Amir Hossein and Amar, Menad Nait and Ben Seghier, Mohamed El Amine and Ghriga, Mohammed Abdelfetah and Nabipour, Narjes and Andersen, P{\aa}l {\O}steb{\o} and Mosavi, Amir and Shamshirband, Shahaboddin}, title = {Rigorous Connectionist Models to Predict Carbon Dioxide Solubility in Various Ionic Liquids}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, Issue 1, 304}, publisher = {MDPI}, doi = {10.3390/app10010304}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200107-40558}, pages = {18}, abstract = {Estimating the solubility of carbon dioxide in ionic liquids, using reliable models, is of paramount importance from both environmental and economic points of view. In this regard, the current research aims at evaluating the performance of two data-driven techniques, namely multilayer perceptron (MLP) and gene expression programming (GEP), for predicting the solubility of carbon dioxide (CO2) in ionic liquids (ILs) as a function of pressure, temperature, and four thermodynamical parameters of the ionic liquid. To develop the above techniques, 744 experimental data points derived from the literature including 13 ILs were used (80\% of the points for training and 20\% for validation). Two backpropagation-based methods, namely Levenberg-Marquardt (LM) and Bayesian Regularization (BR), were applied to optimize the MLP algorithm. Various statistical and graphical assessments were applied to check the credibility of the developed techniques. The results were then compared with those calculated using Peng-Robinson (PR) or Soave-Redlich-Kwong (SRK) equations of state (EoS). The highest coefficient of determination (R2 = 0.9965) and the lowest root mean square error (RMSE = 0.0116) were recorded for the MLP-LMA model on the full dataset (with a negligible difference to the MLP-BR model). The comparison of results from this model with the vastly applied thermodynamic equation of state models revealed slightly better performance, but the EoS approaches also performed well with R2 from 0.984 up to 0.996.
Lastly, the newly established correlation based on the GEP model exhibited very satisfactory results with overall values of R2 = 0.9896 and RMSE = 0.0201.}, subject = {Maschinelles Lernen}, language = {en} } @article{ShamshirbandBabanezhadMosavietal., author = {Shamshirband, Shahaboddin and Babanezhad, Meisam and Mosavi, Amir and Nabipour, Narjes and Hajnal, Eva and Nadai, Laszlo and Chau, Kwok-Wing}, title = {Prediction of flow characteristics in the bubble column reactor by the artificial pheromone-based communication of biological ants}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {volume 14, issue 1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2020.1715842}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200227-41013}, pages = {367 -- 378}, abstract = {A novel combination of the ant colony optimization algorithm (ACO) and computational fluid dynamics (CFD) data is proposed for modeling the multiphase chemical reactors. The proposed intelligent model presents a probabilistic computational strategy for predicting various levels of three-dimensional bubble column reactor (BCR) flow. The results prove an enhanced communication between ant colony prediction and CFD data in different sections of the BCR.}, subject = {Maschinelles Lernen}, language = {en} } @article{SchwenkeSoebkeKraft, author = {Schwenke, Nicolas and S{\"o}bke, Heinrich and Kraft, Eckhard}, title = {Potentials and Challenges of Chatbot-Supported Thesis Writing: An Autoethnography}, series = {Trends in Higher Education}, volume = {2023}, journal = {Trends in Higher Education}, number = {Volume 2, issue 4}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/higheredu2040037}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20231207-65016}, pages = {611 -- 635}, abstract = {The release of the large language model-based chatbot ChatGPT 3.5 in November 2022 has brought considerable attention to the subject of artificial intelligence, not only to the public. From the perspective of higher education, ChatGPT challenges various learning and assessment formats as it significantly reduces the effectiveness of their learning and assessment functionalities. In particular, ChatGPT might be applied to formats that require learners to generate text, such as bachelor theses or student research papers. Accordingly, the research question arises to what extent writing of bachelor theses is still a valid learning and assessment format. Correspondingly, in this exploratory study, the first author was asked to write his bachelor's thesis exploiting ChatGPT. For tracing the impact of ChatGPT methodically, an autoethnographic approach was used. First, all considerations on the potential use of ChatGPT were documented in logs, and second, all ChatGPT chats were logged. Both logs and chat histories were analyzed and are presented along with the recommendations for students regarding the use of ChatGPT suggested by a common framework. In conclusion, ChatGPT is beneficial for thesis writing during various activities, such as brainstorming, structuring, and text revision. However, there are limitations that arise, e.g., in referencing. Thus, ChatGPT requires continuous validation of the outcomes generated and thus fosters learning. Currently, ChatGPT is valued as a beneficial tool in thesis writing. However, writing a conclusive thesis still requires the learner's meaningful engagement.
Accordingly, writing a thesis is still a valid learning and assessment format. With further releases of ChatGPT, an increase in capabilities is to be expected, and the research question needs to be reevaluated from time to time.}, subject = {Chatbot}, language = {en} } @misc{Froehlich, type = {Master Thesis}, author = {Fr{\"o}hlich, Jan}, title = {On systematic approaches for interpreted information transfer of inspection data from bridge models to structural analysis}, doi = {10.25643/bauhaus-universitaet.4131}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200416-41310}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {82}, abstract = {In conjunction with the improved methods of monitoring damage and degradation processes, the interest in reliability assessment of reinforced concrete bridges has been increasing in recent years. Automated image-based inspections of the structural surface provide valuable data to extract quantitative information about deteriorations, such as crack patterns. However, the knowledge gain results from processing this information in a structural context, i.e. relating the damage artifacts to building components. This way, transformation to structural analysis is enabled. This approach sets two further requirements: availability of structural bridge information and a standardized storage for interoperability with subsequent analysis tools. Since the involved large datasets are only efficiently processed in an automated manner, the implementation of the complete workflow from damage and building data to structural analysis is targeted in this work. First, domain concepts are derived from the back-end tasks: structural analysis, damage modeling, and life-cycle assessment. The common interoperability format, the Industry Foundation Classes (IFC), and processes in these domains are further assessed. The need for user-controlled interpretation steps is identified and the developed prototype thus allows interaction at subsequent model stages. The latter has the advantage that interpretation steps can be individually separated into either a structural analysis or a damage information model or a combination of both. This approach to damage information processing from the perspective of structural analysis is then validated in different case studies.}, subject = {Br{\"u}ckenbau}, language = {en} } @phdthesis{CarvajalBermudez, author = {Carvajal Berm{\'u}dez, Juan Carlos}, title = {New methods of citizen participation based on digital technologies}, doi = {10.25643/bauhaus-universitaet.4712}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220906-47124}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {The current thesis presents research about new methods of citizen participation based on digital technologies. The focus of the research lies on decentralized methods of participation where citizens take the role of co-creators. The research project first conducted a review of the literature on citizen participation, its origins and the different paradigms that have emerged over the years. The literature review also looked at the influence of technologies on participation processes and the theoretical frameworks that have emerged to understand the introduction of technologies in the context of urban development. The literature review generated the conceptual basis for the further development of the thesis.
The research begins with a survey of technology-enabled participation applications that examined the roles and structures emerging due to the introduction of technology. The results showed that cities use technology mostly to control and monitor urban infrastructure and are rather reluctant to give citizens the role of co-creators. Based on these findings, three case studies were developed. Digital tools for citizen participation were conceived and introduced for each case study. The adoption and reaction of the citizens were observed using three data collection methods. The results of the case studies showed consistently that previous participation and engagement with informal citizen participation are a determining factor in the potential adoption of digital tools for decentralized engagement. Based on these results, the case studies proposed methods and frameworks that can be used for the conception and introduction of technologies for decentralized citizen participation.}, subject = {Partizipation}, language = {en} } @article{MeiabadiMoradiKaramimoghadametal., author = {Meiabadi, Mohammad Saleh and Moradi, Mahmoud and Karamimoghadam, Mojtaba and Ardabili, Sina and Bodaghi, Mahdi and Shokri, Manouchehr and Mosavi, Amir Hosein}, title = {Modeling the Producibility of 3D Printing in Polylactic Acid Using Artificial Neural Networks and Fused Filament Fabrication}, series = {polymers}, volume = {2021}, journal = {polymers}, number = {Volume 13, issue 19, article 3219}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/polym13193219}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220110-45518}, pages = {1 -- 21}, abstract = {Polylactic acid (PLA) is a highly applicable material that is used in 3D printers due to some significant features such as its deformation property and affordable cost. For improvement of the end-use quality, it is of significant importance to enhance the quality of fused filament fabrication (FFF)-printed objects in PLA. The purpose of this investigation was to boost toughness and to reduce the production cost of the FFF-printed tensile test samples with the desired part thickness. To remove the need for numerous and idle printing samples, the response surface method (RSM) was used. Statistical analysis was performed to deal with this concern by considering extruder temperature (ET), infill percentage (IP), and layer thickness (LT) as controlled factors. The artificial intelligence methods of artificial neural network (ANN) and ANN-genetic algorithm (ANN-GA) were further developed to estimate the toughness, part thickness, and production-cost-dependent variables. Results were evaluated by correlation coefficient and RMSE values. According to the modeling results, ANN-GA as a hybrid machine learning (ML) technique could enhance the accuracy of modeling by about 7.5, 11.5, and 4.5\% for toughness, part thickness, and production cost, respectively, in comparison with those for the single ANN method.
On the other hand, the optimization results confirm that the optimized specimen is cost-effective and able to comparatively undergo deformation, which enables the usability of printed PLA objects.}, subject = {3D-Druck}, language = {en} } @phdthesis{Lipka, author = {Lipka, Nedim}, title = {Modeling Non-Standard Text Classification Tasks}, doi = {10.25643/bauhaus-universitaet.1862}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20130307-18626}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {Text classification deals with discovering knowledge in texts and is used for extracting, filtering, or retrieving information in streams and collections. The discovery of knowledge is operationalized by modeling text classification tasks, which is mainly a human-driven engineering process. The outcome of this process, a text classification model, is used to inductively learn a text classification solution from a priori classified examples. The building blocks of modeling text classification tasks cover four aspects: (1) the way examples are represented, (2) the way examples are selected, (3) the way classifiers learn from examples, and (4) the way models are selected. This thesis proposes methods that improve the prediction quality of text classification solutions for unseen examples, especially for non-standard tasks where standard models do not fit. The original contributions are related to the aforementioned building blocks: (1) Several topic-orthogonal text representations are studied in the context of non-standard tasks and a new representation, namely co-stems, is introduced. (2) A new active learning strategy that goes beyond standard sampling is examined. (3) A new one-class ensemble for improving the effectiveness of one-class classification is proposed. (4) A new model selection framework to cope with subclass distribution shifts that occur in dynamic environments is introduced.}, subject = {Text Classification}, language = {en} } @phdthesis{Berhe, author = {Berhe, Asgedom Haile}, title = {Mitigating Risks of Corruption in Construction: A theoretical rationale for BIM adoption in Ethiopia}, doi = {10.25643/bauhaus-universitaet.4517}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211007-45175}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {336}, abstract = {This PhD thesis sets out to investigate the potentials of Building Information Modeling (BIM) to mitigate risks of corruption in the Ethiopian public construction sector. The wide-ranging capabilities and promises of BIM have led to the strong perception among researchers and practitioners that it is an indispensable technology. Consequently, it has become the frequent subject of science and research. Meanwhile, many countries, especially the developed ones, have committed themselves to applying the technology extensively. Increasing productivity is the most common and frequently cited reason for that. However, both technology developers and adopters are oblivious to the potentials of BIM in addressing critical challenges in the construction sector, such as corruption. This particularly would be significant in developing countries like Ethiopia, where its problems and effects are acute. Studies reveal that bribery and corruption have long pervaded the construction industry worldwide. The complex and fragmented nature of the sector provides an environment for corruption. The Ethiopian construction sector is not immune from this epidemic reality. 
In fact, it is regarded as one of the most vulnerable sectors owing to varying socio-economic and political factors. Since 2015, Ethiopia has started adopting BIM, yet without clear goals and strategies. As a result, the potential of BIM for combating concrete problems of the sector remains untapped. To this end, this dissertation does pioneering work by showing how collaboration and coordination features of the technology contribute to minimizing the opportunities for corruption. Tracing loopholes, otherwise, would remain complex and ineffective in the traditional documentation processes. Proceeding from this anticipation, this thesis brings up two primary questions: what are areas and risks of corruption in the case of Ethiopian public construction projects; and how could BIM be leveraged to mitigate these risks? To tackle these and other secondary questions, the research employs a mixed-method approach. The selected main research strategies are Survey, Grounded Theory (GT) and Archival Study. First, the author disseminates an online questionnaire among Ethiopian construction engineering professionals to pinpoint areas of vulnerability to corruption. 155 responses are compiled and scrutinized quantitatively. Then, a semi-structured in-depth interview is conducted with 20 senior professionals, primarily to comprehend opportunities for and risks of corruption in those identified highly vulnerable project stages and decision points. At the same time, open interviews (consultations) are held with 14 informants to become aware of the state of construction documentation, BIM and loopholes for corruption in the country. Consequently, these qualitative data are analyzed utilizing the principles of GT, heat/risk mapping and Social Network Analysis (SNA). The risk mapping assists the researcher in prioritizing corruption risks, whilst SNA makes it feasible to methodically identify key actors/stakeholders in the corruption venture. Based on the generated research data, the author constructs a [substantive] grounded theory around the elements of corruption in the Ethiopian public construction sector. This theory, later, guides the subsequent strategic proposition of BIM. Finally, 85 public construction-related cases are also analyzed systematically to substantiate and confirm previous findings. By way of these multiple research endeavors, based first and foremost on the triangulation of qualitative and quantitative data analysis, the author conveys a number of key findings. First, estimations, tender document preparation and evaluation, construction material as well as quality control and additional work orders are found to be the most vulnerable stages in the design, tendering and construction phases respectively. Second, middle management personnel of contractors and clients, aided by brokers, play the most critical roles in corrupt transactions within the prevalent corruption network. Third, grand corruption persists in the sector, attributed to the fact that top management and higher officials entertain their overriding power, supported by the lack of project audits and accountability. Contrarily, individuals at operation level utilize intentional and unintentional 'errors' as an opportunity for corruption. In light of these findings, two conceptual BIM-based risk mitigation strategies are prescribed: active and passive automation of project audits; and the monitoring of project information throughout projects' value chain.
These propositions are made in reliance on BIM's present dimensional capabilities and the promises of Integrated Project Delivery (IPD). Moreover, BIM's synchronous potentials with other technologies such as Information and Communication Technology (ICT) and Radio Frequency technologies are topics that are also treated. All these arguments form the basis for the main thesis of this dissertation, that BIM is able to mitigate corruption risks in the Ethiopian public construction sector. The discourse on the skepticism about BIM that may stem from the complex nature of corruption and the strategic as well as technological limitations of BIM is also illuminated and complemented by this work. Thus, the thesis uncovers possible research gaps and lays the foundation for further studies.}, subject = {Building Information Modeling}, language = {en} } @article{TreyerKleinKoenigetal., author = {Treyer, Lukas and Klein, Bernhard and K{\"o}nig, Reinhard and Meixner, Christine}, title = {Lightweight Urban Computation Interchange (LUCI): A System to Couple Heterogenous Simulations and Views}, series = {Spatial Information Research}, journal = {Spatial Information Research}, doi = {10.25643/bauhaus-universitaet.2603}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160622-26037}, pages = {1 -- 12}, abstract = {In this paper we introduce LUCI, a Lightweight Urban Calculation Interchange system, designed to bring the advantages of a calculation and content co-ordination system to small planning and design groups by the means of an open source middle-ware. The middle-ware focuses on problems typical to urban planning and therefore features a geo-data repository as well as a job runtime administration, to coordinate simulation models and their multiple views. The described system architecture is accompanied by two exemplary use cases that have been used to test and further develop our concepts and implementations.}, language = {en} } @inproceedings{TreyerKleinKoenigetal., author = {Treyer, Lukas and Klein, Bernhard and K{\"o}nig, Reinhard and Meixner, Christine}, title = {Lightweight urban computation interchange (LUCI) system}, series = {FOSS4G 2015 Conference}, booktitle = {FOSS4G 2015 Conference}, publisher = {FOSS4G}, address = {Seoul, South Korea}, doi = {10.25643/bauhaus-universitaet.2504}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160118-25042}, pages = {12}, abstract = {In this paper we introduce LUCI, a Lightweight Urban Calculation Interchange system, designed to bring the advantages of a calculation and content co-ordination system to small planning and design groups by the means of an open source middle-ware. The middle-ware focuses on problems typical to urban planning and therefore features a geo-data repository as well as a job runtime administration, to coordinate simulation models and their multiple views.
The described system architecture is accompanied by two exemplary use cases that have been used to test and further develop our concepts and implementations.}, subject = {Architektur}, language = {en} } @misc{Theiler, type = {Master Thesis}, author = {Theiler, Michael}, title = {Interaktive Visualisierung von Qualit{\"a}tsdefiziten komplexer Bauwerksinformationsmodelle auf Basis der Industry Foundation Classes (IFC) in einer webbasierten Umgebung}, doi = {10.25643/bauhaus-universitaet.1786}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20121214-17869}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {93}, abstract = {Der inhaltlichen Qualit{\"a}tssicherung von Bauwerksinformationsmodellen (BIM) kommt im Zuge einer stetig wachsenden Nutzung der verwendeten BIM f{\"u}r unterschiedliche Anwendungsf{\"a}lle eine große Bedeutung zu. Diese ist f{\"u}r jede am Datenaustausch beteiligte Software dem Projektziel entsprechend durchzuf{\"u}hren. Mit den Industry Foundation Classes (IFC) steht ein etabliertes Format f{\"u}r die Beschreibung und den Austausch eines solchen Modells zur Verf{\"u}gung. F{\"u}r den Prozess der Qualit{\"a}tssicherung wird eine serverbasierte Testumgebung Bestandteil des neuen Zertifizierungsverfahrens der IFC sein. Zu diesem Zweck wurde durch das „iabi - Institut f{\"u}r angewandte Bauinformatik" in Zusammenarbeit mit „buildingSMART e.V." (http://www.buildingsmart.de) ein Global Testing Documentation Server (GTDS) implementiert. Der GTDS ist eine auf einer Datenbank basierte Web-Applikation, die folgende Intentionen verfolgt: • Bereitstellung eines Werkzeugs f{\"u}r das qualitative Testen IFC-basierter Modelle • Unterst{\"u}tzung der Kommunikation zwischen IFC Entwicklern und Anwendern • Dokumentation der Qualit{\"a}t von IFC-basierten Softwareanwendungen • Bereitstellung einer Plattform f{\"u}r die Zertifizierung von IFC Anwendungen Gegenstand der Arbeit ist die Planung und exemplarische Umsetzung eines Werkzeugs zur interaktiven Visualisierung von Qualit{\"a}tsdefiziten, die vom GTDS im Modell erkannt wurden. Die exemplarische Umsetzung soll dabei aufbauend auf den OPEN IFC TOOLS (http://www.openifctools.org) erfolgen.}, subject = {BIM}, language = {de} } @phdthesis{Gollub, author = {Gollub, Tim}, title = {Information Retrieval for the Digital Humanities}, doi = {10.25643/bauhaus-universitaet.4673}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220801-46738}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {177}, abstract = {In ten chapters, this thesis presents information retrieval technology which is tailored to the research activities that arise in the context of corpus-based digital humanities projects. The presentation is structured by a conceptual research process that is introduced in Chapter 1. The process distinguishes a set of five research activities: research question generation, corpus acquisition, research question modeling, corpus annotation, and result dissemination. Each of these research activities elicits different information retrieval tasks with special challenges, for which algorithmic approaches are presented after an introduction of the core information retrieval concepts in Chapter 2. A vital concept in many of the presented approaches is the keyquery paradigm introduced in Chapter 3, which represents an operation that returns relevant search queries in response to a given set of input documents.
Keyqueries are proposed in Chapter 4 for the recommendation of related work, and in Chapter 5 for improving access to aspects hidden in the long tail of search result lists. With pseudo-descriptions, a document expansion approach is presented in Chapter 6. The approach improves the retrieval performance for corpora where only bibliographic meta-data is originally available. In Chapter 7, the keyquery paradigm is employed to generate dynamic taxonomies for corpora in an unsupervised fashion. Chapter 8 turns to the exploration of annotated corpora, and presents scoped facets as a conceptual extension to faceted search systems, which is particularly useful in exploratory search settings. For the purpose of highlighting the major topical differences in a sequence of sub-corpora, an algorithm called topical sequence profiling is presented in Chapter 9. The thesis concludes with two pilot studies regarding the visualization of (re)search results for the means of successful result dissemination: a metaphoric interpretation of the information nutrition label, as well as the philosophical bodies, which are 3D-printed search results.}, subject = {Information Retrieval}, language = {en} } @phdthesis{Beck, author = {Beck, Stephan}, title = {Immersive Telepresence Systems and Technologies}, doi = {10.25643/bauhaus-universitaet.3856}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20190218-38569}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {149}, abstract = {Modern immersive telepresence systems enable people at different locations to meet in virtual environments using realistic three-dimensional representations of their bodies. For the realization of such a three-dimensional version of a video conferencing system, each user is continuously recorded in 3D. These 3D recordings are exchanged over the network between remote sites. At each site, the remote recordings of the users, referred to as 3D video avatars, are seamlessly integrated into a shared virtual scenery and displayed in stereoscopic 3D for each user from his or her perspective. This thesis reports on algorithmic and technical contributions to modern immersive telepresence systems and presents the design, implementation and evaluation of the first immersive group-to-group telepresence system in which each user is represented as realistic life-size 3D video avatar. The system enabled two remote user groups to meet and collaborate in a consistent shared virtual environment. The system relied on novel methods for the precise calibration and registration of color- and depth- sensors (RGBD) into the coordinate system of the application as well as an advanced distributed processing pipeline that reconstructs realistic 3D video avatars in real-time. During the course of this thesis, the calibration of 3D capturing systems was greatly improved. While the first development focused on precisely calibrating individual RGBD-sensors, the second stage presents a new method for calibrating and registering multiple color and depth sensors at a very high precision throughout a large 3D capturing volume. This method was further refined by a novel automatic optimization process that significantly speeds up the manual operation and yields similarly high accuracy. A core benefit of the new calibration method is its high runtime efficiency by directly mapping from raw depth sensor measurements into an application coordinate system and to the coordinates of its associated color sensor. 
As a result, the calibration method is an efficient solution in terms of precision and applicability in virtual reality and immersive telepresence applications. In addition to the core contributions, the results of two case studies which address 3D reconstruction and data streaming lead to the final conclusion of this thesis and to directions of future work in the rapidly advancing field of immersive telepresence research.}, subject = {Virtuelle Realit{\"a}t}, language = {en} } @article{SaqlaiGhaniKhanetal., author = {Saqlai, Syed Muhammad and Ghani, Anwar and Khan, Imran and Ahmed Khan Ghayyur, Shahbaz and Shamshirband, Shahaboddin and Nabipour, Narjes and Shokri, Manouchehr}, title = {Image Analysis Using Human Body Geometry and Size Proportion Science for Action Classification}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {volume 10, issue 16, article 5453}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10165453}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200904-42322}, pages = {24}, abstract = {Gestures are one of the basic modes of human communication and are usually used to represent different actions. Automatic recognition of these actions forms the basis for solving more complex problems like human behavior analysis, video surveillance, event detection, and sign language recognition, etc. Action recognition from images is a challenging task as key information like temporal data, object trajectory, and optical flow is not available in still images. Measuring the size of different regions of the human body, i.e., step size, arm span, length of the arm, forearm, and hand, etc., provides valuable clues for the identification of human actions. In this article, a framework for classification of the human actions is presented where humans are detected and localized through faster region-convolutional neural networks followed by morphological image processing techniques. Furthermore, geometric features from the human blob are extracted and incorporated into the classification rules for the six human actions, i.e., standing, walking, single-hand side wave, single-hand top wave, both hands side wave, and both hands top wave. The performance of the proposed technique has been evaluated using precision, recall, omission error, and commission error. The proposed technique has been comparatively analyzed in terms of overall accuracy with existing approaches, showing that it performs well in contrast to its counterparts.}, subject = {Bildanalyse}, language = {en} } @phdthesis{Weissker, author = {Weißker, Tim}, title = {Group Navigation in Multi-User Virtual Reality}, doi = {10.25643/bauhaus-universitaet.4530}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211124-45305}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {148}, abstract = {Multi-user virtual reality systems enable collocated as well as distributed users to perform collaborative activities in immersive virtual environments. A common activity in this context is to move from one location to the next as a group to explore the environment together. The simplest solution to realize these multi-user navigation processes is to provide each participant with a technique for individual navigation.
However, this approach entails some potentially undesirable consequences such as the execution of a similar navigation sequence by each participant, a regular need for coordination within the group, and, related to this, the risk of losing each other during the navigation process. To overcome these issues, this thesis performs research on group navigation techniques that move group members together through a virtual environment. The presented work was guided by four overarching research questions that address the quality requirements for group navigation techniques, the differences between collocated and distributed settings, the scalability of group navigation, and the suitability of individual and group navigation for various scenarios. This thesis approaches these questions by introducing a general conceptual framework as well as the specification of central requirements for the design of group navigation techniques. The design, implementation, and evaluation of corresponding group navigation techniques demonstrate the applicability of the proposed framework. As a first step, this thesis presents ideas for the extension of the short-range teleportation metaphor, also termed jumping, for multiple users. It derives general quality requirements for the comprehensibility of the group jumping process and introduces a corresponding technique for two collocated users. The results of two user studies indicate that sickness symptoms are not affected by user roles during group jumping and confirm improved planning accuracy for the navigator, increased spatial awareness for the passenger, and reduced cognitive load for both user roles. Next, this thesis explores the design space of group navigation techniques in distributed virtual environments. It presents a conceptual framework to systematize the design decisions for group navigation techniques based on Tuckman's model of small-group development and introduces the idea of virtual formation adjustments as part of the navigation process. A quantitative user study demonstrates that the corresponding extension of Multi-Ray Jumping for distributed dyads leads to more efficient travel sequences and reduced workload. The results of a qualitative expert review confirm these findings and provide further insights regarding the complementarity of individual and group navigation in distributed virtual environments. Then, this thesis investigates the navigation of larger groups of distributed users in the context of guided museum tours and establishes three central requirements for (scalable) group navigation techniques. These should foster the awareness of ongoing navigation activities as well as facilitate the predictability of their consequences for all group members (Comprehensibility), assist the group with avoiding collisions in the virtual environment (Obstacle Avoidance), and support placing the group in a meaningful spatial formation for the joint observation and discussion of objects (View Optimization). The work suggests a new technique to address these requirements and reports on its evaluation in an initial usability study with groups of five to ten (partially simulated) users. The results indicate easy learnability for navigators and high comprehensibility for passengers. Moreover, they also provide valuable insights for the development of group navigation techniques for even larger groups. 
Finally, this thesis embeds the previous contributions in a comprehensive literature overview and emphasizes the need to study larger, more heterogeneous, and more diverse group compositions including the related social factors that affect group dynamics. In summary, the four major research contributions of this thesis are as follows: - the framing of group navigation as a specific instance of Tuckman's model of small-group development - the derivation of central requirements for effective group navigation techniques beyond common quality factors known from single-user navigation - the introduction of virtual formation adjustments during group navigation and their integration into concrete group navigation techniques - evidence that appropriate pre-travel information and virtual formation adjustments lead to more efficient travel sequences for groups and lower workloads for both navigators and passengers Overall, the research of this thesis confirms that group navigation techniques are a valuable addition to the portfolio of interaction techniques in multi-user virtual reality systems. The conceptual framework, the derived quality requirements, and the development of novel group navigation techniques provide effective guidance for application developers and inform future research in this area.}, subject = {Virtuelle Realit{\"a}t}, language = {en} } @article{ShamshirbandJoloudariGhasemiGoletal., author = {Shamshirband, Shahaboddin and Joloudari, Javad Hassannataj and GhasemiGol, Mohammad and Saadatfar, Hamid and Mosavi, Amir and Nabipour, Narjes}, title = {FCS-MBFLEACH: Designing an Energy-Aware Fault Detection System for Mobile Wireless Sensor Networks}, series = {Mathematics}, volume = {2020}, journal = {Mathematics}, number = {Volume 8, Issue 1, article 28}, publisher = {MDPI}, doi = {10.3390/math8010028}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200107-40541}, pages = {24}, abstract = {Wireless sensor networks (WSNs) include large-scale sensor nodes that are densely distributed over a geographical region that is completely randomized for monitoring, identifying, and analyzing physical events. The crucial challenge in wireless sensor networks is the very high dependence of the sensor nodes on limited battery power to exchange information wirelessly as well as the non-rechargeable battery of the wireless sensor nodes, which makes the management and monitoring of these nodes in terms of abnormal changes very difficult. These anomalies appear under faults, including hardware, software, anomalies, and attacks by raiders, all of which affect the comprehensiveness of the data collected by wireless sensor networks. Hence, crucial measures should be taken to detect early faults in the network, despite the limitations of the sensor nodes. Machine learning methods include solutions that can be used to detect the sensor node faults in the network. The purpose of this study is to use several classification methods to compute the fault detection accuracy with different densities under two scenarios in regions of interest such as MB-FLEACH, one-class support vector machine (SVM), fuzzy one-class, or a combination of SVM and FCS-MBFLEACH methods. It should be noted that in the study so far, no super cluster head (SCH) selection has been performed to detect node faults in the network.
The simulation outcomes demonstrate that the FCS-MBFLEACH method has the best performance in terms of the accuracy of fault detection, false-positive rate (FPR), average remaining energy, and network lifetime compared to other classification methods.}, subject = {Vernetzung}, language = {en} } @article{AhmadiBaghbanSadeghzadehetal., author = {Ahmadi, Mohammad Hossein and Baghban, Alireza and Sadeghzadeh, Milad and Zamen, Mohammad and Mosavi, Amir and Shamshirband, Shahaboddin and Kumar, Ravinder and Mohammadi-Khanaposhtani, Mohammad}, title = {Evaluation of electrical efficiency of photovoltaic thermal solar collector}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {volume 14, issue 1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2020.1734094}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200304-41049}, pages = {545 -- 565}, abstract = {In this study, machine learning methods of artificial neural networks (ANNs), least squares support vector machines (LSSVM), and neuro-fuzzy are used for advancing prediction models for thermal performance of a photovoltaic-thermal solar collector (PV/T). In the proposed models, the inlet temperature, flow rate, heat, solar radiation, and the sun heat have been considered as the input variables. Data set has been extracted through experimental measurements from a novel solar collector system. Different analyses are performed to examine the credibility of the introduced models and evaluate their performances. The proposed LSSVM model outperformed the ANFIS and ANNs models. LSSVM model is reported suitable when the laboratory measurements are costly and time-consuming, or achieving such values requires sophisticated interpretations.}, subject = {Fotovoltaik}, language = {en} } @article{KargarSamadianfardParsaetal., author = {Kargar, Katayoun and Samadianfard, Saeed and Parsa, Javad and Nabipour, Narjes and Shamshirband, Shahaboddin and Mosavi, Amir and Chau, Kwok-Wing}, title = {Estimating longitudinal dispersion coefficient in natural streams using empirical models and machine learning algorithms}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {Volume 14, No. 1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2020.1712260}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200128-40775}, pages = {311 -- 322}, abstract = {The longitudinal dispersion coefficient (LDC) plays an important role in modeling the transport of pollutants and sediment in natural rivers. As a result of transportation processes, the concentration of pollutants changes along the river. Various studies have been conducted to provide simple equations for estimating LDC. In this study, machine learning methods, namely support vector regression, Gaussian process regression, M5 model tree (M5P) and random forest, and multiple linear regression were examined in predicting the LDC in natural streams. Data sets from 60 rivers around the world with different hydraulic and geometric features were gathered to develop models for LDC estimation. Statistical criteria, including correlation coefficient (CC), root mean squared error (RMSE) and mean absolute error (MAE), were used to scrutinize the models. The LDC values estimated by these models were compared with the corresponding results of common empirical models. 
The Taylor chart was used to evaluate the models and the results showed that among the machine learning models, M5P had superior performance, with CC of 0.823, RMSE of 454.9 and MAE of 380.9. The model of Sahay and Dutta, with CC of 0.795, RMSE of 460.7 and MAE of 306.1, gave more precise results than the other empirical models. The main advantage of M5P models is their ability to provide practical formulae. In conclusion, the results proved that the developed M5P model with simple formulations was superior to other machine learning models and empirical models; therefore, it can be used as a proper tool for estimating the LDC in rivers.}, subject = {Maschinelles Lernen}, language = {en} } @masterthesis{Mueller, type = {Bachelor Thesis}, author = {M{\"u}ller, Naira}, title = {Erweiterung von Fliplife mit bauphysikalischen Inhalten}, doi = {10.25643/bauhaus-universitaet.1676}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20120704-16763}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {105}, abstract = {In dieser Arbeit wurde ein Konzept erstellt, das Fliplife um einen bauphysikalischen Karriereweg erweitert. In das Spiel wurden beispielhaft bauphysikalische Inhalte sowie spielkonzept-kompatible und wissensvermittelnde Spielmechaniken implementiert.}, subject = {Social Game}, language = {de} } @masterthesis{Held2011, type = {Bachelor Thesis}, author = {Held, Janina}, title = {Entwurf eines Spieler-Modells f{\"u}r eine erweiterbare Spielplattform zur Ausbildung in der Bauphysik}, doi = {10.25643/bauhaus-universitaet.1524}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20120117-15249}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {65}, year = {2011}, abstract = {Im Projekt Intelligentes Lernen besch{\"a}ftigen sich die Professuren Content Management und Web-Technologien, Systeme der Virtuellen Realit{\"a}t und Bauphysik der Bauhaus- Universit{\"a}t Weimar mit der Entwicklung innovativer Informationstechnologien f{\"u}r eLearning- Umgebungen. In den Teilbereichen Retrieval, Extraktion und Visualisierung großer Dokumentkollektionen, sowie simulations- und planbasierter Wissensvermittlung werden Algorithmen und Werkzeuge erforscht, um eLearning-Systeme leistungsf{\"a}higer zu machen und um somit den Lernerfolg zu optimieren. Ziel des Projekts, auf dem Gebiet des simulationsbasierten Wissenstransfers, ist die Entwicklung eines Multiplayer Online Games (MOG) zur Ausbildungsunterst{\"u}tzung in der Bauphysik. Im Rahmen der vorliegenden Bachelorarbeit wird f{\"u}r diese digitale Lernsoftware ein Spieler- Modell zur Verwaltung der spielerspezifischen Daten entworfen und in das bestehende Framework integriert. Der Schwerpunkt der Arbeit liegt in der Organisation der erlernten F{\"a}higkeiten des Spielers und in der an den Wissensstand angepassten Auswahl geeigneter Spielaufgaben. F{\"u}r die Anwendung im eLearning-Bereich ist die Erweiterbarkeit des Modells um neue Lernkomplexe eine wesentliche Anforderung.}, subject = {Skill}, language = {de} } @phdthesis{Schollmeyer, author = {Schollmeyer, Andre}, title = {Efficient and High-Quality Rendering of Higher-Order Geometric Data Representations}, doi = {10.25643/bauhaus-universitaet.3823}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20181120-38234}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {143}, abstract = {Computer-Aided Design (CAD) bezeichnet den Entwurf industrieller Produkte mit Hilfe von virtuellen 3D Modellen. 
Ein CAD-Modell besteht aus parametrischen Kurven und Fl{\"a}chen, in den meisten F{\"a}llen non-uniform rational B-Splines (NURBS). Diese mathematische Beschreibung wird ebenfalls zur Analyse, Optimierung und Pr{\"a}sentation des Modells verwendet. In jeder dieser Entwicklungsphasen wird eine unterschiedliche visuelle Darstellung ben{\"o}tigt, um den entsprechenden Nutzern ein geeignetes Feedback zu geben. Designer bevorzugen beispielsweise illustrative oder realistische Darstellungen, Ingenieure ben{\"o}tigen eine verst{\"a}ndliche Visualisierung der Simulationsergebnisse, w{\"a}hrend eine immersive 3D Darstellung bei einer Benutzbarkeitsanalyse oder der Designauswahl hilfreich sein kann. Die interaktive Darstellung von NURBS-Modellen und -Simulationsdaten ist jedoch aufgrund des hohen Rechenaufwandes und der eingeschr{\"a}nkten Hardwareunterst{\"u}tzung eine große Herausforderung. Diese Arbeit stellt vier neuartige Verfahren vor, welche sich mit der interaktiven Darstellung von NURBS-Modellen und Simulationsdaten befassen. Die vorgestellten Algorithmen nutzen neue F{\"a}higkeiten aktueller Grafikkarten aus, um den Stand der Technik bez{\"u}glich Qualit{\"a}t, Effizienz und Darstellungsgeschwindigkeit zu verbessern. Zwei dieser Verfahren befassen sich mit der direkten Darstellung der parametrischen Beschreibung ohne Approximationen oder zeitaufw{\"a}ndige Vorberechnungen. Die dabei vorgestellten Datenstrukturen und Algorithmen erm{\"o}glichen die effiziente Unterteilung, Klassifizierung, Tessellierung und Darstellung getrimmter NURBS-Fl{\"a}chen und einen interaktiven Ray-Casting-Algorithmus f{\"u}r die Isofl{\"a}chenvisualisierung von NURBS-basierten isogeometrischen Analysen. Die weiteren zwei Verfahren beschreiben zum einen das vielseitige Konzept der programmierbaren Transparenz f{\"u}r illustrative und verst{\"a}ndliche Visualisierungen tiefenkomplexer CAD-Modelle und zum anderen eine neue hybride Methode zur Reprojektion halbtransparenter und undurchsichtiger Bildinformation f{\"u}r die Beschleunigung der Erzeugung von stereoskopischen Bildpaaren. Die beiden letztgenannten Ans{\"a}tze basieren auf rasterisierter Geometrie und sind somit ebenfalls f{\"u}r normale Dreiecksmodelle anwendbar, wodurch die Arbeiten auch einen wichtigen Beitrag in den Bereichen der Computergrafik und der virtuellen Realit{\"a}t darstellen. Die Auswertung der Arbeit wurde mit großen, realen NURBS-Datens{\"a}tzen durchgef{\"u}hrt. Die Resultate zeigen, dass die direkte Darstellung auf Grundlage der parametrischen Beschreibung mit interaktiven Bildwiederholraten und in subpixelgenauer Qualit{\"a}t m{\"o}glich ist. Die Einf{\"u}hrung programmierbarer Transparenz erm{\"o}glicht zudem die Umsetzung kollaborativer 3D Interaktionstechniken f{\"u}r die Exploration der Modelle in virtuellen Umgebungen sowie illustrative und verst{\"a}ndliche Visualisierungen tiefenkomplexer CAD-Modelle. Die Erzeugung stereoskopischer Bildpaare f{\"u}r die interaktive Visualisierung auf 3D Displays konnte beschleunigt werden.
Diese messbare Verbesserung wurde zudem im Rahmen einer Nutzerstudie als wahrnehmbar und vorteilhaft befunden.}, subject = {Rendering}, language = {en} } @misc{Genc, type = {Master Thesis}, author = {Genc, Emir}, title = {Decoding Public Life in Urban Soundscape: The Case of Weimar}, doi = {10.25643/bauhaus-universitaet.2743}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170213-27438}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {The research field of sound landscape and public life initially drew my attention during the master class 'Media of the Urban', originally 'Medien des Urbanen', which was given by Prof. Dr. Gabriele Schabacher in the 2015 summer semester. For the relevant class, I conducted a conceptual case study in Istanbul, Beyoglu District, with the intention of analysing the perception of the space by urban sound. During the summer of 2015 I recorded various sounds of different spatial settings and developed the analysis by comparing the situations. By that time, I realized the inherent property of sound as a medium for our perception in the urban context. In the 2015-2016 winter semester, I participated in the master class of the architectural project, named 'Build Allegory', which was given by Prof. Dipl.-Ing. Heike B{\"u}ttner. The project was situated in Berlin Westkreuz, AVUS north curve, on the highway and was originally a race track from 1921. In this context, the aim of my project was to answer various questions, the main one being: how does the architectural form shape the sound of the place? And, how does the sound of the place shape the architectural form? Since the place still serves mainly vehicles, although its function has changed, the sound objects and the context have remained. Through the existence of contextual references, I started with creating a computational tool for analysing the acoustic characteristics of this urban setting, which fundamentally provides results as a sound cloud derived from the sound ray tracing method. Using this soundscape analysis method, which I developed, the computational tool assisted me in finding an optimum reciprocal relation between architecture and sound. Since I have been working on soundscape in the context of architecture, urban situations, public life and public space, I was determined to produce comprehensive research in this field and propound the hypothesis of a reciprocity between social behaviours in public space and the sound landscape. To what extent does this reciprocity exist? What are the effects of public life on the sonic configurations of the space, and vice versa?}, subject = {{\"O}ffentlicher Raum}, language = {en} } @phdthesis{PreisDutra, author = {Preis Dutra, Joatan}, title = {Cultural Heritage on Mobile Devices: Building Guidelines for UNESCO World Heritage Sites' Apps}, doi = {10.25643/bauhaus-universitaet.4531}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211129-45319}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {360}, abstract = {Technological improvements and access provide a fertile scenario for creating and developing mobile applications (apps). This scenario results in a myriad of apps providing information regarding touristic destinations, including those with a cultural profile, such as those dedicated to UNESCO World Heritage Sites (WHS). However, not all of the apps have the same efficiency.
In order to have a successful app, its development must consider usability aspects and features aligned with reliable content. Although guidelines for mobile usability are broadly available, they are generic, and none of them concentrates specifically on cultural heritage places, especially those in an open-air setting. This research aims to fill this literature gap and discusses how to adapt and develop specific guidelines for a better outdoor WHS experience. It uses an empirical approach applied to an open-air WHS city: Weimar and its Bauhaus and Classical Weimar sites. In order to build a new set of guidelines for open-air WHS, this research used a systematic approach to compare literature-based guidelines to industry-based ones (based on affordances), extracted from the available apps dedicated to WHS set in Germany. The guidelines compiled from both sources were tested comparatively using two prototypes built from the distinct guideline sets, resulting in a set of recommendations that collects the best approaches from both sources and suggests new ones derived from the evaluation.}, subject = {Benutzerschnittstellenentwurfssystem}, language = {en} } @article{HassannatajJoloudariHassannatajJoloudariSaadatfaretal., author = {Hassannataj Joloudari, Javad and Hassannataj Joloudari, Edris and Saadatfar, Hamid and GhasemiGol, Mohammad and Razavi, Seyyed Mohammad and Mosavi, Amir and Nabipour, Narjes and Shamshirband, Shahaboddin and Nadai, Laszlo}, title = {Coronary Artery Disease Diagnosis: Ranking the Significant Features Using a Random Trees Model}, series = {International Journal of Environmental Research and Public Health, IJERPH}, volume = {2020}, journal = {International Journal of Environmental Research and Public Health, IJERPH}, number = {Volume 17, Issue 3, 731}, publisher = {MDPI}, doi = {10.3390/ijerph17030731}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40819}, pages = {24}, abstract = {Heart disease is one of the most common diseases in middle-aged citizens. Among the vast number of heart diseases, coronary artery disease (CAD) is considered a common cardiovascular disease with a high death rate. The most popular tool for diagnosing CAD is the use of medical imaging, e.g., angiography. However, angiography is known for being costly and also associated with a number of side effects. Hence, the purpose of this study is to increase the accuracy of coronary heart disease diagnosis through selecting significant predictive features in order of their ranking. In this study, we propose an integrated method using machine learning. The machine learning methods of random trees (RTs), decision tree of C5.0, support vector machine (SVM), and decision tree of Chi-squared automatic interaction detection (CHAID) are used in this study. The proposed method shows promising results and the study confirms that the RTs model outperforms other models.}, subject = {Maschinelles Lernen}, language = {en} } @inproceedings{ChirkinKoenig, author = {Chirkin, Artem and K{\"o}nig, Reinhard}, title = {Concept of Interactive Machine Learning in Urban Design Problems : proceedings}, publisher = {ACM New York, NY, USA}, address = {San Jose, CA, USA}, doi = {10.25643/bauhaus-universitaet.2600}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160622-26000}, pages = {10 -- 13}, abstract = {This work presents a concept of interactive machine learning in a human design process. An urban design problem is viewed as a multiple-criteria optimization problem.
The outlined feature of an urban design problem is the dependence of a design goal on a context of the problem. We model the design goal as a randomized fitness measure that depends on the context. In terms of multiple-criteria decision analysis (MCDA), the defined measure corresponds to a subjective expected utility of a user. In the first stage of the proposed approach we let the algorithm explore a design space using clustering techniques. The second stage is an interactive design loop; the user makes a proposal, then the program optimizes it, gets the user's feedback and returns back the control over the application interface.}, subject = {Stadtgestaltung}, language = {en} } @phdthesis{AlKhatib2021, author = {Al Khatib, Khalid}, title = {Computational Analysis of Argumentation Strategies}, doi = {10.25643/bauhaus-universitaet.4461}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210719-44612}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {134}, year = {2021}, abstract = {The computational analysis of argumentation strategies is substantial for many downstream applications. It is required for nearly all kinds of text synthesis, writing assistance, and dialogue-management tools. While various tasks have been tackled in the area of computational argumentation, such as argumentation mining and quality assessment, the task of the computational analysis of argumentation strategies in texts has so far been overlooked. This thesis principally approaches the analysis of the strategies manifested in the persuasive argumentative discourses that aim for persuasion as well as in the deliberative argumentative discourses that aim for consensus. To this end, the thesis presents a novel view of argumentation strategies for the above two goals. Based on this view, new models for pragmatic and stylistic argument attributes are proposed, new methods for the identification of the modelled attributes have been developed, and a new set of strategy principles in texts according to the identified attributes is presented and explored. Overall, the thesis contributes to the theory, data, method, and evaluation aspects of the analysis of argumentation strategies. The models, methods, and principles developed and explored in this thesis can be regarded as essential for promoting the applications mentioned above, among others.}, subject = {Argumentation}, language = {en} } @article{MosaviShamshirbandEsmaeilbeikietal., author = {Mosavi, Amir and Shamshirband, Shahaboddin and Esmaeilbeiki, Fatemeh and Zarehaghi, Davoud and Neyshabouri, Mohammadreza and Samadianfard, Saeed and Ghorbani, Mohammad Ali and Nabipour, Narjes and Chau, Kwok-Wing}, title = {Comparative analysis of hybrid models of firefly optimization algorithm with support vector machines and multilayer perceptron for predicting soil temperature at different depths}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {Volume 14, Issue 1}, doi = {10.1080/19942060.2020.1788644}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200911-42347}, pages = {939 -- 953}, abstract = {This research aims to model soil temperature (ST) using machine learning models of multilayer perceptron (MLP) algorithm and support vector machine (SVM) in hybrid form with the Firefly optimization algorithm, i.e. MLP-FFA and SVM-FFA. 
In the current study, measured ST and meteorological parameters of the Tabriz and Ahar weather stations over the period 2013-2015 are used for training and testing the studied models with delays of one and two days. To ascertain conclusive results for validation of the proposed hybrid models, the error metrics are benchmarked in an independent testing period. Moreover, Taylor diagrams were utilized for that purpose. The obtained results showed that, in the case of a one-day delay, except in predicting ST at 5 cm below the soil surface (ST5cm) at Tabriz station, MLP-FFA produced superior results compared with the MLP, SVM, and SVM-FFA models. However, for a two-day delay, MLP-FFA indicated increased accuracy in predicting ST5cm and ST20cm of Tabriz station and ST10cm of Ahar station in comparison with SVM-FFA. Additionally, for all of the prescribed models, the performance of the MLP-FFA and SVM-FFA hybrid models in the testing phase was found to be meaningfully superior to the classical MLP and SVM models.}, subject = {Bodentemperatur}, language = {en} } @phdthesis{Azari, author = {Azari, Banafsheh}, title = {Bidirectional Texture Functions: Acquisition, Rendering and Quality Evaluation}, doi = {10.25643/bauhaus-universitaet.3779}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20180820-37790}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {As one of its primary objectives, Computer Graphics aims at the simulation of fabrics' complex reflection behaviour. Characteristic surface reflectance properties of fabrics, such as highlights, anisotropy or retro-reflection, make them difficult to synthesize. This problem can be solved by using Bidirectional Texture Functions (BTFs), a 2D texture captured under various light and view directions. But the acquisition of Bidirectional Texture Functions requires an expensive setup and the measurement process is very time-consuming. Moreover, the size of BTF data can range from hundreds of megabytes to several gigabytes, as a large number of high-resolution pictures have to be used in the ideal case. Furthermore, the three-dimensional textured models rendered with the BTF rendering method are subject to various types of distortion during acquisition, synthesis, compression, and processing. An appropriate image quality assessment scheme is a useful tool for evaluating image processing algorithms, especially algorithms designed to leave the image visually unchanged. In this contribution, we present and conduct an investigation aimed at locating a robust threshold for downsampling BTF images without losing perceptual quality. To this end, an experimental study on how decreasing the texture resolution influences the perceived quality of the rendered images has been presented and discussed. Next, two basic improvements to the use of BTFs for rendering are presented: firstly, the study addresses the cost of BTF acquisition by introducing a flexible low-cost step motor setup for BTF acquisition allowing the generation of a high-quality BTF database taken at user-defined arbitrary angles. Secondly, the number of acquired textures is adapted to the perceptual quality of the renderings so that the database size is not overloaded and can fit better in memory when rendered. Although visual attention is one of the essential attributes of the HVS, it is neglected in most existing quality metrics.
In this thesis an appropriate objective quality metric based on extracting visual attention regions from images and adequate investigation of the influence of visual attention on perceived image quality assessment, called Visual Attention Based Image Quality Metric (VABIQM), has been proposed. The novel metric indicates that considering visual saliency can offer significant benefits with regard to constructing objective quality metrics to predict the visible quality differences in images rendered by compressed and non-compressed BTFs and also outperforms straightforward existing image quality metrics at detecting perceivable differences.}, subject = {Wahrnehmung}, language = {en} } @misc{Alabassy, type = {Master Thesis}, author = {Alabassy, Mohamed Said Helmy}, title = {Automated Approach for Building Information Modelling of Crack Damages via Image Segmentation and Image-based 3D Reconstruction}, doi = {10.25643/bauhaus-universitaet.6416}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20230818-64162}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {101}, abstract = {As machine vision-based inspection methods in the field of Structural Health Monitoring (SHM) continue to advance, the need for integrating resulting inspection and maintenance data into a centralised building information model for structures notably grows. Consequently, the modelling of found damages based on those images in a streamlined automated manner becomes increasingly important, not just for saving time and money spent on updating the model to include the latest information gathered through each inspection, but also to easily visualise them, provide all stakeholders involved with a comprehensive digital representation containing all the necessary information to fully understand the structure's current condition, keep track of any progressing deterioration, estimate the reduced load bearing capacity of the damaged element in the model or simulate the propagation of cracks to make well-informed decisions interactively and facilitate maintenance actions that optimally extend the service life of the structure. Though significant progress has been recently made in information modelling of damages, the current devised methods for the geometrical modelling approach are cumbersome and time consuming to implement in a full-scale model. For crack damages, an approach for a feasible automated image-based modelling is proposed utilising neural networks, classical computer vision and computational geometry techniques with the aim of creating valid shapes to be introduced into the information model, including related semantic properties and attributes from inspection data (e.g., width, depth, length, date, etc.). 
The creation of such models opens the door for further uses, ranging from more accurate structural analysis to the simulation of damage propagation in model elements and the estimation of deterioration rates, and allows for better documentation, data sharing, and realistic visualisation of damages in a 3D model.}, subject = {Building Information Modeling}, language = {en} } @phdthesis{RadmardRahmani, author = {Radmard Rahmani, Hamid}, title = {Artificial Intelligence Approach for Seismic Control of Structures}, doi = {10.25643/bauhaus-universitaet.4135}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200417-41359}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {In the first part of this research, the utilization of tuned mass dampers in the vibration control of tall buildings during earthquake excitations is studied. The main issues such as optimizing the parameters of the dampers and studying the effects of the frequency content of the target earthquakes are addressed. The non-dominated sorting genetic algorithm method is improved by upgrading its genetic operators, and is utilized to develop a framework for determining the optimum placement and parameters of dampers in tall buildings. A case study is presented in which the optimal placement and properties of dampers are determined for a model of a tall building under different earthquake excitations through computer simulations. In the second part, a novel framework for the brain learning-based intelligent seismic control of smart structures is developed. In this approach, a deep neural network learns how to improve structural responses during earthquake excitations using feedback control. The reinforcement learning method is improved and utilized to develop a framework for training the deep neural network as an intelligent controller. The efficiency of the developed framework is examined through two case studies including a single-degree-of-freedom system and a high-rise building under different earthquake excitation records. The results show that the controller gradually develops an optimum control policy to reduce the vibrations of a structure under an earthquake excitation through a cyclical process of actions and observations. It is shown that the controller efficiently improves the structural responses under new earthquake excitations for which it was not trained. Moreover, it is shown that the controller has a stable performance under uncertainties.}, subject = {Erdbeben}, language = {en} } @misc{Lang, type = {Master Thesis}, author = {Lang, Kevin}, title = {Argument Search with Voice Assistants}, doi = {10.25643/bauhaus-universitaet.3935}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20190617-39353}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {100}, abstract = {The need for finding persuasive arguments can arise in a variety of domains such as politics, finance, marketing or personal entertainment. In these domains, there is a demand to make decisions by oneself or to convince somebody about a specific topic. To obtain a conclusion, one has to thoroughly search different sources in literature and on the web to compare various arguments. Voice interfaces, in the form of smartphone applications or smart speakers, present the user with natural conversations and a comfortable way to make search requests, in contrast to a traditional search interface with keyboard and display. Benefits and obstacles of such a new interface are analyzed by conducting two studies.
The first one is a survey analyzing the target group, with questions about situations, motivations, and features in demand. The latter is a wizard-of-oz experiment investigating how users formulate requests to such a novel system. The results indicate that a search interface with conversational abilities can make for a helpful assistant, but to satisfy the demands of a broader audience some additional information retrieval and visualization features need to be implemented.}, subject = {Amazon Alexa}, language = {en} } @unpublished{RezakazemiMosaviShirazian, author = {Rezakazemi, Mashallah and Mosavi, Amir and Shirazian, Saeed}, title = {ANFIS pattern for molecular membranes separation optimization}, volume = {2018}, doi = {10.25643/BAUHAUS-UNIVERSITAET.3821}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20181122-38212}, pages = {1 -- 20}, abstract = {In this work, molecular separation of aqueous-organic systems was simulated by using combined soft computing-mechanistic approaches. The considered separation system was a microporous membrane contactor for the separation of benzoic acid from water by contacting with an organic phase containing extractor molecules. Indeed, extractive separation is carried out using membrane technology where a solute-organic complex is formed at the interface. The main focus was to develop a simulation methodology for predicting the concentration distribution of the solute (benzoic acid) in the feed side of the membrane system, as the removal efficiency of the system is determined by the concentration distribution of the solute in the feed channel. The pattern of the Adaptive Neuro-Fuzzy Inference System (ANFIS) was optimized by finding the optimum membership function, learning percentage, and number of rules. The ANFIS was trained using the data extracted from the CFD simulation of the membrane system. The comparisons between the concentration distribution predicted by ANFIS and the CFD data revealed that the optimized ANFIS pattern can be used as a predictive tool for simulation of the process. An R2 higher than 0.99 was obtained for the optimized ANFIS model. The main advantage of the developed methodology is its very low computational time for simulating the system, and it can be used as a rigorous simulation tool for the understanding and design of membrane-based systems. Highlights: molecular separation using microporous membranes; development of a hybrid ANFIS-CFD model for the separation process; optimization of the ANFIS structure for prediction of the separation process.}, subject = {Fluid}, language = {en} } @phdthesis{Anderka, author = {Anderka, Maik}, title = {Analyzing and Predicting Quality Flaws in User-generated Content: The Case of Wikipedia}, doi = {10.25643/bauhaus-universitaet.1977}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20130709-19778}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {Web applications that are based on user-generated content are often criticized for containing low-quality information; a popular example is the online encyclopedia Wikipedia. The major points of criticism pertain to the accuracy, neutrality, and reliability of information. The identification of low-quality information is an important task since for a huge number of people around the world it has become a habit to first visit Wikipedia in case of an information need.
Existing research on quality assessment in Wikipedia either investigates only small samples of articles, or else deals with the classification of content into high-quality or low-quality. This thesis goes further, it targets the investigation of quality flaws, thus providing specific indications of the respects in which low-quality content needs improvement. The original contributions of this thesis, which relate to the fields of user-generated content analysis, data mining, and machine learning, can be summarized as follows: (1) We propose the investigation of quality flaws in Wikipedia based on user-defined cleanup tags. Cleanup tags are commonly used in the Wikipedia community to tag content that has some shortcomings. Our approach is based on the hypothesis that each cleanup tag defines a particular quality flaw. (2) We provide the first comprehensive breakdown of Wikipedia's quality flaw structure. We present a flaw organization schema, and we conduct an extensive exploratory data analysis which reveals (a) the flaws that actually exist, (b) the distribution of flaws in Wikipedia, and, (c) the extent of flawed content. (3) We present the first breakdown of Wikipedia's quality flaw evolution. We consider the entire history of the English Wikipedia from 2001 to 2012, which comprises more than 508 million page revisions, summing up to 7.9 TB. Our analysis reveals (a) how the incidence and the extent of flaws have evolved, and, (b) how the handling and the perception of flaws have changed over time. (4) We are the first who operationalize an algorithmic prediction of quality flaws in Wikipedia. We cast quality flaw prediction as a one-class classification problem, develop a tailored quality flaw model, and employ a dedicated one-class machine learning approach. A comprehensive evaluation based on human-labeled Wikipedia articles underlines the practical applicability of our approach.}, subject = {Data Mining}, language = {en} } @article{HomaeiSoleimaniShamshirbandetal., author = {Homaei, Mohammad Hossein and Soleimani, Faezeh and Shamshirband, Shahaboddin and Mosavi, Amir and Nabipour, Narjes and Varkonyi-Koczy, Annamaria R.}, title = {An Enhanced Distributed Congestion Control Method for Classical 6LowPAN Protocols Using Fuzzy Decision System}, series = {IEEE Access}, journal = {IEEE Access}, number = {volume 8}, publisher = {IEEE}, doi = {10.1109/ACCESS.2020.2968524}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40805}, pages = {20628 -- 20645}, abstract = {The classical Internet of things routing and wireless sensor networks can provide more precise monitoring of the covered area due to the higher number of utilized nodes. Because of the limitations in shared transfer media, many nodes in the network are prone to the collision in simultaneous transmissions. Medium access control protocols are usually more practical in networks with low traffic, which are not subjected to external noise from adjacent frequencies. There are preventive, detection and control solutions to congestion management in the network which are all the focus of this study. In the congestion prevention phase, the proposed method chooses the next step of the path using the Fuzzy decision-making system to distribute network traffic via optimal paths. In the congestion detection phase, a dynamic approach to queue management was designed to detect congestion in the least amount of time and prevent the collision. 
In the congestion control phase, the back-pressure method was used, based on the queue quality, to decrease the probability of including the pre-congested node in the pathway. The main goals of this study are to balance energy consumption in network nodes, reduce the rate of lost packets, and increase the quality of service in routing. Simulation results showed that the proposed Congestion Control Fuzzy Decision Making (CCFDM) method was more capable of improving routing parameters than recent algorithms.}, subject = {Internet der Dinge}, language = {en} } @unpublished{SteinerBourinetLahmer, author = {Steiner, Maria and Bourinet, Jean-Marc and Lahmer, Tom}, title = {An adaptive sampling method for global sensitivity analysis based on least-squares support vector regression}, doi = {10.25643/BAUHAUS-UNIVERSITAET.3832}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20181218-38320}, pages = {1 -- 33}, abstract = {In the field of engineering, surrogate models are commonly used for approximating the behavior of a physical phenomenon in order to reduce the computational costs. Generally, a surrogate model is created based on a set of training data, where a typical method for the statistical design is Latin hypercube sampling (LHS). Even though a space-filling distribution of the training data is reached, the sampling process takes no information on the underlying behavior of the physical phenomenon into account and new data cannot be sampled in the same distribution if the approximation quality is not sufficient. Therefore, in this study we present a novel adaptive sampling method based on a specific surrogate model, the least-squares support vector regression. The adaptive sampling method generates training data based on the uncertainty in the local prognosis capabilities of the surrogate model - areas of higher uncertainty require more sample data. The approach offers a cost-efficient calculation due to the properties of the least-squares support vector regression. The opportunities of the adaptive sampling method are proven in comparison with the LHS on different analytical examples. Furthermore, the adaptive sampling method is applied to the calculation of global sensitivity values according to Sobol, where it shows faster convergence than the LHS method. With the applications in this paper it is shown that the presented adaptive sampling method improves the estimation of global sensitivity values, hence reducing the overall computational costs visibly.}, subject = {Approximation}, language = {en} } @phdthesis{Kaltenbrunner, author = {Kaltenbrunner, Martin}, title = {An Abstraction Framework for Tangible Interactive Surfaces}, doi = {10.25643/bauhaus-universitaet.3717}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20180205-37178}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {138}, abstract = {This cumulative dissertation discusses - by the example of four subsequent publications - the various layers of a tangible interaction framework, which has been developed in conjunction with an electronic musical instrument with a tabletop tangible user interface. Based on the experiences that have been collected during the design and implementation of that particular musical application, this research mainly concentrates on the definition of a general-purpose abstraction model for the encapsulation of physical interface components that are commonly employed in the context of an interactive surface environment.
Along with a detailed description of the underlying abstraction model, this dissertation also describes an actual implementation in the form of a detailed protocol syntax, which constitutes the common element of a distributed architecture for the construction of surface-based tangible user interfaces. The initial implementation of the presented abstraction model within an actual application toolkit is comprised of the TUIO protocol and the related computer-vision based object and multi-touch tracking software reacTIVision, along with its principal application within the Reactable synthesizer. The dissertation concludes with an evaluation and extension of the initial TUIO model, by presenting TUIO2 - a next generation abstraction model designed for a more comprehensive range of tangible interaction platforms and related application scenarios.}, subject = {Informatik}, language = {en} } @article{SaadatfarKhosraviHassannatajJoloudarietal., author = {Saadatfar, Hamid and Khosravi, Samiyeh and Hassannataj Joloudari, Javad and Mosavi, Amir and Shamshirband, Shahaboddin}, title = {A New K-Nearest Neighbors Classifier for Big Data Based on Efficient Data Pruning}, series = {Mathematics}, volume = {2020}, journal = {Mathematics}, number = {volume 8, issue 2, article 286}, publisher = {MDPI}, doi = {10.3390/math8020286}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200225-40996}, pages = {12}, abstract = {The K-nearest neighbors (KNN) machine learning algorithm is a well-known non-parametric classification method. However, like other traditional data mining methods, applying it on big data comes with computational challenges. Indeed, KNN determines the class of a new sample based on the class of its nearest neighbors; however, identifying the neighbors in a large amount of data imposes a large computational cost, so that it is no longer feasible on a single computing machine. One of the proposed techniques to make classification methods applicable on large datasets is pruning. LC-KNN is an improved KNN method which first clusters the data into smaller partitions using the K-means clustering method and then, for each new sample, applies KNN on the partition whose center is nearest. However, because the clusters have different shapes and densities, selection of the appropriate cluster is a challenge. In this paper, an approach has been proposed to improve the pruning phase of the LC-KNN method by taking these factors into account. The proposed approach helps to choose a more appropriate cluster in which to look for the neighbors, thus increasing the classification accuracy. The performance of the proposed approach is evaluated on different real datasets. The experimental results show the effectiveness of the proposed approach and its higher classification accuracy and lower time cost in comparison to other recent relevant methods.}, subject = {Maschinelles Lernen}, language = {en} } @unpublished{MosaviTorabiHashemietal., author = {Mosavi, Amir and Torabi, Mehrnoosh and Hashemi, Sattar and Saybani, Mahmoud Reza and Shamshirband, Shahaboddin}, title = {A Hybrid Clustering and Classification Technique for Forecasting Short-Term Energy Consumption}, doi = {10.25643/bauhaus-universitaet.3755}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20180907-37550}, abstract = {Electrical energy distribution companies in Iran have to announce their energy demand at least three days ahead of the market opening. Therefore, an accurate load estimation is highly crucial.
This research invoked a methodology based on CRISP data mining and used SVM, ANN, and CBA-ANN-SVM (a novel hybrid model of clustering with both widely used ANN and SVM) to predict the short-term electrical energy demand of Bandarabbas. In previous studies, researchers introduced only a few effective parameters and did not achieve a reasonable error for Bandarabbas power consumption. In this research, we tried to identify all effective parameters, and with the use of the CBA-ANN-SVM model the error rate has been minimized. After consulting with experts in the field of power consumption and plotting daily power consumption for each week, this research showed that official holidays and weekends have an impact on power consumption. When the weather gets warmer, the consumption of electrical energy increases due to the use of electrical air conditioners. Also, consumption patterns in warm and cold months are different. Analyzing the power consumption of the same month for different years showed high similarity in power consumption patterns. Factors with high impact on power consumption were identified and statistical methods were utilized to prove their impacts. Using SVM, ANN and CBA-ANN-SVM, the model was built. Since the proposed method (CBA-ANN-SVM) has a low MAPE of 1.474 (4 clusters) and 1.297 (3 clusters) in comparison with SVM (MAPE = 2.015) and ANN (MAPE = 1.790), this model was selected as the final model. The final model combines the benefits of both models with the benefits of clustering. The clustering algorithm discovers the data structure and divides the data into several clusters based on the similarities and differences between them. Because the data inside each cluster are more similar to each other than to the entire data set, modeling each cluster separately yields better results. For future research, we suggest using fuzzy methods and genetic algorithms or a hybrid of both to forecast each cluster. It is also possible to use fuzzy methods or genetic algorithms or a hybrid of both without using clustering. It is expected that such models will produce better and more accurate results. This paper presents a hybrid approach to predict the electric energy usage of weather-sensitive loads. The presented method utilizes the clustering paradigm along with ANN and SVM approaches for accurate short-term prediction of electric energy usage, using weather data. Since the methodology invoked in this research is based on CRISP data mining, data preparation has received a great deal of attention in this research. Once data pre-processing was done, the underlying pattern of electric energy consumption was extracted by means of machine learning methods to precisely forecast short-term energy consumption. The proposed approach (CBA-ANN-SVM) was applied to real load data, resulting in higher accuracy compared to the existing models.
2018 American Institute of Chemical Engineers Environ Prog, 2018 https://doi.org/10.1002/ep.12934}, subject = {Data Mining}, language = {en} } @phdthesis{Vogler, author = {Vogler, Verena}, title = {A framework for artificial coral reef design: Integrating computational modelling and high precision monitoring strategies for artificial coral reefs - an Ecosystem-aware design approach in times of climate change}, isbn = {978-3-00-074495-2}, doi = {10.25643/bauhaus-universitaet.4611}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220322-46115}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {243}, abstract = {Tropical coral reefs, one of the world's oldest ecosystems which support some of the highest levels of biodiversity on the planet, are currently facing an unprecedented ecological crisis during this massive human-activity-induced period of extinction. Hence, tropical reefs symbolically stand for the destructive effects of human activities on nature [4], [5]. Artificial reefs are excellent examples of how architectural design can be combined with ecosystem regeneration [6], [7], [8]. However, to work at the interface between the artificial and the complex and temporal nature of natural systems presents a challenge, i.a. in respect to the B-rep modelling legacy of computational modelling. The presented doctorate investigates strategies on how to apply digital practice to realise what is an essential bulwark to retain reefs in impossibly challenging times. Beyond the main question of integrating computational modelling and high precision monitoring strategies in artificial coral reef design, this doctorate explores techniques, methods, and linking frameworks to support future research and practice in ecology led design contexts. Considering the many existing approaches for artificial coral reefs design, one finds they often fall short in precisely understanding the relationships between architectural and ecological aspects (e.g. how a surface design and material composition can foster coral larvae settlement, or structural three-dimensionality enhance biodiversity) and lack an integrated underwater (UW) monitoring process. Such a process is necessary in order to gather knowledge about the ecosystem and make it available for design, and to learn whether artificial structures contribute to reef regeneration or rather harm the coral reef ecosystem. For the research, empirical experimental methods were applied: Algorithmic coral reef design, high precision UW monitoring, computational modelling and simulation, and validated through parallel real-world physical experimentation - two Artificial Reef Prototypes (ARPs) in Gili Trawangan, Indonesia (2012-today). Multiple discrete methods and sub techniques were developed in seventeen computational experiments and applied in a way in which many are cross valid and integrated in an overall framework that is offered as a significant contribution to the field. Other main contributions include the Ecosystem-aware design approach, Key Performance Indicators (KPIs) for coral reef design, algorithmic design and fabrication of Biorock cathodes, new high precision UW monitoring strategies, long-term real-world constructed experiments, new digital analysis methods and two new front-end web-based tools for reef design and monitoring reefs. The methodological framework is a finding of the research that has many technical components that were tested and combined in this way for the very first time. 
In summary, the thesis responds to the urgency and relevance of preserving marine species in tropical reefs during this massive extinction period by offering a differentiated approach towards artificial coral reefs - demonstrating the feasibility of digitally designing such 'living architecture' according to multiple context and performance parameters. It also provides an in-depth critical discussion of computational design and architecture in the context of ecosystem regeneration and Planetary Thinking. In that respect, the thesis functions as both theoretical and practical background for computational design, ecology and marine conservation - not only to foster the design of artificial coral reefs technically but also to provide essential criteria and techniques for conceiving them. Keywords: Artificial coral reefs, computational modelling, high precision underwater monitoring, ecology in design.}, subject = {Korallenriff}, language = {en} } @phdthesis{Kunert, author = {Kunert, Andr{\´e}}, title = {3D Interaction Techniques in Multi-User Virtual Reality : towards scalable templates and implementation patterns for cooperative interfaces}, doi = {10.25643/bauhaus-universitaet.4296}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201204-42962}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {147}, abstract = {Multi-user projection systems provide a coherent 3D interaction space for multiple co-located users that facilitates mutual awareness, full-body interaction, and the coordination of activities. The users perceive the shared scene from their respective viewpoints and can directly interact with the 3D content. This thesis reports on novel interaction patterns for collaborative 3D interaction for local and distributed user groups based on such multi-user projection environments. A particular focus of our developments lies in the provision of multiple independent interaction territories in our workspaces and their tight integration into collaborative workflows. The motivation for such multi-focus workspaces is grounded in research on social cooperation patterns, specifically in the requirement for supporting phases of loose and tight collaboration and the emergence of dedicated working territories for private usage and public exchange. We realized independent interaction territories in the form of handheld virtual viewing windows and multiple co-located hardware displays in a joint workspace. They provide independent views of a shared virtual environment and serve as access points for the exploration and manipulation of the 3D content. Their tight integration into our workspace supports fluent transitions between individual work and joint user engagement. The different affordances of various displays in an exemplary workspace consisting of a large 3D wall, a 3D tabletop, and handheld virtual viewing windows, promote different usage scenarios, for instance for views from an egocentric perspective, miniature scene representations, close-up views, or storage and transfer areas. This work shows that this versatile workspace can make the cooperation of multiple people in joint tasks more effective, e.g. by parallelizing activities, distributing subtasks, and providing mutual support. In order to create, manage, and share virtual viewing windows, this thesis presents the interaction technique of Photoportals, a tangible interface based on the metaphor of digital photography.
They serve as configurable viewing territories and enable the individual examination of scene details as well as the immediate sharing of the prepared views. Photoportals are specifically designed to complement other interface facets and provide extended functionality for scene navigation, object manipulation, and for the creation of temporal recordings of activities in the virtual scene. A further objective of this work is the realization of a coherent interaction space for direct 3D input across the independent interaction territories in multi-display setups. This requires the simultaneous consideration of user input in several potential interaction windows as well as configurable disambiguation schemes for the implicit selection of distinct interaction contexts. We generalized the required implementation structures into a high-level software pattern and demonstrated its versatility by means of various multi-context 3D interaction tools. Additionally, this work tackles specific problems related to group navigation in multiuser projection systems. Joint navigation of a collocated group of users can lead to unintentional collisions when passing narrow scene sections. In this context, we suggest various solutions that prevent individual collisions during group navigation and discuss their effect on the perceived integrity of the travel group and the 3D scene. For collaboration scenarios involving distributed user groups, we furthermore explored different configurations for joint and individual travel. Last but not least, this thesis provides detailed information and implementation templates for the realization of the proposed interaction techniques and collaborative workspaces in scenegraph-based VR systems. These contributions to the abstraction of specific interaction patterns, such as group navigation and multi-window interaction, facilitate their reuse in other virtual reality systems and their adaptation to further collaborative scenarios.}, subject = {Virtuelle Realit{\"a}t}, language = {en} }