@misc{Vu2005,
  type     = {Master Thesis},
  author   = {Vu, Manh Quynh},
  title    = {M{\"o}glichkeiten zur Bewertung des Zustandes von Straßenbauten (Bau und Rekonstruktion) unter Einbeziehung seismischer Messungen},
  doi      = {10.25643/bauhaus-universitaet.649},
  url      = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-6498},
  school   = {Bauhaus-Universit{\"a}t Weimar},
  year     = {2005},
  abstract = {The central point of this study is to evaluate the stiffness properties of pavement, specifically the E- or G-modulus determined by different testing methods. The stiffness of soil is both a stress- and strain-dependent property, and different testing methods affect the material in different ways. The Young's modulus E0 and shear modulus G0 corresponding to the very small strain level are regarded as the initial or maximum stiffness of the relevant stress-strain curves of a given material. The curve of modulus decay is called the degradation curve, which is also reviewed in this study. Using the results of different measurement methods applied at a reclaimed mining site in Klettwitz to determine the stiffness parameters of the subsoil, the author has tried to unify the results by considering the relationship between the stiffness parameters and the range of strain levels. The testing methods executed at plant S9 in Klettwitz-S{\"u}dfeld are: laboratory oedometer test, static plate load test, dynamic plate load test, and seismic testing (spectral analysis of surface waves, SASW). Some results obtained from this study are: different testing methods yield different absolute values of the stiffness parameters, because different testing methods produce different ranges of strain levels in the soil during their execution. Conventional and non-destructive testing methods should be combined for investigating subsoil characteristics; that is, the soil parameters must be adjusted to the relevant range of strain levels. Especially for settlement calculation, it is recommended that the different values of the stiffness modulus Es obtained by different testing methods be utilized simultaneously along the depth beneath the loading surface. The accuracy of determining stiffness degradation curves depends largely on the determination of the maximum stiffness parameters (E0, G0) at very small strain levels, and this still requires further study.},
  subject  = {Straßenbau},
  language = {en}
}

@phdthesis{Potthast,
  author   = {Potthast, Martin},
  title    = {Technologies for Reusing Text from the Web},
  doi      = {10.25643/bauhaus-universitaet.1566},
  url      = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20120217-15663},
  school   = {Bauhaus-Universit{\"a}t Weimar},
  pages    = {237},
  abstract = {Texts from the web can be reused individually or in large quantities. The former is called text reuse and the latter language reuse. We first present a comprehensive overview of the different ways in which text and language are reused today, and how exactly information retrieval technologies can be applied in this respect. The remainder of the thesis then deals with specific retrieval tasks. In general, our contributions consist of models and algorithms, their evaluation, and, for that purpose, large-scale corpus construction. The thesis divides into two parts.
The first part introduces technologies for text reuse detection, and our contributions are as follows: (1) A unified view of projecting-based and embedding-based fingerprinting for near-duplicate detection, and the first evaluation of fingerprint algorithms on Wikipedia revision histories as a new, large-scale corpus of near-duplicates. (2) A new retrieval model for the quantification of cross-language text similarity, which gets by without parallel corpora. We have evaluated the model in comparison to other models on many different pairs of languages. (3) An evaluation framework for text reuse and particularly plagiarism detectors, which consists of tailored detection performance measures and a large-scale corpus of automatically generated and manually written plagiarism cases. The latter have been obtained via crowdsourcing. This framework has been successfully applied to evaluate many different state-of-the-art plagiarism detection approaches within three international evaluation competitions. The second part introduces technologies that solve three retrieval tasks based on language reuse, and our contributions are as follows: (4) A new model for the comparison of textual and non-textual web items across media, which exploits web comments as a source of information about the topic of an item. In this connection, we identify web comments as a largely neglected information source and introduce the rationale of comment retrieval. (5) Two new algorithms for query segmentation, which exploit web n-grams and Wikipedia as a means of discerning the user intent of a keyword query. Moreover, we crowdsource a new corpus for the evaluation of query segmentation which surpasses existing corpora by two orders of magnitude. (6) A new writing assistance tool called Netspeak, which is a search engine for commonly used language. Netspeak indexes the web in the form of web n-grams as a source of writing examples and implements a wildcard query processor on top of it.},
  subject  = {Information Retrieval},
  language = {en}
}