2020-06-06T03:11:47.084Z Info:
2020-06-06T03:11:47.084Z Info: ltex.ltex-ls.path not set.
2020-06-06T03:11:47.084Z Info: Searching for ltex-ls in '/home/bagustris/.vscode/extensions/valentjn.vscode-ltex-5.0.0/lib'...
2020-06-06T03:11:47.085Z Info: ltex-ls found in '/home/bagustris/.vscode/extensions/valentjn.vscode-ltex-5.0.0/lib/ltex-ls-5.0.0'.
2020-06-06T03:11:47.085Z Info:
2020-06-06T03:11:47.085Z Info: ltex.java.path not set.
2020-06-06T03:11:47.085Z Info: Using ltex-ls from '/home/bagustris/.vscode/extensions/valentjn.vscode-ltex-5.0.0/lib/ltex-ls-5.0.0'.
2020-06-06T03:11:47.085Z Info: Using Java from PATH or JAVA_HOME (may fail if not installed).
2020-06-06T03:11:47.088Z Info: Testing ltex-ls...
2020-06-06T03:11:47.088Z Info: Command: "/home/bagustris/.vscode/extensions/valentjn.vscode-ltex-5.0.0/lib/ltex-ls-5.0.0/bin/ltex-ls"
2020-06-06T03:11:47.089Z Info: Arguments: ["--version"]
2020-06-06T03:11:47.089Z Info: env['JAVA_HOME']: "/usr/lib/jvm/java-9-oracle"
2020-06-06T03:11:47.089Z Info: env['LTEX_LS_OPTS']: "-Xms64m -Xmx512m"
2020-06-06T03:11:48.003Z Info: Test successful!
2020-06-06T03:11:48.099Z Info:
2020-06-06T03:11:48.108Z Info: Starting ltex-ls...
2020-06-06T03:11:48.108Z Info: Command: "/home/bagustris/.vscode/extensions/valentjn.vscode-ltex-5.0.0/lib/ltex-ls-5.0.0/bin/ltex-ls"
2020-06-06T03:11:48.108Z Info: Arguments: []
2020-06-06T03:11:48.108Z Info: env['JAVA_HOME']: "/usr/lib/jvm/java-9-oracle"
2020-06-06T03:11:48.108Z Info: env['LTEX_LS_OPTS']: "-Xms64m -Xmx512m"
2020-06-06T03:11:48.108Z Info:
[Trace - 12:11:48 PM] Sending request 'initialize - (0)'.
Params: {
"processId": 17259,
"clientInfo": {
"name": "vscode",
"version": "1.45.1"
},
"rootPath": null,
"rootUri": null,
"capabilities": {
"workspace": {
"applyEdit": true,
"workspaceEdit": {
"documentChanges": true,
"resourceOperations": [
"create",
"rename",
"delete"
],
"failureHandling": "textOnlyTransactional"
},
"didChangeConfiguration": {
"dynamicRegistration": true
},
"didChangeWatchedFiles": {
"dynamicRegistration": true
},
"symbol": {
"dynamicRegistration": true,
"symbolKind": {
"valueSet": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26
]
}
},
"executeCommand": {
"dynamicRegistration": true
},
"configuration": true,
"workspaceFolders": true
},
"textDocument": {
"publishDiagnostics": {
"relatedInformation": true,
"versionSupport": false,
"tagSupport": {
"valueSet": [
1,
2
]
}
},
"synchronization": {
"dynamicRegistration": true,
"willSave": true,
"willSaveWaitUntil": true,
"didSave": true
},
"completion": {
"dynamicRegistration": true,
"contextSupport": true,
"completionItem": {
"snippetSupport": true,
"commitCharactersSupport": true,
"documentationFormat": [
"markdown",
"plaintext"
],
"deprecatedSupport": true,
"preselectSupport": true,
"tagSupport": {
"valueSet": [
1
]
}
},
"completionItemKind": {
"valueSet": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25
]
}
},
"hover": {
"dynamicRegistration": true,
"contentFormat": [
"markdown",
"plaintext"
]
},
"signatureHelp": {
"dynamicRegistration": true,
"signatureInformation": {
"documentationFormat": [
"markdown",
"plaintext"
],
"parameterInformation": {
"labelOffsetSupport": true
}
},
"contextSupport": true
},
"definition": {
"dynamicRegistration": true,
"linkSupport": true
},
"references": {
"dynamicRegistration": true
},
"documentHighlight": {
"dynamicRegistration": true
},
"documentSymbol": {
"dynamicRegistration": true,
"symbolKind": {
"valueSet": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26
]
},
"hierarchicalDocumentSymbolSupport": true
},
"codeAction": {
"dynamicRegistration": true,
"isPreferredSupport": true,
"codeActionLiteralSupport": {
"codeActionKind": {
"valueSet": [
"",
"quickfix",
"refactor",
"refactor.extract",
"refactor.inline",
"refactor.rewrite",
"source",
"source.organizeImports"
]
}
}
},
"codeLens": {
"dynamicRegistration": true
},
"formatting": {
"dynamicRegistration": true
},
"rangeFormatting": {
"dynamicRegistration": true
},
"onTypeFormatting": {
"dynamicRegistration": true
},
"rename": {
"dynamicRegistration": true,
"prepareSupport": true
},
"documentLink": {
"dynamicRegistration": true,
"tooltipSupport": true
},
"typeDefinition": {
"dynamicRegistration": true,
"linkSupport": true
},
"implementation": {
"dynamicRegistration": true,
"linkSupport": true
},
"colorProvider": {
"dynamicRegistration": true
},
"foldingRange": {
"dynamicRegistration": true,
"rangeLimit": 5000,
"lineFoldingOnly": true
},
"declaration": {
"dynamicRegistration": true,
"linkSupport": true
},
"selectionRange": {
"dynamicRegistration": true
}
},
"window": {
"workDoneProgress": true
}
},
"initializationOptions": {
"locale": "en"
},
"trace": "verbose",
"workspaceFolders": null
}
[Trace - 12:11:52 PM] Received response 'initialize - (0)' in 4067ms.
Result: {
"capabilities": {
"textDocumentSync": 1,
"codeActionProvider": {
"codeActionKinds": [
"quickfix.ltex.acceptSuggestion",
"quickfix.ltex.addToDictionary",
"quickfix.ltex.disableRule",
"quickfix.ltex.ignoreRuleInSentence"
]
},
"executeCommandProvider": {
"commands": [
"ltex.addToDictionary",
"ltex.disableRule",
"ltex.ignoreRuleInSentence"
]
}
}
}
[Trace - 12:11:52 PM] Sending notification 'initialized'.
Params: {}
[Trace - 12:11:52 PM] Sending notification 'workspace/didChangeConfiguration'.
Params: {
"settings": {
"ltex": {
"enabled": true,
"language": "en-US",
"dictionary": {},
"disabledRules": {},
"enabledRules": {},
"ltex-ls": {
"path": "",
"languageToolHttpServerUri": ""
},
"java": {
"path": "",
"initialHeapSize": 64,
"maximumHeapSize": 512
},
"commands": {
"ignore": [],
"dummy": []
},
"environments": {
"ignore": []
},
"markdown": {
"ignore": [
"CodeBlock",
"FencedCodeBlock",
"IndentedCodeBlock"
],
"dummy": [
"AutoLink",
"Code"
]
},
"ignoreRuleInSentence": [],
"configurationTarget": {
"addToDictionary": "global",
"disableRule": "workspaceFolder",
"ignoreRuleInSentence": "workspaceFolder"
},
"additionalRules": {
"motherTongue": "",
"languageModel": "",
"neuralNetworkModel": "",
"word2VecModel": ""
},
"sentenceCacheSize": 2000,
"diagnosticSeverity": "information",
"trace": {
"server": "verbose"
},
"javaHome": null,
"performance": {
"initialJavaHeapSize": null,
"maximumHeapSize": null,
"sentenceCacheSize": null
},
"ar": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"ar-DZ": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"ast-ES": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"be-BY": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"br-FR": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"ca-ES": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"ca-ES-valencia": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"da-DK": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"de": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"de-AT": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"de-CH": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"de-DE": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"de-DE-x-simple-language": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"el-GR": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"en": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"en-AU": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"en-CA": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"en-GB": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"en-NZ": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"en-US": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"en-ZA": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"eo": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"es": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"fa": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"fr": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"ga-IE": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"gl-ES": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"it": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"ja-JP": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"km-KH": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"nl": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"pl-PL": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"pt": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"pt-AO": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"pt-BR": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"pt-MZ": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"pt-PT": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"ro-RO": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"ru-RU": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"sk-SK": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"sl-SI": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"sv": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"ta-IN": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"tl-PH": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"uk-UA": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"zh-CN": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
}
}
}
}
[Trace - 12:11:52 PM] Sending notification 'textDocument/didOpen'.
Params: {
"textDocument": {
"uri": "file:///media/bagustris/atmaja/s3/mypaper/2020/sc/elsarticle-bagus/sc-bagus.tex",
"languageId": "latex",
"version": 1,
"text": "\\documentclass[review, authoryear]{elsarticle}\n\\usepackage{graphicx}\n\\usepackage[hidelinks]{hyperref}\n\\usepackage{lineno}\n\\usepackage{epstopdf}\n\\usepackage{amsmath}\n\\usepackage{amsfonts}\n\\usepackage{url}\n\\usepackage{adjustbox}\n\\usepackage{textcomp}\n\\usepackage{siunitx,booktabs}\n\n\\usepackage{array} % for \\newcolumntype macro\n\\newcolumntype{L}{>{$}l<{$}} % math-mode version of \"l\" column type\n\n%\\renewcommand\\appendix{\\par\n% \\setcounter{section}{0}%\n% \\setcounter{subsection}{0}%\n% \\setcounter{equation}{0}%\n% \\setcounter{table}{0}%------------ << add\n% \\setcounter{figure}{0}%----------- << add\n% \\gdef\\theequation{\\@Alph\\c@section.\\arabic{equation}}%\n% \\gdef\\thefigure{\\@Alph\\c@section.\\arabic{figure}}%\n% \\gdef\\thetable{\\@Alph\\c@section.\\arabic{table}}%\n% \\gdef\\thesection{\\appendixname\\@Alph\\c@section}%\n% \\@addtoreset{equation}{section}%\n% \\@addtoreset{table}{section}%----- << add\n% \\@addtoreset{figure}{section}%---- << add\n%}\n\n\\modulolinenumbers[5]\n\n\\journal{Speech Communication}\n\n\\bibliographystyle{model2-names.bst}\\biboptions{authoryear}\n\n\\begin{document}\n\n\\begin{frontmatter}\n\n\\title{Two-stage dimensional emotion recognition by fusing predictions of acoustic and text networks using SVM}\n\n\\author[mymainaddress,mysecondaryaddress]{Bagus Tris Atmaja\\corref{mycorrespondingauthor}}\n\\cortext[mycorrespondingauthor]{Corresponding author, \\emph{E-mail address: [email protected]}}\n\n\\author[mysecondaryaddress]{Masato Akagi}\n\n\\address[mymainaddress]{Japan Advanced Institute of Science and Technology, \n 1-1 Asahidai, Nomi, Ishikawa 923-1292, Japan}\n\\address[mysecondaryaddress]{Sepuluh Nopember Insitute of Technology, \n Sukolilo, Surabaya 60111, Indonesia}\n\n\\begin{abstract}\nAutomatic speech emotion recognition (SER) by a computer is a critical component\nfor more natural human-machine interaction. 
As in human-human interaction, the\ncapability to perceive emotion correctly is essential to taking further steps in\na particular situation. One issue in SER is whether it is necessary to combine\nacoustic features with other data such as facial expressions, text, and motion\ncapture. This research proposes to combine acoustic and text information by\napplying a late-fusion approach consisting of two steps. First, acoustic and\ntext features are trained separately in deep learning systems. Second, the\nprediction results from the deep learning systems are fed into a support vector\nmachine (SVM) to predict the final regression score. Furthermore, the task in\nthis research is dimensional emotion modeling, because it can enable deeper\nanalysis of affective states. Experimental results show that this two-stage,\nlate-fusion approach, obtains higher performance than that of any one-stage\nprocessing, with a linear correlation from one-stage to two-stage processing.\n\\end{abstract}\n\n\\begin{keyword}\nautomatic speech emotion recognition, affective computing, late fusion, multimodal fusion\n\\end{keyword}\n\n\\end{frontmatter}\n\n\\linenumbers\n\n\\section{Introduction}\nUnderstanding human emotion is important for responding properly in a particular\nsituation for both human-human communication and future machine-human\ncommunication. Emotion can be recognized from many modalities: facial\nexpressions, speech, and motion of body parts. In the absence of visual\nfeatures, speech is the only way to recognize emotion, as in the case of a\ntelephone call or a call-center application \\citep{Petrushin1998}. By\nidentifying caller emotion automatically from a system, appropriate feedback can\nbe applied quickly and precisely.\n\nSpeech is a modality in which both acoustic and verbal information can be\nextracted to recognize human emotion. Unfortunately, most speech emotion\nrecognition (SER) systems use only acoustic features for predicting categorical\nemotion. 
In contrast, this research proposes to use both acoustic and text\nfeatures to improve dimensional SER performance. Text can be extracted from\nspeech, and it may contribute to emotion recognition. For example, an\ninterlocutor can perceive emotion not only from prosodic information but also\nfrom semantics. \\cite{Grice2002} stated in his implicature theory that what is\nimplied derives from what is said. For example, if someone says that he is angry\nbut looks happy, then the implication is that he is indeed angry.\n\nBesides the categorical approach, emotion can also be analyzed via a dimensional\napproach. In dimensional emotion, affective states are lines in a continuous\nspace. Some researchers have used a two-dimensional (2D) space comprising\nvalance (positive or negative) and arousal (excited or apathetic). Other\nresearchers have proposed a 3D emotional space by adding either dominance\n(degree of power over emotion) or liking/disliking. Although it is rare, a 4D\nemotional space has also been studied by adding expectancy or naturalness.\n\\cite{Russell1980a} argued, however, that a 2D emotion model is enough to\ncharacterize all categorical emotions. In this research, we choose a 3D emotion\nmodel with valence, arousal, and dominance as the emotion dimensions/attributes.\n\n% add motivation for dimensional emotion here, add information from the verge\nDarwin argued that the biological category of a species, like emotion\ncategories, does not have an essence due to high variability of individuals\n\\citep{charles1872expression}. Mehrabian and Russell developed pleasure,\narousal, and dominance (PAD) model to assess environmental perception,\nexperience, and psychological responses, as an alternative to categorical\nemotion. 
This latter emotion view is also known as circumplex model of affect,\nand the pleasure dimension is often replaced by valence (the VAD model).\nAlthough most research used 2D model (valence and arousal), recent research\nshows four dimensions are needed to represent meaning of emotion words\n\\citep{Fontaine2017}. Motivated by these findings, we evaluate the VAD emotion\nmodel since the datasets also gives the labels in 3D space.\n\nThis study aims to evaluate the combination of acoustic and text features to\nimprove the performance of dimensional automatic SER by using two-stage\nprocessing. Current research on pattern recognition has also shown that the use\nof multimodal features from audio, visual, and motion-capture data increases\nperformance as compared to using a single modality \\citep{Hu2018, Yoon2018,\nTripathi2018}. Meanwhile, research on big data has revealed that the use of more\ndata will improve performance for results from the same algorithm\n\\citep{Halevy2009}. By using both acoustic and text features, SER should obtain\nimproved performance over acoustic-only and text-only recognition. This\nassumption is also motivated by the fact that human emotion perception uses\nmultimodal sensing, peculiarly verbal and non-verbal information. Many\ntechnologies, such as human-robot interaction, can potentially benefit from such\nimprovement in emotion recognition. \n\n% shows contribution\n% 1. A proposal of two-stage processing on IEMOCAP dataset for dimensional emotion recognition\n% from acoustic and text features using LSTM and SVM, and compared the results with\n% unimodal results and another fusion method on the same metric and scenario.\n% 2. Evaluation of different acoustic and text features to find the best pair \n% of acoustic-text pair based on evaluated features, including a frame-based\n% feature and statistical functions with and without silent pause features.\n% 3. Evaluation of speaker-dependent vs. 
speaker-independent scenarios \n% in dimensional speech emotion recognition from bimodal features.\n% 4. Evaluation of using text feature on dataset that contains target sentence\n% after removing utterances containing target sentence.\n\nThe main contribution of this study then are: (1) a proposal of two-stage\nprocessing for dimensional emotion recognition from acoustic and text features\nusing LSTM and SVM, and a comparison of the results with unimodal results and\nanother fusion method on the same metric and dataset scenario; (2) an evaluation\nof different acoustic and text features to find the best pair of acoustic-text\npair based on evaluated features, including a frame-based acoustic feature and\nutterance-based statistical functions with and without silent pause features;\n(3) evaluation of speaker-dependent vs. speaker-independent scenarios in\ndimensional speech emotion recognition from text features; and (4) evaluation\nof using text feature on dataset that originally contains target sentences but\nremoved to avoid the effect of these target sentences.\n\nThe rest of this paper is organized as follows. \"Related work\" reviews \nclosely related work to this research including the difference of this \nstudy from previous research, \"Datasets and features\" outlines the datasets \nand feature sets used in this research, \"Two-stage bimodal emotion recognition\" \nexplains the method to achieve the results, \"Results and discussion\" shows \nthe results and its discussion, and finally \"Conclusions\" concludes this \nstudy and proposes future work.\n\n\\section{Related work}\nSpeech emotion recognition (SER) has been researched since the importance of\nemotion aspects was considered in human-computer interaction\n\\citep{Kleine-cosack2006}. The amount of research has grown as datasets have\nbecome publicly available, including the Berlin EMO-DB, IEMOCAP, MSP-IMPROV, and\nRAVDESS datasets. 
To enable the analysis and comparison with previous works in\nthis paper, we include the following literature reviews of related work. We\nfocus on comparing previous work that used the same or similar datasets as this\nwork does (specifically, IEMOCAP, MSP-IMPROV, or both), and especially on\nresearch that focused on dimensional rather than categorical emotion. While the\nfocus here is on bimodal emotion recognition using both acoustic and text data,\nsome work on speech-only or text-only emotion recognition is briefly described.\n\n\\subsection{Acoustic emotion recognition}\nRecognition of emotion within speech signals has been actively developed since\nthe success of recognizing emotion via facial expressions. From categorical\nemotion detection, the paradigm of SER has shifted to predicting degrees of\nemotion attributes, or dimensional emotion. One of the earliest papers on\n(categorical) SER \\citep{Petrushin1998} explored how well humans and computers\nrecognize emotion in speech. Since then, research on categorical emotion\nrecognition has grown following the development of affective research in\npsychology.\n\n\\cite{Jin2005} reported a first trial on SER in categorical and two-dimensional\n(2D) spaces. They found that acoustic features are helpful in describing and\ndistinguishing emotion through the conception of emotion modeling (2D space). In\n2009, \\cite{Giannakopoulos2009} re-investigated the association of speech\nsignals with an emotion wheel (continuous space). They proposed a method to\nestimate the degrees of valence and arousal. Their method, including a proposed\nfeature set, could estimate both valence and arousal, with error close to that\nof average human annotation. \\cite{Grimm2007a} used a fuzzy-logic estimator and\na rule base derived from acoustic features in speech, such as pitch, energy,\nspeaking rate, and spectral characteristics, to describe emotion primitives\n(valence, arousal, and dominance). 
They obtained moderate to high correlation\n(0.42 \\textless ~r \\textless 0.85) between their method and human annotation.\n\nUsing the IEMOCAP dataset, Parthasarathy and Busso tried to train a neural\nnetwork system to predict valence, arousal, and dominance simultaneously\n\\citep{parthasarathy2017jointly}. They proposed a multitask learning (MTL)\nsystem based on the mean squared error (MSE) to balance prediction of the three\nemotion dimensions. They found that, by combining a shared layer and an\nindependent layer, the MTL system's best performance exceeded that of the\ntraditional single-task learning (STL) method.\n\n\\cite{Abdelwahab2018} proposed using a domain-adversarial neural network (DANN)\nto solve the problem of mismatch between training and test data in dimensional\nSER. Using the DANN, they obtained performance that significantly improved on\nthat of a source-trained DNN. Thus, they addressed the importance of minimizing\nthe mismatch between the source (training) and target (test) data. Furthermore,\nusing the DANN showed that creating a flexible, discriminant feature\nrepresentation can reduce the gap in the feature space between the source and\ntarget domains.\n\nSome of the above results on dimensional SER showed that recognizing valence is\nmore difficult than recognizing arousal. To overcome this issue,\n\\cite{Sridhar2018} used higher regularization (dropout) for valence than for the\nother dimensions when training SER through a deep neural network (DNN). Their\nsystem analysis showed that higher dropout is needed for predicting valence. By\nusing higher regularization, models could identify more general acoustic\npatterns that were observed across speakers. \\cite{Elbarougy2014} used a\nthree-layer model based on human perception for the same purpose. 
\\cite{Li2019}\nimproved on that work by combining acoustic features for multilingual emotion\nrecognition.\n\nAlthough some improvements have been achieved, \\cite{ElAyadi2011} addressed the\nSER issue of whether it suffices to use acoustic features for modeling emotions\nor it is necessary to combine them with other types of features, such as\nlinguistic discourse information or facial features. Text features can be\nobtained through automatic speech recognition (ASR) and may be helpful in\nsignificantly improving SER performance. In particular, text features are\nexpected to improve the performance of valence recognition, for which acoustic\nfeatures have typically failed to achieve high performance. Moreover, text\nfeatures are commonly used for sentiment analysis, which is similar to valence\nprediction.\n\n\\subsection{Text emotion recognition}\nAs mentioned above, one area of research on text processing focuses on sentiment\nanalysis. This area is closely related to recognizing valence, i.e., the\npolarity or semantic orientation of an event, object, or situation\n\\citep{Jurafsky2017}. Although the early research sought to recognize sentiment\nin text, extension to recognize categorical and dimensional emotion has been\nattempted in recent years. As in other areas of research on pattern recognition,\nsome researchers in text processing have used unsupervised learning to detect\nemotion in text \\citep{Mantyla2016, Mohammad2016}, while others have used\nsupervised learning based on machine learning \\citep{Alm2005, Yang2016}.\n\n\\cite{Alm2005} used a bag-of-words (BoW) model and other text features from text\ndatasets to predict emotion within those datasets. Using a multiclass linear\nclassifier, they obtained encouraging results that suggest a potential direction\nfor future research. Their proposed method could predict basic emotion from text\nwith accuracy close to 70\\%. 
\n\n\\cite{Kim2010} used unsupervised learning to predict categorical emotion from\nthree different datasets: SemEval, ISEAR, and Fairy Tales. Using three different\ntechniques, they found that the best performance was achieved with categorical\nclassification based on non-negative matrix factorization (NMF).\n\\cite{Atmaja2019} used a deep-learning-based classification model and improved\nthe precision, recall, and F-score results for the ISEAR dataset from 0.528,\n0.417, and 0.372 to 0.56, 0.54, and 0.54, respectively. They showed the\neffectiveness of the deep-learning-based method for categorical emotion\nrecognition on a larger dataset, while on a smaller dataset, the unsupervised\napproach achieved better results. Apart from categorical emotion recognition,\n\\cite{Atmaja2019} also performed dimensional emotion recognition on the same\ndataset used for the categorical task. Similarly to the categorical task, the\nresults showed smaller errors obtained by increasing the size of the training\nset.\n\n\\cite{Mantyla2016} used emotion words from an affective lexicon to mine valence,\narousal, and dominance in text communication. Specifically, they used text\ncommunication data from a software development situation, including issues and\ncomments captured through issue repository technology. They used the measure of\nvalence, arousal, and dominance (VAD) to detect the productivity and burnout of\nthe software developers. The results showed that increased emotions in terms of\nVAD correlated with increased productivity. Their results also complemented\nprevious results showing that VAD can be measured from text, though at first\nonly the sentiment (valence) was used in text processing.\n\nResearch on text emotion recognition has usually used written language (from\nchats, Twitter, forum threads, etc.), which differs from spoken language. Also,\nmost such work on text processing has detected only the valence, i.e., only one\nemotion dimension. 
Because speech transcription converts spoken language to a\nwritten form, it should contain more emotion information than written plain text\ndoes. Evaluation on the other emotion dimensions (arousal and dominance) is also\nnecessary to determine the impact on those dimensions, along with evaluation on\nthe combination with acoustic features for that purpose.\n\n\\subsection{Bimodal emotion recognition}\nUsing bimodal or multimodal features for emotion recognition is not new. Among\nmany modalities, audio and visual features are the most used for extracting\nemotion information. When only speech is conveyed, however, two types of\ninformation can be extracted: acoustic and text features. Among many research\npapers, the reports by \\cite{Eyben2010}, \\cite{Karadogan2012}, \\cite{Ye2014},\n\\cite{Jin2015}, \\cite{Aldeneh2017}, \\cite{Yoon2018}, \\cite{Atmaja2019b}, and\n\\cite{Zhang2019} are the most related to this paper.\n\n\\cite{Eyben2010} proposed an online method to detect not only valence and\narousal but also the time when those emotion attributes are detected. They used\na recurrent neural network (RNN) based on long short-term memory (LSTM) to\nrecognize a framewise valence-arousal continuum with time. By adding a keyword\nspotter, they obtained performance improvement in terms of the Pearson\ncorrelation coefficient (PCC). They also found that keywords like ``again,''\n``angry,'' ``assertive,'' and ``very'' were related to activation, while typical\nkeywords correlated to valence were ``good,'' ``great,'' ``lovely,'' and\n``totally.'' Similarly to that idea, \\cite{Karadogan2012} used affective words\nfrom Affective Norms for English Words (ANEW) to determine a valence-arousal\nvalue and combine it with results from acoustic features. The latter paper also\nobtained similar improvement over using a single modality.\n\n\\cite{Ye2014} used bimodal features from acoustic and text information to\nrecognize emotion within speech. 
The acoustic features were trained in two\nparallel classifiers: an SVM and a backpropagation network. The text features\nwere trained in two serial classifiers, which were both Naive Bayes classifiers.\nThe second classifier acted as a filter for unreliable parts from the first\nclassifier. Decision-level fusion (late fusion) was then implemented by\ncombining the acoustic and text features with tree-weighting factors for the\nSVM, backpropagation network, and text classifiers. The resulting fusion method\nobtained 93\\% accuracy, as compared to 83\\% from the acoustic features only and\n89\\% from the text features only. The task was categorical emotion detection\nfrom a Chinese database. Similarly to that approach for a categorical task,\n\\cite{Jin2015} used the IEMOCAP dataset to test combinations of acoustic and\ntext features for SER. The novelty of their method was the use of an emotion\nvector for lexical features, which improved the accuracy in four-class emotion\nrecognition from 53.5\\% (acoustic) and 57.4\\% (text) to 69.2\\% (acoustic +\ntext).\n\n\\cite{Aldeneh2017} used acoustic and lexical features to detect the degree of\nvalence from speech. They used 40 mel-filterbanks (MFBs) as acoustic features\nand word vectors as text features. Continuous valence values were then converted\nto three categorical classes: negative, neutral, and positive. Using that\napproach, they improved the weighted accuracy from 64.5\\% (text) and 58.9\\%\n(acoustic) to 69.2\\% (acoustic + text). \n\n\\cite{Yoon2018} used audio and text networks to predict emotion classes from the\nIEMOCAP dataset. Both networks used RNNs with inputs of mel-frequency cepstral\ncoefficients (MFCCs) for audio and word vectors for text. 
The proposed\nmultimodal dual recurrent encoder (MDRE) improved on the single-modality RNNs\nfrom 54.6\\% (audio) and 63.5\\% (text) to 71.8\\% (audio + text).\n\\cite{Atmaja2019b} obtained a better result by using 34 acoustic features after\nsilence removal and combining them with word embeddings. With LSTM used for the\ntext and dense networks for speech, the latter paper obtained an accuracy of\n75.49\\% on the same dataset and task.\n\nInstead of using lexical features, \\cite{Zhang2019} used the lexical properties\nof phonemes and combined them with acoustic features to recognize valence in\nspeech. They used 39 unique phonemes from the IEMOCAP and MSP-IMPROV datasets\nfor the lexical properties, and a 40-dimensional log-scale MFB energy for the\nacoustic features. Using a scaled version of valence, converted from a 5-point\nscale to three categorical classes, they showed that their multistage fusion\nmodel outperformed all other models on both IEMOCAP and MSP-IMPROV.\n\nAll of the above research confirmed the benefits of bimodal/multimodal\nrecognition over using a single modality. We currently need a new method for\npredicting dimensional SER, however, for the following reasons: (1) some prior\nresearch did not predict all emotion attributes \\citep{Eyben2010,\nKaradogan2012, Zhang2019}, while other works predicted emotion categories\ninstead of attributes; (2) instead of predicting continuous emotion attribute\nscores, some works switched to a categorical task for simplicity\n\\citep{Zhang2019, Aldeneh2017}; and (3) some of the reported results are not\nup to date and showed low improvement \\citep{Eyben2010, Karadogan2012}. \n\nAlthough we have limited our work to using both acoustic and text features,\nother researchers have already proposed another solution to solve the issues\nabove, namely, using audiovisual emotion recognition. 
Nevertheless, there is\nstill a need to propose and evaluate methods using both acoustic and text\nfeatures, because some target applications only involve speech data. In these\nvoice-based applications, no visual or written-text information is acquired. To\nmaximize the resources for extracting emotion within speech, this paper exploits\nboth acoustic and text features (obtained via speech transcription) and combines\nthem for dimensional emotion regression. By using both kinds of information in a\ntwo-stage process, we expect the proposed method's performance to be close to or\nexceed the performance obtained by using visual information. Note here that\nvisual information, particularly facial expressions, has been reported to have\nmore influence on dimensional emotion than other modalities do\n\\citep{FabienRingeval2018}.\n\n\\section{Data and feature sets}\n\\subsection{Datasets}\nDatasets for investigating our proposal to use two-stage processing for\ndimensional SER must meet some requirements. The requirements for those datasets\nare that (1) the dataset has both speech data and text transcription (to speed\nup text data acquisition); (2) the dataset is already annotated with dimensional\nlabels; and (3) the dataset is publicly available. The following two datasets\nsatisfy these requirements.\n\n\\noindent 1. IEMOCAP \\\\\nIEMOCAP, which stands for interactive emotional dyadic motion capture database,\ncontains recordings of dyadic conversations with markers on the face, head, and\nhands. The recordings thus provide detailed information about the actors' facial\nexpressions and hand movements during both scripted and spontaneous spoken\ncommunication scenarios \\citep{Busso2008}. This research only uses the acoustic\nand text features, because the goal is bimodal speech emotion recognition. The\nIEMOCAP dataset is freely available upon request, including its labels for\ncategorical and dimensional emotion. 
We use the dimensional emotion labels,\nwhich are average scores for two evaluators, because they enable deeper analysis\nof emotional states. The dimensional emotion scores, for valence, arousal, and\ndominance, are meant to range from 1 to 5 as a result of Self-Assessment Manikin\n(SAM) evaluation. We found some labels with scores lower than 1 or higher\nthan 5, however, and removed those data (seven samples). All\nlabels are then converted from the 5-point scale to floating-point values in\nthe range [-1, 1] when they are fed to a DNN system.\n\nThe total length of the IEMOCAP dataset is about 12 hours, or 10039\nturns/utterances, from ten actors in five dyadic sessions (two actors each). The\nspeech modality used to extract acoustic features is a set of files in the\ndataset with a single channel per sentence. The sampling rate of the speech data\nwas 16 kHz. For text data, we use the manual transcription in the dataset\nwithout additional preprocessing.\n\n\\noindent 2. MSP-IMPROV \\\\\nMSP-IMPROV, developed by the Multimodal Signal Processing (MSP) Lab at the\nUniversity of Texas, Dallas, is a multimodal emotional database obtained by\napplying lexical and emotion control in the recording process while also\npromoting naturalness. The dataset provides audio and visual recordings, while\ntext transcriptions are obtained via automatic speech recognition (ASR) provided\nby the authors. As with IEMOCAP, we use the speech and text data with\ndimensional emotion labels. The annotation method for the recordings was the\nsame as for IEMOCAP, i.e., SAM evaluation, with rating by at least five\nevaluators. We treat missing evaluations as neutral speech (i.e., a score of 3\nfor valence, arousal, and dominance). 
Also as with IEMOCAP, all labels are\nconverted to floating-point values in range [-1, 1] from the original 5-point\nscale.\n\nThe MSP-IMPROV dataset is recorded from 12 actors, two actors for each session.\nFrom a total of four scenarios in six sessions, we only use two scenarios,\nbecause the others are not relevant to our goal. These two scenarios are\n``Other-improvised'' and ``Natural-interaction,'' while the two excluded\nscenarios are ``Target-improvised'' and ``Target-read.'' The latter two\nscenarios were recorded with a \\emph{target sentence}, meaning that the same\nsentence could elicit different emotions and thus making the linguistic\ninformation ineffective for emotion recognition. Hence, following another paper\n\\cite{Zhang2019}, we refer to this subset of the MSP-IMPROV dataset as MSP-I+N\n(MSP improvised and natural interaction), or MSPIN. In contrast to the number of\nutterances used in that paper, we use all utterances from the two scenarios,\nwith missing text transcriptions obtained using Mozilla's DeepSpeech\n\\citep{DeepSpeech2019}. We thus use 7166 utterances from a total of 8438. The\nspeech data in the dataset was sampled in mono at 44.1 kHz, with one file per\nutterance/sentence.\n\nWe split each dataset into two partitions to observe any differences between a\nspeaker-dependent (SD) partition and a speaker-independent partition made by\nleaving one session out (LOSO) for each dataset. For example, for the IEMOCAP\ndataset, the last session (i.e., session 5) which is recorded from two different\nactors (out of 10) is only used for testing. Similarly, for MSP-I+N, all\nutterances from session 6 (two speakers out of 12) are used for the test set.\nOur rule for data splitting is to divide between the training + development and\ntest sets in a ratio close to 80:20. This rule is applied for both the SD and\nLOSO partitions. Then, of the training + development data, 80\\% is used for\ntraining and the remaining 20\\% is used for development. 
Figure\n\\ref{fig:data_partition}. Both methods are evaluated with the same unseen test\nsets to compare the performance and measure the improvement. Note that we did\nnot use cross validation (but divide into training and test data) for evaluation\nsince the number of samples for both datasets is adequate (10039 and 7166\nsamples). This strategy is also utilized to keep the same test set for LSTM\n(one-stage processing) and SVM (two-stage processing) which is hard if the\nsamples is shuffled/cross-validated.\n\n\\begin{figure}\n\\includegraphics[width=5in]{../fig/csl_partition.pdf}\n\\caption{Proportions of data splitting for each partition of each dataset. In\none-stage LSTM processing, the output of the model are both development and\ntest data. In the second stage SVM processing, the input data is the prediction\nfrom development set of the previous stage and the output is the prediction of\ntest data.}\n\\label{fig:data_partition}\n\\end{figure}\n\n\\subsection{Feature sets} \n\\label{sect:feature_sets}\nMost research on SER, or generally on pattern classification, focuses on two\nmain topics: feature extraction, like in \\cite{Batliner2011}, and\nclassification/regression methods, like in \\cite{Albornoz2011}. While this\nresearch focuses on the second topic, we also evaluate state-of-the-art feature\nsets used for SER. For acoustic features, we evaluate three feature sets: the\nGeneva Minimalistic Acoustic Parameter Set (GeMAPS), statistical functions from\nGeMAPS, and the same functions from GeMAPS with a silence feature. For text\nfeatures, aside from the original word vectors extracted from the text\ntranscription, we also evaluate two word embeddings that are pretrained on a\nlarger corpus: the Word2Vec embedding \\citep{Mikolov} and GloVe embedding\n\\citep{Pennington2014}. These feature sets are explained below.\n\n\\paragraph{Acoustic features} The type of acoustic features extracted from a\nspeech signal is the most important part of an SER system. 
GeMAPS is an effort\nto standardize the acoustic features used for voice research and affective\ncomputing \\citep{Eyben}. The feature set consists of 23 acoustic low-level\ndescriptors (LLDs) such as fundamental frequency ($f_0$), jitter, shimmer, and\nformants, as listed in Table \\ref{tab:aco_feature}. As an extension of GeMAPS,\neGeMAPS includes statistical functions derived from the LLDs, such as the\nminimum, maximum, mean, and other values. Since these features are extracted on\nframe-based processing, the size of feature becomes large for one utterance\n(e.g., 3409 $\\times$ 23 for IEMOCAP), which is suitable for deep learning\nmethods like LSTM. Including the LLDs, the total number of features in eGeMAPS\nis 88. These statistical values are often called high-level statistical\nfunctions (HSF). \\cite{Schmitt2018} found, however, that using only the mean and\nstandard deviation (std) from the LLDs achieved a better result than using\neGeMAPS and audiovisual features. These global features may represent more\nemotion information within speech than frame-based features. We thus coded these\ntwo statistical functions (47 values) from the LLDs as the HSF1 feature set. We\nalso investigate the effect of including a silence feature in this SER research,\nas explained below. We define the combination of HSF1 with the silence feature\nas HSF2.\n\nSilence, in this paper, is defined as the proportion of silent frames among all\nframes in an utterance. In human communication, the proportion of silence in\nspeaking depends on the speaker's emotion. For example, a happy speaker may have\nfewer silences (or pauses) than a sad speaker. The proportion of silence in an\nutterance can be calculated as\n\n\\begin{equation}\n \\label{eq:sil_ratio}\n S = \\frac{N_{s}}{N_{t}},\n\\end{equation}\nwhere $N_s$ is the number of frames categorized as silence (silent frames), and $N_t$ is the total number of frames. 
A frame is categorized as silent if it does not exceed a threshold value defined by multiplying a factor by a root mean square (RMS) energy, $X_{rms}$. Mathematically, this is formulated as\n\n\\begin{equation}\n th = \\alpha \\times \\overline{X_{rms}},\n\\end{equation}\nwhere $X_{rms}$ is defined as\n\n\\begin{equation}\n X_{rms} = \\sqrt{\\frac{1}{n}\\sum_{i=1}^{n}x[i]^2}.\n\\end{equation}\n\nThis silence feature is similar to the disfluency feature proposed in\n\\cite{moore2014word}. In that paper, the author divided the total duration of\ndisfluency by the total utterance length for $n$ words. Figure\n\\ref{fig:silence_fig} illustrates the calculation of our silence feature. If\n$X_{rms}$ from a frame is below $th$, then it is categorized as silent, and the\ncalculation of equation \\ref{eq:sil_ratio} is applied.\n\n\\begin{table}\n \\centering\n \\caption{Acoustic feature sets derived from the GeMAPS features by \\cite{Eyben} and the statistical functions used for dimensional SER in this research.}\n \\begin{tabular}{p{5cm} p{2.9cm} p{2.9cm}}\n \\hline\n LLDs & HSF1 & HSF2 \\\\\n \\hline\n intesity, alpha ratio, Hammarberg index, spectral slope 0-500 Hz, spectral slope \n 500-1500 Hz, spectral flux, 4 MFCCs, F0, jitter, shimmer, harmonics-to-noise ratio (HNR), \n harmonic difference H1-H2, harmonic difference H1-A3, F1, F1 bandwidth, F1 \n amplitude, F2, F2 amplitude, F3, and F3 amplitude. \n & mean (of LLDs), standard deviation (of LLDs) \n & \n mean (of LLDs), standard deviation (of LLDs), silence \\\\\n \\hline\n \\end{tabular}\n \\label{tab:aco_feature}\n\\end{table}\n\n\\begin{figure}[htpb]\n\\centering\n\\includegraphics[width=0.8\\textwidth]{../fig/silence_calc.pdf}\n\\caption{Moving frame to calculate the silence feature.}\n\\label{fig:silence_fig}\n\\end{figure}\n\n\\paragraph{Text features} To process a word sequence in a computational model,\nthe text must be converted to numerical values. 
The resulting text feature is\ncommonly known as a word embedding and is a vector representation of a word.\nNumerical values in the form of a vector are used to enable a computer to\nprocess text data, as it can only process numerical values. The values are\npoints (numeric data) in a space whose number of dimensions is equal to the\nvocabulary size. The word representations embed those points in a feature space\nof lower dimension. In the original space, every word is represented by a\none-hot vector, with a value of 1 for the corresponding word and 0 for other\nwords. The element with a value of 1 is converted to a point in the range of the\nvocabulary size. \n\nIn addition to directly converting the text in the transcriptions of the\ndatasets (IEMOCAP and MSP-I+N) to sequences, two pretrained word embeddings are\nused to weight the original word embeddings. As mentioned above, the two\nword-embedding models are Word2Vec \\citep{Mikolov} and GloVe\n\\citep{Pennington2014}. Hence, we have three different text features for word\nembeddings: (1) a text sequence, or word embedding (WE), without any weighting,\n(2) a WE weighted by a pretrained Word2Vec model, and (3) a WE weighted by a\npretrained GloVe embedding model. Word2Vec is trained on large datasets to model\nits vector representation by using either a continuous bag-of-words or a\ncontinuous skip-gram model. GloVe is another model for representing words as\nvectors; it captures linear substructures of word meaning by using a global\nlog-bilinear regression method. All\nthese features are fed into the same embedding layer in the text network.\n\n% todo: add the differences between word2vec, GloVe, and FastText\n\n\\section{Two-stage bimodal emotion recognition}\n\\subsection{Acoustic emotion recognition system}\nMost SER research uses only acoustic features. Our approach to acoustic SER is\nsimilar to that research. 
The contribution of our acoustic network is the\nevaluation of mean + std features at the utterance level and the use of a\nsilence feature with the statistical functions to investigate any improvement.\nThis evaluation is continuation of \\citep{Atmaja2020e} with extension \non different feature sets and datasets.\nAs explained in section \\ref{sect:feature_sets}, we evaluate three acoustic\nfeature sets: LLDs, HSF1, and HSF2.\n\nThe LLD features are the 23 acoustic features listed in Table\n\\ref{tab:aco_feature}. For each frame (25 ms), these 23 acoustic features are\nextracted. With a hop size of 10 ms, the maximum number of sequences is 3409 for\nthe IEMOCAP dataset and 3812 for the MSP-I+N dataset. Hence, the size of the\ninput is 3409 $\\times$ 23 for IEMOCAP and 3812 $\\times$ 23 for MSP-I+N. The\nextraction process uses the openSMILE toolkit \\citep{Eyben2016open}.\n\nFigure \\ref{fig:acoustic_model} shows an overview of the acoustic network. LSTM\nis chosen because the number of training samples is adequate ($> 5000$ samples)\nand it shows good result on the previous research \\citep{Schmitt2018}. Before\nentering the LSTM layers, the LLD features at the input layer are fed into a\nbatch normalization layer to speed up the computation process. The three\nsubsequent LSTM layers are stacked with 256 nodes in each layer. Instead of\nreturning the last output of the last LSTM layer, we designed the network to\nreturn the full sequence and flatten it before inputting it to three dense\nlayers that represent valence, arousal, and dominance. The outputs of these last\ndense layers are then the predictions for those emotion attributes, i.e., the\ndegrees of valence, arousal, and dominance in the range [-1, 1]. \n\nFor the HSF1 and HSF2 inputs, the same setup applies. These two feature sets are\nvery small as compared to the LLDs: HSF1 has a size of 1 $\\times$ 46, while HSF2\nhas a size of 1 $\\times$ 47. 
This big difference in input size (1:1800) leads to\nfaster computation on HSF1 and HSF2 than on the LLDs. Note that, although Figure\n\\ref{fig:acoustic_model} shows HSF2 as the input feature, the same architecture\nalso applies for the LLDs and HSF1.\n\n\\begin{figure}[htpb]\n\\centering\n\\includegraphics[width=0.8\\textwidth]{../fig/model_acoustic.pdf}\n\\caption{Structure of acoustic network to process acoustic features.}\n\\label{fig:acoustic_model}\n\\end{figure}\n\n\\subsection{Text emotion recognition system}\nThe text network, shown in Figure \\ref{fig:text_model} for the MSP-I+N dataset,\nuses the same input size for the three different text features. The WE, WE with\npretrained Word2Vec, and WE with pretrained GloVe embedding each have 300\ndimensions for each word. The longest sequence in the IEMOCAP dataset is 100\nsequences (words), while for MSP-I+N the longest is 300 sequences. Hence, the\ninput features sizes for the LSTM layers are 100 $\\times$ 300 for IEMOCAP and\n300 $\\times$ 300 for MSP-I+N with its corresponding number of samples. The same\nthree LSTM layers are stacked as in the acoustic network, but the last LSTM\nlayer only returns the last output. A dense layer with a size of 128 nodes is\nadded after the LSTM layers and before the last three dense layers. Between the\ndense layers is a dropout layer with the same probability of 0.3 to avoid\noverfitting.\n\n\\begin{figure}[htpb]\n\\centering\n\\includegraphics[width=0.8\\textwidth]{../fig/model_text.pdf}\n\\caption{Structure of text network to process word embeddings/vectors.}\n\\label{fig:text_model}\n\\end{figure}\n\n\\subsection{Multitask learning}\n\\label{sub:mtl}\nThe task here in dimensional emotion recognition is to simultaneously predict\nthe degrees of three emotion attributes, i.e., the degrees of valence, arousal,\nand dominance, for any given utterance. 
As the main target metric is the\nconcordance correlation coefficient (CCC), the loss function is the CCC loss\n(CCCL), which computes the score difference between the labels and predicted\nvalues for the three attributes. The CCC loss is formulated as the following:\n\\begin{align} \nCCC &= \\dfrac{2 \\rho \\sigma_x \\sigma_y} {\\sigma_x^2 + \\sigma_y^2 + (\\mu_x - \\mu_y)^2}, \\\\\nCCCL &= 1 - CCC,\n\\end{align}\nwhere $\\rho$ is the Pearson correlation coefficient between the predicted\nemotion degree $x$ and the true emotion degree $y$, $\\sigma^2$ is the variance,\nand $\\mu$ is the mean. As the learning process minimizes three variables, we use\nthe following multitask learning approach to optimize the CCC score:\n\\begin{equation}\nCCCL_{tot} = \\alpha CCCL_{V} + \\beta CCCL_{A} + (1-\\alpha-\\beta) CCCL_{D},\n\\end{equation}\nwhere $\\alpha$ and $\\beta$ are respective CCCL parameters for valence (V) and\narousal (A). The parameter for dominance (D) is obtained by subtracting $\\alpha$\nand $\\beta$ from 1. The same parameter range [0, 1] with 0.1 step is\ninvestigated for $\\alpha$ and $\\beta$ for both the acoustic and text networks,\nresulting in different optimal parameters, which are obtained by using linear\nsearch. Note that only positive values of $CCCL_{D}$'s parameters are used to\ninvestigate the optimal parameters.\n\n\\subsection{SVM-based late fusion}\n% add how many datapoints was used for SVM training\nWe choose an SVM as the final classifier to fuse the outputs of the acoustic and\ntext networks because of its effectiveness in handling smaller data (as compared\nto a DNN) and its computation speed. The number of datapoints produced by LSTM\nprocessing as the input of the SVM is small, i.e., 1600, 1538, 1147, and 1148 for\nIEMOCAP-SD,\nIEMOCAP-LOSO, MSPIN-SD and MSPIN-LOSO, respectively. The SVM then applies\nregression to map them to the given labels. 
Figure \\ref{fig:csl_system} shows\nthe architecture of this two-stage emotion recognition system using DNNs and an\nSVM. Each prediction from the acoustic and text networks is fed into the SVM.\nFrom two values (e.g., valence predictions from the acoustic and text networks),\nthe SVM learns to generate a final predicted degree (e.g., for valence). The\nconcept of using the SVM as the final classifier can be summarized as follows.\n\nSuppose that two valence prediction outputs from the acoustic and text networks,\n$x_i = [x_{ser}[i], x_{ter}[i]]$, are generated by the DNNs, and that $y_i$ is\nthe corresponding valence label. The problem in dimensional SER fusing acoustic\nand text results is to minimize the following: \n\n\\begin{equation}\n\\begin{aligned}\n& \\underset{w, b, \\zeta}{\\text{min}}\n& & \\frac{1}{2} w^Tw + C \\sum_{i=1}^n \\zeta_i \\\\\n& \\text{subject to}\n& & y_{i} (w^T \\phi (x_i)+b) \\geq 1 - \\zeta_i, \\\\\n&&& \\zeta_i \\geq 0, i = 1, \\ldots, n,\n\\end{aligned}\n\\end{equation}\nwhere $w$ is a weighting vector, $C$ is a penalty parameter, and $\\zeta$ is the\ndistance between misclassified points and the corresponding marginal boundary.\nHere, $\\phi$ is the kernel function. We choose a radial basis function (RBF)\nkernel because of its flexibility to model a nonlinear process with a\ndimensional emotion model close to this kernel. The function $\\phi$ for the RBF\nkernel is formulated as\n\n\\begin{equation}\n K(x_i, x_j) = e^{\\gamma(x_i - x_j)^2},\n \\label{tab:label}\n\\end{equation}\nwhere $\\gamma$ defines how much influence a single training has on the model.\nAll parameters in this SVM are obtained empirically via linear search in a\nspecific range. Although the explanation above uses valence, the same also\napplies for arousal and dominance. \n\n\\begin{figure}\n\\includegraphics[width=\\textwidth]{../fig/csl_system.pdf}\n\\caption{Proposed two-stage dimensional emotion recognition method using DNNs \nand an SVM. 
The inputs are acoustic features (af) and text features (tf); \nthe outputs are valence (v), arousal (a), and dominance (d).}\n\\label{fig:csl_system}\n\\end{figure}\n\n\\subsection{Reproducibility}\nThe experimental code was written in Python and, for the sake of research\nreproducibility, it is available in the following repository:\n\\url{https://github.com/bagustris/two-stage-ser}. The DNN part was implemented\nusing Keras by \\cite{chollet2015keras} and Tensorflow, while the SVM-based\nfusion was implemented using the scikit-learn toolkit by \\cite{scikit-learn}. To\nobtain consistent results for each run, some fixed numbers are initialized at\nthe beginning, as can be found in the repository above.\n\n\\section{Results and discussions}\n\\subsection{Results from single modality}\nBefore presenting the bimodal feature-fusion results, it is important to show\nthe results of unimodal emotion recognition. The goals here are (1) to observe\nthe (relative) improvement of bimodal feature fusion over using a single\nmodality, and (2) to observe the effects of different features on different\nemotion attributes.\n\nTables \\ref{tab:ser-test} and \\ref{tab:ter-test} list the single-modality\nresults of dimensional emotion recognition from the acoustic and text networks,\nrespectively. In general, acoustic-based SER gave better results than text-based\nSER in terms of the average CCC score. For particular emotion attributes, the\ntext network gave a higher CCC score for valence prediction than those obtained\nby the acoustic network (except on the MSPIN-SD dataset). This confirms the\nprevious finding by \\cite{Karadogan2012} that valence is better estimated by\nsemantic features, while arousal is better predicted by acoustic features. In\naddition, we found that the dominance dimension was better predicted by acoustic\nfeatures than by text features. 
This finding can be inferred from both tables,\nin which the CCC scores for the dominance dimension are frequently higher from\nthe acoustic network than from the text network.\n\nThe exception of a higher valence score on the MSPIN-SD dataset by the acoustic\nnetwork can be seen as the effect of either the DNN architecture or the\ndataset's characteristics. In \\cite{chen2017multimodal}, the obtained score was\nhigher for valence than for arousal or liking (the third dimension, instead of\ndominance) with their strategy on acoustic features. In contrast,\n\\cite{Abdelwahab2018} obtained a lower score for valence than for arousal and\ndominance by using their proposed DANN method on the same MSP-IMPROV dataset\n(whole data, all four scenarios). Given this comparison, we conclude that the\nhigher valence score obtained here was an effect of the DNN architecture,\nbecause of the multitask learning. Our result on a single modality (acoustic\nnetwork) outperformed the DANN result on MSP-IMPROV, where their highest CCC\nscores were (0.303, 0.176, 0.476) as compared to our scores of (0.403, 0.603,\n\n\\begin{table}[htpb]\n \\centering\n \\caption{CCC score results of dimensional emotion recognition using an acoustic\n network. The best results on the test set are in bold. 
LLDs: low-level\n descriptors from GeMAPS \\citep{Eyben}; HSF1: mean + std of LLDs; HSF2: mean\n + std + silence.}\n \\label{tab:ser-test}\n \\begin{tabular}{l c c c c}\n \\hline\n Feature set & V & A & D & Mean \\\\\n \\hline\n \\multicolumn{5}{c}{IEMOCAP-SD} \\\\\n LLD\t& 0.153\t& 0.522\t& 0.534\t& \\textbf{0.403} \\\\ \n HSF1\t& 0.186\t& 0.535\t& 0.466\t& 0.396 \\\\\n HSF2\t& 0.192\t& 0.539\t& 0.469\t& 0.400 \\\\\n \\hline\n \\multicolumn{5}{c}{MSPIN-SD} \\\\ \n LLD\t& 0.299\t& 0.545\t& 0.441\t& 0.428 \\\\\nHSF1\t& 0.400\t& 0.603\t& 0.506\t& 0.503 \\\\\nHSF2\t& 0.404\t& 0.605\t& 0.517\t& \\textbf{0.508} \\\\\n \\hline\n \\multicolumn{5}{c}{IEMOCAP-LOSO} \\\\\n LLD\t& 0.168\t& 0.486\t& 0.442\t& 0.365 \\\\\n HSF1\t& 0.206\t& 0.526\t& 0.442\t& 0.391 \\\\\n HSF2\t& 0.204\t& 0.543\t& 0.442\t& \\textbf{0.396} \\\\ \n \\hline\n \\multicolumn{5}{c}{MSPIN-LOSO} \\\\\nLLD\t & 0.176\t& 0.454\t& 0.369\t& 0.333 \\\\ \nHSF1\t& 0.201\t& 0.506\t& 0.357\t& \\textbf{0.355} \\\\\nHSF2\t& 0.206\t& 0.503\t& 0.346\t& 0.352 \\\\\n \\hline\n \\end{tabular}\n\\end{table} \n\n\\begin{table}[htpb]\n \\centering\n \\caption{CCC score results of dimensional emotion recognition using text\n networks; each score is an averaged score of 20 runs with its standard\n deviation. 
WE: word embedding; Word2Vec: WE weighted by pretrained word\n vector \\citep{Mikolov}; GloVe: WE weighted by pretrained global vector\n \\citep{Pennington2014}.}\n \\label{tab:ter-test}\n \\begin{tabular}{l c c c c}\n \\hline\n Feature set & V & A & D & Mean \\\\\n \\hline\n \\multicolumn{5}{c}{IEMOCAP-SD} \\\\\nWE\t & 0.389 $\\pm$ 0.008 & 0.373 $\\pm$ 0.010 & 0.398 $\\pm$ 0.017 &\t0.387 $\\pm$ 0.010 \\\\\nWord2Vec\t& 0.393 $\\pm$ 0.012 & 0.371 $\\pm$ 0.018 & 0.366 $\\pm$ 0.024 &\t0.377 $\\pm$ 0.016 \\\\\nGloVe\t & 0.410 $\\pm$ 0.007 & 0.381 $\\pm$ 0.013 & 0.393 $\\pm$ 0.016 &\t\\textbf{0.395 $\\pm$ 0.010} \\\\\n\n \\hline\n \\multicolumn{5}{c}{MSPIN-SD} \\\\\nWE\t & 0.120 $\\pm$ 0.047 &\t0.148 $\\pm$ 0.023\t& 0.084 $\\pm$ 0.024 &\t0.105 $\\pm$ 0.026 \\\\\nWord2Vec\t& 0.138 $\\pm$ 0.031 &\t0.108 $\\pm$ 0.024\t& 0.101 $\\pm$ 0.024 &\t0.116 $\\pm$ 0.017 \\\\\nGloVe\t & 0.147 $\\pm$ 0.043 &\t0.141 $\\pm$ 0.019\t& 0.098 $\\pm$ 0.017 &\t\\textbf{0.128 $\\pm$ 0.015} \\\\\n \\hline\n \\multicolumn{5}{c}{IEMOCAP-LOSO} \\\\\nWE\t & 0.376 $\\pm$ 0.008 &\t0.359 $\\pm$ 0.018 & 0.370 $\\pm$\t0.020 & 0.368 $\\pm$ 0.013 \\\\\nWord2Vec\t& 0.375 $\\pm$ 0.058 &\t0.357 $\\pm$ 0.058 & 0.365 $\\pm$\t0.065 & 0.366 $\\pm$ 0.059 \\\\\nGloVe\t & 0.405 $\\pm$ 0.009 &\t0.382 $\\pm$ 0.020 & 0.378 $\\pm$\t0.021 & \\textbf{0.389 $\\pm$ 0.014} \\\\\n \\hline\n \\multicolumn{5}{c}{MSPIN-LOSO} \\\\\nWE\t & 0.076 $\\pm$ 0.013 &\t0.196 $\\pm$ 0.011 & 0.136 $\\pm$\t0.015 &\t0.136 $\\pm$ 0.009 \\\\\nWord2Vec\t& 0.162 $\\pm$ 0.008 &\t0.202 $\\pm$ 0.005 & 0.147 $\\pm$\t0.003 &\t\\textbf{0.170 $\\pm$ 0.000} \\\\\nGloVe\t & 0.192 $\\pm$ 0.004 &\t0.189 $\\pm$ 0.007 & 0.129 $\\pm$\t0.004 &\t\\textbf{0.170 $\\pm$ 0.003} \\\\\n \\hline\n \\end{tabular}\n\\end{table} \n\nTo find the optimal parameter values for $\\alpha$ and $\\beta$, linear search was\nperformed on the scale [0.0, 1.0] with a step of 0.1. 
Using this conventional\ntechnique, we found four sets of optimal parameters for the acoustic and text\nnetworks. Note that, while only the improvised and natural scenarios (MSP-I+N)\nwere used to find the optimal text-network parameters for the MSP-IMPROV\ndataset, the whole dataset was used to find the optimal acoustic-network\nparameters. Table \\ref{tab:optim_params} lists the optimal parameter values for\n$\\alpha$ and $\\beta$.\n\n\\begin{table}[htpb]\n \\centering\n \\caption{Optimal parameters for multitask learning.}\n \\label{tab:optim_params}\n \\begin{tabular}{l l c c}\n \\hline\n Dataset & Modality & $\\alpha$ & $\\beta$ \\\\\n \\hline\n IEMOCAP & acoustic & 0.1 & 0.5 \\\\\n & text & 0.7 & 0.2 \\\\\n MSP-IMPROV & acoustic & 0.3 & 0.6 \\\\\n & text & 0.1 & 0.6 \\\\\n \\hline\n \\end{tabular}\n\\end{table} \n\nTo summarize the single-modality results, average CCC scores from the three emotion\ndimensions can be used to justify which feature performs better than the others. The\nresults clearly show that HSF2 was the most useful of the acoustic feature sets,\nwhile the word embedding (WE) with pretrained GloVe embedding was the most\nuseful of the text feature sets. 
Although the performance of dimensional emotion\nrecognition in the speaker-independent (LOSO) case was lower than in the\nspeaker-dependent (SD) case, as predicted, the trend remained the same; that is,\nHSF2 and GloVe consistently achieved the highest performance for the acoustic\nand text modalities, respectively.\n\n\\subsection{Results from SVM-based fusion}\nThe main proposal of this research is the late-fusion approach combining the\nresults from acoustic and text networks for dimensional emotion recognition.\nThis subsection presents the results for the late-fusion approach, including the\nobtained performances, comparison with the single-modality results, which pairs\nof acoustic-text results performed better, and our overall findings.\n\nFor each dataset (IEMOCAP-SD, MSPIN-SD, IEMOCAP-LOSO, MSPIN-LOSO), nine\ncombinations of acoustic-text result pairs could be fed to the SVM system.\nTables \\ref{tab:svm-iemocap-sd}, \\ref{tab:svm-mspin-sd},\n\\ref{tab:svm-iemocap-loso}, and \\ref{tab:svm-mspin-loso} list the respective CCC\nresults for these datasets. Generally, our proposed two-stage dimensional\nemotion recognition improved the CCC score from single-modality emotion\nrecognition. The pair of results from HSF2 (acoustic) and Word2Vec (text) gave\nthe highest CCC score on speaker-dependent scenarios.\n\nOn the speaker-independent IEMOCAP dataset (IEMOCAP-LOSO), the result from the\npair of HSF2 and GloVe gave the highest CCC score. This result linearly\ncorrelated with the single-modality results for that dataset, in which HSF2\nobtained the highest CCC score among the acoustic features, and GloVe was the\nbest among the text features. On the four datasets, the results from HSF2\nobtained the highest CCC score for two out of four datasets while GloVe obtained\nthe highest CCC score for all four datasets. 
Hence, we conclude that the highest\nresult from a single modality, when paired with the highest result from another\nmodality, will achieve the highest performance among possible pairs.\n\n\\begin{table}[htpb]\n \\centering\n \\caption{CCC score results after late fusion using an SVM on the IEMOCAP-SD test set.}\n \\label{tab:svm-iemocap-sd}\n \\begin{tabular}{l c c c c}\n \\hline\n Inputs & V & A & D & Mean \\\\\n \\hline\n LLD\t+ WE\t & 0.520 & 0.602 & 0.519 & 0.547 \\\\\n LLD\t+ Word2Vec\t& 0.552 & 0.613 & 0.524 & 0.563 \\\\\n LLD\t+ GloVe\t & 0.546 & 0.606 & 0.520 & 0.557 \\\\\n HSF1\t+ WE\t & 0.578 & 0.575 & 0.490 & 0.548 \\\\\n HSF1\t+ Word2Vec\t& 0.599 & 0.590 & 0.491 & 0.560 \\\\\n HSF1\t+ GloVe\t & 0.595 & 0.582 & 0.495 & 0.557 \\\\\n HSF2\t+ WE\t & 0.598 & 0.591 & 0.502 & 0.564 \\\\\n HSF2\t+ Word2Vec\t& 0.595 & 0.601 & 0.499 & \\textbf{0.565} \\\\\n HSF2\t+ GloVe\t & 0.598 & 0.591 & 0.502 & 0.564 \\\\ \n \\hline\n \\end{tabular}\n\\end{table} \n\n\\begin{table}[htpb]\n \\centering\n \\caption{CCC score results after late fusion using an SVM on the MSPIN-SD dataset.}\n \\label{tab:svm-mspin-sd}\n \\begin{tabular}{l c c c c}\n \\hline\n Inputs & V & A & D & Mean \\\\\n \\hline\nLLD\t + WE\t & 0.344 & 0.591 & 0.447 & 0.461 \\\\\nLLD\t + Word2Vec\t& 0.326 & 0.586 & 0.439 & 0.450 \\\\\nLLD\t + GloVe\t & 0.344 & 0.585 & 0.439 & 0.456 \\\\\nHSF1\t+ WE\t & 0.461 & 0.637 & 0.517 & 0.538 \\\\\nHSF1\t+ Word2Vec\t& 0.464 & 0.634 & 0.518 & 0.539 \\\\\nHSF1\t+ GloVe\t & 0.466 & 0.630 & 0.510 & 0.535 \\\\\nHSF2\t+ WE\t & 0.475 & 0.640 & 0.522 & 0.546 \\\\\nHSF2\t+ Word2Vec\t& 0.486 & 0.641 & 0.524 & \\textbf{0.550} \\\\\nHSF2\t+ GloVe\t & 0.485 & 0.638 & 0.523 & 0.549 \\\\\n \\hline\n \\end{tabular}\n\\end{table} \n\n\\begin{table}[htpb]\n \\centering\n \\caption{CCC score results after late fusion using an SVM on the IEMOCAP-LOSO test set.}\n \\label{tab:svm-iemocap-loso}\n \\begin{tabular}{l c c c c}\n \\hline \n Inputs & V & A & D & Mean \\\\\n \\hline\nLLD\t + 
WE\t & 0.537 & 0.583 & 0.431 & 0.517 \\\\\nLLD\t + Word2Vec\t& 0.528 & 0.580 & 0.421 & 0.510 \\\\\nLLD\t + GloVe\t & 0.539 & 0.587 & 0.430 & 0.518 \\\\\nHSF1\t+ WE\t & 0.565 & 0.565 & 0.453 & 0.528 \\\\\nHSF1\t+ Word2Vec\t& 0.536 & 0.559 & 0.434 & 0.510 \\\\\nHSF1\t+ GloVe\t & 0.559 & 0.570 & 0.452 & 0.527 \\\\\nHSF2\t+ WE\t & 0.524 & 0.566 & 0.452 & 0.514 \\\\\nHSF2\t+ Word2Vec\t& 0.531 & 0.571 & 0.445 & 0.516 \\\\\nHSF2\t+ GloVe\t & 0.553 & 0.579 & 0.465 & \\textbf{0.532} \\\\\n \\hline\n\\end{tabular}\n\\end{table} \n\n\\begin{table}[htpb]\n \\centering\n \\caption{CCC score results after late fusion using an SVM on the MSPIN-LOSO test set.}\n \\label{tab:svm-mspin-loso}\n \\begin{tabular}{l c c c c}\n \\hline \n Inputs & V & A & D & Mean \\\\\n \\hline\n LLD\t+ WE\t & 0.204 & 0.485\t& 0.387 & 0.358 \\\\\n LLD\t+ Word2Vec\t& 0.267 & 0.487\t& 0.386 & 0.380 \\\\\n LLD\t+ GloVe\t & 0.269 & 0.482\t& 0.375 & 0.376 \\\\\n HSF1\t+ WE\t & 0.224 & 0.565\t& 0.410 & 0.400 \\\\\n HSF1\t+ Word2Vec\t& 0.286 & 0.558\t& 0.411 & 0.418 \\\\\n HSF1\t+ GloVe\t & 0.282 & 0.555\t& 0.409 & 0.415 \\\\\n HSF2\t+ WE\t & 0.232 & 0.566\t& 0.421 & 0.406 \\\\\n HSF2\t+ Word2Vec\t& 0.287 & 0.562\t& 0.411 & 0.420 \\\\\n HSF2\t+ GloVe\t & 0.291 & 0.570\t& 0.405 & \\textbf{0.422} \\\\ \n \\hline\n\\end{tabular}\n\\end{table} \n\n% todo: explain why we did not use p value to evaluate the difference\nTo evaluate the improvement obtained by SVM-based late fusion, average CCC\nscores again can be used as a single metric. The most right column in the Table\n\\ref{tab:svm-iemocap-sd}, \\ref{tab:svm-mspin-sd}, \\ref{tab:svm-iemocap-loso},\nand \\ref{tab:svm-mspin-loso} shows the average CCC results obtained for the nine\npairs of acoustic and text results on the four different datasets. Comparing\nthese bimodal results to unimodal results (Table \\ref{tab:ser-test} and\n\\ref{tab:ter-test}) shows the difference. All results from SVM improved unimodal\nresutls. 
In speaker-independent (LOSO) results (which more appropriate for\nreal-life analysis), the scores resulted by pairs of HSF with any word vector\nobtain remarkable improvements, particularly in MSPIN-LOSO dataset. For any\nother pair involving LLDs, the obtained score was also lower as compared to\nother pairs. Considering all low scores involved LLD results, improving the\nperformance of dimensional emotion recognition by using LLDs is more complicated\nthan by using HSF1 and HSF2 apart from the longer training time. The large\nnetwork size created by an LLD input as a result of its much bigger feature\ndimension (e.g., 3409 $\\times$ 23 on IEMOCAP) did not help either the\nsingle-modality or late-fusion performance. In contrast, the small sizes of the\nfunctional features (HSF1 and HSF2) enabled better performance on a single\nmodality, which led to better performance for the late-fusion score. To obtain\nfunctional features, however, a set of LLD features must be obtained first. This\nproblem is a challenging future research direction, especially for implementing\ndimensional emotion recognition with real-time processing. \n\nBesides the fact that a speaker-independent dataset is usually more difficult\nthan a speaker-dependent dataset, the low score on MSPIN-LOSO was due to its low\nscores on a single modality. In another word, lower pair performance from a\nsingle modality will result in low performance in late fusion. In particular,\nthese low results derive from low CCC scores from the text modality (Table\n\\ref{tab:ter-test}). The average CCC score for the text modality on the\nMSPIN-LOSO dataset was less than 0.16, compared an average score higher than\n0.34 for the acoustic modality. All nine pairs in late-fusion approaches\nimproved on the single-modality results because of the two-stage DNN and SVM\nregression. 
Thus, out of 36 trials (9 pairs $\\times$ 4 datasets), our proposed\ntwo-stage dimensional emotion recognition performs all pairs.\n\nThe low score on MSPIN for the text modality can be tracked to the origin of the\ndataset. Although we tried to remove the \\emph{target sentences} by choosing\nonly the improvised and natural interaction scenarios, the influence of the\ntarget sentence may still have affected the actors. As a result, some utterances\nstill suffered from the target sentences. We confirmed this mismatch by manually\nchecking the provided transcription and our automatic transcription. Instead of\nremoving the mismatched data, we used all data from the improvised and natural\ninteraction scenarios, because they were designed to elicit improvised and\nnatural emotions. A similarly low result on this MSPIN dataset was also shown in\n\\cite{Zhang2019}. Compared to the IEMOCAP dataset, the MSPIN dataset suffers\nfrom low accuracy in recognizing the valance category by using acoustic and\nlexical properties. Those authors also did not show improvement on the IEMOCAP\nscripted dataset, another text-based session in which lexical/text features do\nnot contribute significantly.\n\nTo measure the improvement by our proposed two-stage late fusion, we calculated\nthe relative improvement obtained by late fusion from the highest CCC scores for\na single modality. For example, the pair of LLD + WE used the results from the\nLLDs in the acoustic network and the WE in the text network. We compared the\nresult for LLD + WE with that for the LLDs, as it had a higher score than the WE\ndid. Figure \\ref{fig:relative-improvement} thus shows the relative improvement\nfor all nine pairs. As discussed previously, only 1 of the 36 trials did not\nshow improvement. The other 35 trials (97.2\\%) showed improvements ranging from\n4.5\\% to 40.89\\%. Table \\ref{tab:relative-improvement} lists the statistics for\nthe obtained relative improvement. 
Our results show higher relative accuracy\nimprovement as compared to those obtained by \\cite{Zhang2019} for valence\nprediction, which ranged from 6\\% to 9\\%. Nevertheless, their multistage fusion\nmethod also showed benefits over the multimodal and single-modality approaches.\nThese findings confirm the benefits of using bimodal/multimodal fusion instead\nof single-modality processing for valence, arousal, and dominance prediction.\n\n\\begin{figure}[htpb]\n\\centering\n\\includegraphics[width=\\textwidth]{../fig/improvement.pdf}\n\\caption{Relative improvement in average CCC scores from late fusion using an \nSVM as compared to the highest average CCC scores from a single modality.}\n\\label{fig:relative-improvement}\n\\end{figure}\n\n\\begin{table}[htpb]\n \\centering\n \\caption{Statistics of relative improvement by late fusion using an SVM as\n compared to the highest scores for a single modality across datasets. The\n scores were extracted from the data shown in Figure\n \\ref{fig:relative-improvement}.}\n \\label{tab:relative-improvement}\n \\begin{tabular}{l c c c c}\n \\hline\n Statistic & IEMOCAP-SD & MSPIN-SD & IEMOCAP-LOSO & MSPIN-LOSO \\\\\n \\hline\n Average\t& 39.73\\%\t& 7.01\\%\t& 34.15\\%\t& 15.23\\% \\\\\n Max\t& 41.45\\%\t& 8.22\\%\t& 40.32\\%\t& 19.93\\% \\\\\n Min\t& 35.80\\%\t& 5.11\\%\t& 29.69\\%\t& 7.64\\% \\\\\n Std\t& 1.90\\%\t& 0.93\\%\t& 3.84\\%\t& 3.90\\% \\\\\n \\hline\n \\end{tabular}\n\\end{table} \n\n% todo: analysis speaker-dependent vs speaker-independent\n\\subsection{Speaker-dependent vs. speaker-independent text emotion recognition}\nWhile speech-based emotion recognition is performed with a fixed random seed to\ngenerate the same result for each run, text-based emotion recognition resulted\nin different scores for each run. 
This different results on text emotion\nrecognition probably is caused by initiation of weightings on embedding layers.\nIn this case, statistical tests can be performed on text emotion results \nto observe the difference between speaker-dependent and speaker-independent \nscenario.\n\nTable \\ref{tab:test_sd_loso} shows if there is significantly different between\nspeaker-dependent and speaker-independent results on the same feature set. We\nassure $p-$value = 0.05 with two-tail paired t-test between mean scores of\nspeaker-dependent and speaker-independent results. This paired t-test was based\non assumption that there are no outliers (after pre-processing) and two\ndifferent inputs are fed into the same system. Only one result from text emotion\nrecognition shows no significant difference on IEMOCAP dataset while all results\nfrom on MSPIN dataset shows significant difference between speaker-dependent and\nspeaker-independent results. This result reveals that almost there is difference\non evaluating speaker-dependent and speaker-independent data. The results from\nspeaker-dependent data did not represent speaker-independent data. In other words, results from speaker-dependent data cannot be used to justify speaker-independent or whole data.\n\n\\begin{table}\n \\centering\n \\caption{Significant difference between speaker-dependent and speaker-independent scenario on the same text feature set; statistical tests were performed using two-tail paired $t-$test with $p-$value = 0.005}\n \\label{tab:test_sd_loso}\n \\begin{tabular}{l c c}\n \\hline\n Feature & IEMOCAP & MSPIN \\\\\n \\hline\n WE & Yes & Yes \\\\\n Word2Vec & No & Yes \\\\\n GloVe & Yes & Yes \\\\\n \\hline\n \\end{tabular}\n\\end{table}\n\n% In comparing different text features for text emotion recognition, we found \n% significant difference on LOSO scenario on both datasets. 
In speaker-dependent\n% scenarios, only MSPIN dataset shows significant difference between\n% speaker-dependent and speaker-independent scenarios. This result suggests that \n% evaluating speaker-independent is more relevant since there is difference\n% among emotion predictions resulted by different text features (as a result of \n% speaker individualities). Table \\ref{tab:t_test_features} shows if there is \n% significant difference between different text features's results.\n\n% \\begin{table}\n% \\centering\n% \\caption{Significant difference between text emotion recognition results by different text features; statistical tests were performed using two-tail paired $t-$test with $p-$value = 0.005}\n% \\label{tab:t_test_features}\n% \\begin{tabular}{l l c c}\n% \\hline\n% Dataset\t& Features\t & SD\t& LOSO \\\\\n% \\hline\n% IEMOCAP\t& WE - Word2Vec \t& No\t& No \\\\\n% \t & Word2Vec - GloVe\t& No\t& Yes \\\\\n% \t & GloVe - WE\t & No\t& Yes \\\\\n% MSPIN\t& WE - Word2Vec\t & Yes\t& Yes \\\\\n% \t & Word2Vec - GloVe\t& Yes\t& No \\\\\n% \t & GloVe - WE\t & Yes\t& Yes \\\\\n% \\hline\n% \\end{tabular}\n% \\end{table}\n\n\\subsection{Effect of removing target sentence from MSPIN dataset}\nSince the target of this research is to evaluate contribution of both acoustic\nand linguistic information in affective expressions, it is necessary to have\nsentence in the dataset that free from any stimuli control. However, the\noriginal MSP-IMPROV dataset contains 20 \"target\" sentences; a same sentence that\nis elicited for different emotions. These parts of MSP-IMPROV dataset is\nirrelevant to this study; hence, we remove it from the dataset (\\textit{Target -\nimprovised} and \\textit{Target - read} parts). However, we found that the\nresults shows low CCC scores (Table \\ref{tab:ter-test}) indicating the\ninfluences of target sentence. 
These results may be explained by the fact that\nalthough the actors were prevented from playing similar improvisation for a\ntarget sentence across emotion, there are still similar improvisations from\ntarget sentence recordings.\n\n\n\n\\subsection{Benchmarking results and final remarks}\nWe tried to use the same datasets and metrics to benchmark our results with\nother results. We used the MSPIN partition to represent MSP-IMPROV, because it\ncontains 84.92\\% of the total MSP-IMPROV dataset. The results of the proposed\ntwo-stage late fusion may be lower on the whole MSP-IMPROV dataset, as it has\nmore target sentences because of the nature of the dataset design. Although we\nused bimodal features from acoustic and text information, we did not limit our\nbenchmarking to a single modality. Any modality among audio, visual, text, and\nother measurements can be compared, because the goal is to benchmark various\nmethods used in dimensional emotion recognition. Unfortunately, only a few\nevaluations using the CCC score have been reported using cross-corpus or\nmixed-corpus of IEMOCAP and MSP-IMPROV. In fact, to the best our knowledge, the\nonly reference reported bimodal emotion recognition from acoustic and text\nfeatures is done by using early-fusion approach \\citep{Atmaja2020d}, in contrast\nof our late-fusion approach. 
Table \\ref{tab:benchmark} shows a comparison of\ncurrent and previous results on the same data split scenario (IEMOCAP-LOSO).\n\n\\begin{table}\n \\centering\n \\caption{Benchmarking with previous result on the same data split scenario}\n \\label{tab:benchmark}\n \\begin{tabular}{l c c c c}\n \\hline\n Method & V & A & D & Mean \\\\\n \\hline\n Early fusion \\citep{Atmaja2019d}\t& 0.446\t& 0.594\t& 0.485\t& 0.508 \\\\\n Late fusion (ours)\t& 0.553\t& 0.579\t&0.465\t& 0.532 \\\\\n \\hline\n \\end{tabular}\n\\end{table}\n\nClearly, our proposed method with two-state processing outperforms previous\none-stage processing in terms of averaged CCC scores. These high results may\ncome from the similarity between our model with how human fuses multimodal\ninformation. Late-fusion approach, in this case, is more appropriate to fuse\nacoustic and linguistic information than early-fusion approach. This late-fusion\napproach also can be embedded with current speech technology, i.e., automatic\nspeech recognition, in which the text output can be processed to weigh emotion\nprediction from acoustic features.\n\n\\cite{Abdelwahab2018} used MSP-Podcast \\citep{Lotfian2019} as a target corpora,\nwhich is not available for public yet, and IEMOCAP with MSP-IMPROV as source\ncorpus to implement their DANN for cross-corpus speech emotion recognition.\nAlthough the goal is different, we observed similar patterns between their and\nour acoustic-only speech emotion recognition. First, the order of highest to\nlowest CCC scores is arousal, dominance, and valence. This pattern is also\nconsistent when IEMOCAP is mixed with MSP-IMPROV as reported by\n\\cite{parthasarathy2017jointly} (in Table 2). Second, the CCC scores obtained in\nIEMOCAP is higher than obtained in MSP-IMPROV; we believe that this lower score\nin MSP-IMPROV was due to the smaller size of the dataset. 
\n\nIn addition to our SVM architecture, we also explored the parameters $C$ and\n$\\gamma$, because both parameters are important for an RBF-kernel-based SVM\narchitecture \\citep{scikit-learn}. Linear search was used in the ranges of\n[$10^-2, 1, 10^2, 2 \\times 10^2, 3 \\times 10^2$] for $C$ and [$10-2, 10-1, 1,\n10, 10^2$] for $\\gamma$ with a fixed value of $\\epsilon$, i.e., 0.01. The best\nparameter values were $C=200$ and $\\gamma=0.1$. The repository includes the\ndetailed implementation of the SVM architecture.\n\nWe accomplished the goal of investigating our proposal to apply two-stage\nprocessing by using DNNs and an SVM for dimensional emotion recognition from\nacoustic and text features on four different datasets. We found that the\ncombination of mean + std + silence from the acoustic features and word\nembeddings weighted by pretrained GloVe embeddings achieved the highest result\namong the nine pairs of acoustic-text results from DNNs trained with multitask\nlearning. When the performance in obtaining one input to the SVM is very low,\nthe resulting relative improvement due to the SVM is also low. For instance, the\nlowest improvement on MSPIN-LOSO was from LLD + WE features, in which WE\nobtained low score ($\\hat{CCC}=0.136$) on text network. This phenomenon suggests\na challenging future research direction to accommodate very little information,\nparticularly linguistic information, in the fusion strategy. One strategy\napplied in this research was to use a pretrained GloVe embedding on text\nfeatures with HSF2 on acoustic features, which improved the CCC score from 0.358\n(relative improvement = 7.64\\%) to 0.422 (relative improvement = 19.93\\%). Other\nstrategies should also be proposed, such as how to handle the data differently\nwhen the same sentence elicits different emotions (i.e., whole MSP-IMPROV\ndataset). 
In contrast, the current word-embedding feature treats the same\nsentence as the same representation, even when it conveys different emotions.\n\n\\section{Conclusions}\nIn conclusion, we can summarize several findings. First, we found a linear\ncorrelation between the single-modality and late-fusion methods in dimensional\nemotion recognition. The best results from each modality, when they were paired,\ngave the best fusion result. In the same way, the worst results obtained from\neach network, when they were paired, gave the worst fusion results for bimodal\nemotion recognition. This finding differs from that reported in\n\\cite{Atmaja2019b}, which used an early-fusion approach for categorical emotion\nrecognition. \n\nSecond, text features strongly influenced the score of dimensional SER on the\nvalence dimension, while acoustic features strongly influenced arousal and\ndominance. Accordingly, the proposed two-stage processing can take advantage of\ntext features, which are commonly used in predicting sentiment (valence) for the\ndimensional emotion recognition task. The proposed fusion method improves all\nthree emotion dimensions without attenuating the performance of any dimension.\nThat is, the proposed method elevates the scores for valence, arousal, and\ndominance subsequently from the highest to the lowest gain.\n\nThird, the combination of input pairs does not matter in the proposed fusion\nmethod, as indicated by the low deviation in relative improvement across the\nnine possible input pairs. What does matter is the performance on the input in\nthe DNN stage. If the performance of a feature set in the DNN stage is low\n($\\overline{CCC} \\leq 0.2$), it will also result in low performance when paired\nwith another low-performance input in the SVM stage. \n\nFourth, regarding the third finding, there may be a tradeoff in the requirement\nfor SVM-based fusion to obtain significant improvement. 
In detail, a minimum\nperformance in the DNN stage may be required for the SVM to gain significant\nimprovement.\n\nFinally, this bimodal approach can be extended to a multimodal approach. All\nnine feature sets can be combined, or both acoustic and text features can be\ncombined with visual and motion-capture measurements that have advantages in\nspecific emotion dimensions (liking or naturalness). The SVM stage itself can be\nperformed many times to obtain such improvements. These broad research\ndirections are open challenges for researchers in human-computer interaction.\n\n\\section*{References}\n\\bibliography{CSL2020}\n\n\\end{document}\n"
}
}
[Trace - 12:11:52 PM] Sending request 'textDocument/codeAction - (1)'.
Params: {
"textDocument": {
"uri": "file:///media/bagustris/atmaja/s3/mypaper/2020/sc/elsarticle-bagus/sc-bagus.tex"
},
"range": {
"start": {
"line": 912,
"character": 27
},
"end": {
"line": 912,
"character": 27
}
},
"context": {
"diagnostics": []
}
}
[Trace - 12:11:52 PM] Received request 'workspace/configuration - (1)'.
Params: {
"items": [
{
"scopeUri": "file:///media/bagustris/atmaja/s3/mypaper/2020/sc/elsarticle-bagus/sc-bagus.tex",
"section": "ltex"
}
]
}
[Trace - 12:11:52 PM] Sending response 'workspace/configuration - (1)'. Processing request took 5ms
Result: [
{
"enabled": true,
"language": "en-US",
"dictionary": {},
"disabledRules": {},
"enabledRules": {},
"ltex-ls": {
"path": "",
"languageToolHttpServerUri": ""
},
"java": {
"path": "",
"initialHeapSize": 64,
"maximumHeapSize": 512
},
"commands": {
"ignore": [],
"dummy": []
},
"environments": {
"ignore": []
},
"markdown": {
"ignore": [
"CodeBlock",
"FencedCodeBlock",
"IndentedCodeBlock"
],
"dummy": [
"AutoLink",
"Code"
]
},
"ignoreRuleInSentence": [],
"configurationTarget": {
"addToDictionary": "global",
"disableRule": "workspaceFolder",
"ignoreRuleInSentence": "workspaceFolder"
},
"additionalRules": {
"motherTongue": "",
"languageModel": "",
"neuralNetworkModel": "",
"word2VecModel": ""
},
"sentenceCacheSize": 2000,
"diagnosticSeverity": "information",
"trace": {
"server": "verbose"
},
"javaHome": null,
"performance": {
"initialJavaHeapSize": null,
"maximumHeapSize": null,
"sentenceCacheSize": null
},
"ar": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"ar-DZ": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"ast-ES": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"be-BY": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"br-FR": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"ca-ES": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"ca-ES-valencia": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"da-DK": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"de": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"de-AT": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"de-CH": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"de-DE": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"de-DE-x-simple-language": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"el-GR": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"en": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"en-AU": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"en-CA": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"en-GB": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"en-NZ": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"en-US": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"en-ZA": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"eo": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"es": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"fa": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"fr": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"ga-IE": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"gl-ES": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"it": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"ja-JP": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"km-KH": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"nl": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"pl-PL": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"pt": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"pt-AO": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"pt-BR": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"pt-MZ": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"pt-PT": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"ro-RO": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"ru-RU": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"sk-SK": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"sl-SI": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"sv": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"ta-IN": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"tl-PH": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"uk-UA": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
},
"zh-CN": {
"dictionary": [],
"enabledRules": [],
"disabledRules": []
}
}
]
[Trace - 12:11:52 PM] Received notification 'telemetry/event'.
Params: {
"type": "progress",
"uri": "file:///media/bagustris/atmaja/s3/mypaper/2020/sc/elsarticle-bagus/sc-bagus.tex",
"operation": "checkDocument",
"progress": 0
}
[Trace - 12:11:52 PM] Received response 'textDocument/codeAction - (1)' in 91ms.
Result: []
[Trace - 12:12:26 PM] Received notification 'telemetry/event'.
Params: {
"type": "progress",
"uri": "file:///media/bagustris/atmaja/s3/mypaper/2020/sc/elsarticle-bagus/sc-bagus.tex",
"operation": "checkDocument",
"progress": 1
}
[Trace - 12:12:26 PM] Received notification 'textDocument/publishDiagnostics'.
Params: {
"uri": "file:///media/bagustris/atmaja/s3/mypaper/2020/sc/elsarticle-bagus/sc-bagus.tex",
"diagnostics": [
{
"range": {
"start": {
"line": 34,
"character": 48
},
"end": {
"line": 34,
"character": 58
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 42,
"character": 8
},
"end": {
"line": 42,
"character": 21
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 42,
"character": 21
},
"end": {
"line": 42,
"character": 40
}
},
"severity": 3,
"source": "LTeX - COMMA_PARENTHESIS_WHITESPACE",
"message": "Put a space after the comma"
},
{
"range": {
"start": {
"line": 42,
"character": 42
},
"end": {
"line": 42,
"character": 47
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 42,
"character": 48
},
"end": {
"line": 42,
"character": 52
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 42,
"character": 53
},
"end": {
"line": 42,
"character": 88
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 43,
"character": 9
},
"end": {
"line": 43,
"character": 30
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 45,
"character": 8
},
"end": {
"line": 45,
"character": 26
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 45,
"character": 28
},
"end": {
"line": 45,
"character": 34
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 45,
"character": 35
},
"end": {
"line": 45,
"character": 40
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 47,
"character": 9
},
"end": {
"line": 47,
"character": 22
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 48,
"character": 5
},
"end": {
"line": 48,
"character": 13
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 48,
"character": 15
},
"end": {
"line": 48,
"character": 19
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 48,
"character": 21
},
"end": {
"line": 48,
"character": 29
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 49,
"character": 9
},
"end": {
"line": 49,
"character": 27
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 49,
"character": 29
},
"end": {
"line": 49,
"character": 36
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 49,
"character": 37
},
"end": {
"line": 49,
"character": 45
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 49,
"character": 46
},
"end": {
"line": 49,
"character": 54
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 50,
"character": 1
},
"end": {
"line": 50,
"character": 9
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 70,
"character": 0
},
"end": {
"line": 70,
"character": 9
}
},
"severity": 3,
"source": "LTeX - UPPERCASE_SENTENCE_START",
"message": "This sentence does not start with an uppercase letter"
},
{
"range": {
"start": {
"line": 93,
"character": 48
},
"end": {
"line": 93,
"character": 56
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 94,
"character": 47
},
"end": {
"line": 94,
"character": 58
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 112,
"character": 31
},
"end": {
"line": 112,
"character": 40
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 115,
"character": 51
},
"end": {
"line": 115,
"character": 61
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 116,
"character": 61
},
"end": {
"line": 116,
"character": 64
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 119,
"character": 67
},
"end": {
"line": 119,
"character": 70
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 150,
"character": 6
},
"end": {
"line": 150,
"character": 10
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 150,
"character": 57
},
"end": {
"line": 150,
"character": 65
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 160,
"character": 48
},
"end": {
"line": 160,
"character": 49
}
},
"severity": 3,
"source": "LTeX - EN_QUOTES",
"message": "Use a smart opening quote here: '“'."
},
{
"range": {
"start": {
"line": 160,
"character": 61
},
"end": {
"line": 160,
"character": 62
}
},
"severity": 3,
"source": "LTeX - EN_QUOTES",
"message": "Use a smart closing quote here: '”'."
},
{
"range": {
"start": {
"line": 162,
"character": 30
},
"end": {
"line": 162,
"character": 31
}
},
"severity": 3,
"source": "LTeX - EN_QUOTES",
"message": "Use a smart opening quote here: '“'."
},
{
"range": {
"start": {
"line": 162,
"character": 52
},
"end": {
"line": 162,
"character": 53
}
},
"severity": 3,
"source": "LTeX - EN_QUOTES",
"message": "Use a smart closing quote here: '”'."
},
{
"range": {
"start": {
"line": 163,
"character": 40
},
"end": {
"line": 163,
"character": 41
}
},
"severity": 3,
"source": "LTeX - EN_QUOTES",
"message": "Use a smart opening quote here: '“'."
},
{
"range": {
"start": {
"line": 163,
"character": 78
},
"end": {
"line": 163,
"character": 79
}
},
"severity": 3,
"source": "LTeX - EN_QUOTES",
"message": "Use a smart closing quote here: '”'."
},
{
"range": {
"start": {
"line": 164,
"character": 44
},
"end": {
"line": 164,
"character": 45
}
},
"severity": 3,
"source": "LTeX - EN_QUOTES",
"message": "Use a smart opening quote here: '“'."
},
{
"range": {
"start": {
"line": 164,
"character": 67
},
"end": {
"line": 164,
"character": 68
}
},
"severity": 3,
"source": "LTeX - EN_QUOTES",
"message": "Use a smart closing quote here: '”'."
},
{
"range": {
"start": {
"line": 165,
"character": 44
},
"end": {
"line": 165,
"character": 45
}
},
"severity": 3,
"source": "LTeX - EN_QUOTES",
"message": "Use a smart opening quote here: '“'."
},
{
"range": {
"start": {
"line": 165,
"character": 56
},
"end": {
"line": 165,
"character": 57
}
},
"severity": 3,
"source": "LTeX - EN_QUOTES",
"message": "Use a smart closing quote here: '”'."
},
{
"range": {
"start": {
"line": 172,
"character": 56
},
"end": {
"line": 172,
"character": 63
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 172,
"character": 65
},
"end": {
"line": 172,
"character": 75
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 173,
"character": 0
},
"end": {
"line": 173,
"character": 7
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 176,
"character": 25
},
"end": {
"line": 176,
"character": 32
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 176,
"character": 34
},
"end": {
"line": 176,
"character": 44
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 204,
"character": 10
},
"end": {
"line": 204,
"character": 17
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 204,
"character": 27
},
"end": {
"line": 204,
"character": 40
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 204,
"character": 45
},
"end": {
"line": 204,
"character": 50
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 212,
"character": 74
},
"end": {
"line": 212,
"character": 78
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 214,
"character": 15
},
"end": {
"line": 214,
"character": 19
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 215,
"character": 25
},
"end": {
"line": 215,
"character": 28
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 217,
"character": 10
},
"end": {
"line": 217,
"character": 14
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 221,
"character": 0
},
"end": {
"line": 221,
"character": 11
}
},
"severity": 3,
"source": "LTeX - SOME_OF_THE",
"message": "If the text is a generality, 'of the' is not necessary. Did you mean 'some'?"
},
{
"range": {
"start": {
"line": 224,
"character": 66
},
"end": {
"line": 224,
"character": 69
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 254,
"character": 36
},
"end": {
"line": 254,
"character": 39
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 255,
"character": 59
},
"end": {
"line": 255,
"character": 69
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 261,
"character": 26
},
"end": {
"line": 261,
"character": 33
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 261,
"character": 35
},
"end": {
"line": 261,
"character": 40
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 265,
"character": 51
},
"end": {
"line": 265,
"character": 56
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 279,
"character": 33
},
"end": {
"line": 279,
"character": 36
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 281,
"character": 0
},
"end": {
"line": 281,
"character": 3
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 282,
"character": 30
},
"end": {
"line": 282,
"character": 33
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 305,
"character": 66
},
"end": {
"line": 305,
"character": 70
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 306,
"character": 12
},
"end": {
"line": 306,
"character": 21
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 318,
"character": 35
},
"end": {
"line": 318,
"character": 50
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 320,
"character": 0
},
"end": {
"line": 320,
"character": 3
}
},
"severity": 3,
"source": "LTeX - ENGLISH_WORD_REPEAT_BEGINNING_RULE",
"message": "Three successive sentences begin with the same word. Consider rewording the sentence or use a thesaurus to find a synonym."
},
{
"range": {
"start": {
"line": 323,
"character": 5
},
"end": {
"line": 323,
"character": 20
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 327,
"character": 24
},
"end": {
"line": 327,
"character": 31
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 334,
"character": 34
},
"end": {
"line": 334,
"character": 49
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 334,
"character": 51
},
"end": {
"line": 334,
"character": 55
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 341,
"character": 0
},
"end": {
"line": 341,
"character": 7
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 341,
"character": 36
},
"end": {
"line": 341,
"character": 40
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 341,
"character": 56
},
"end": {
"line": 341,
"character": 69
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 341,
"character": 70
},
"end": {
"line": 341,
"character": 78
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 342,
"character": 14
},
"end": {
"line": 342,
"character": 19
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 343,
"character": 35
},
"end": {
"line": 343,
"character": 39
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 343,
"character": 73
},
"end": {
"line": 343,
"character": 77
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 346,
"character": 63
},
"end": {
"line": 346,
"character": 67
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 352,
"character": 46
},
"end": {
"line": 352,
"character": 53
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 352,
"character": 58
},
"end": {
"line": 352,
"character": 68
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 356,
"character": 44
},
"end": {
"line": 356,
"character": 51
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 356,
"character": 56
},
"end": {
"line": 356,
"character": 66
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 358,
"character": 0
},
"end": {
"line": 358,
"character": 10
}
},
"severity": 3,
"source": "LTeX - ALL_OF_THE",
"message": "Consider using 'all the'."
},
{
"range": {
"start": {
"line": 365,
"character": 40
},
"end": {
"line": 365,
"character": 51
}
},
"severity": 3,
"source": "LTeX - SOME_OF_THE",
"message": "If the text is a generality, 'of the' is not necessary. Did you mean 'some'?"
},
{
"range": {
"start": {
"line": 392,
"character": 13
},
"end": {
"line": 393,
"character": 7
}
},
"severity": 3,
"source": "LTeX - ENGLISH_WORD_REPEAT_RULE",
"message": "Possible typo: you repeated a word"
},
{
"range": {
"start": {
"line": 399,
"character": 0
},
"end": {
"line": 399,
"character": 7
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 407,
"character": 37
},
"end": {
"line": 407,
"character": 40
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 409,
"character": 24
},
"end": {
"line": 409,
"character": 31
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 412,
"character": 44
},
"end": {
"line": 412,
"character": 47
}
},
"severity": 3,
"source": "LTeX - ENGLISH_WORD_REPEAT_BEGINNING_RULE",
"message": "Three successive sentences begin with the same word. Consider rewording the sentence or use a thesaurus to find a synonym."
},
{
"range": {
"start": {
"line": 416,
"character": 13
},
"end": {
"line": 417,
"character": 10
}
},
"severity": 3,
"source": "LTeX - ENGLISH_WORD_REPEAT_RULE",
"message": "Possible typo: you repeated a word"
},
{
"range": {
"start": {
"line": 417,
"character": 59
},
"end": {
"line": 417,
"character": 62
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 422,
"character": 24
},
"end": {
"line": 422,
"character": 31
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 424,
"character": 12
},
"end": {
"line": 424,
"character": 19
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 426,
"character": 38
},
"end": {
"line": 426,
"character": 42
}
},
"severity": 3,
"source": "LTeX - SENT_START_CONJUNCTIVE_LINKING_ADVERB_COMMA",
"message": "Did you forget a comma after a conjunctive/linking adverb?"
},
{
"range": {
"start": {
"line": 426,
"character": 51
},
"end": {
"line": 426,
"character": 58
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 430,
"character": 4
},
"end": {
"line": 430,
"character": 14
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 438,
"character": 49
},
"end": {
"line": 438,
"character": 59
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 438,
"character": 71
},
"end": {
"line": 438,
"character": 76
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 439,
"character": 1
},
"end": {
"line": 439,
"character": 4
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 439,
"character": 45
},
"end": {
"line": 439,
"character": 50
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 441,
"character": 58
},
"end": {
"line": 441,
"character": 68
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 448,
"character": 25
},
"end": {
"line": 448,
"character": 29
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 448,
"character": 70
},
"end": {
"line": 448,
"character": 77
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 450,
"character": 60
},
"end": {
"line": 450,
"character": 65
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 454,
"character": 0
},
"end": {
"line": 454,
"character": 4
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 460,
"character": 72
},
"end": {
"line": 460,
"character": 76
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 467,
"character": 10
},
"end": {
"line": 467,
"character": 14
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 481,
"character": 44
},
"end": {
"line": 481,
"character": 50
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 482,
"character": 0
},
"end": {
"line": 482,
"character": 6
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 482,
"character": 36
},
"end": {
"line": 482,
"character": 42
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 485,
"character": 58
},
"end": {
"line": 485,
"character": 63
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 489,
"character": 59
},
"end": {
"line": 489,
"character": 65
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 492,
"character": 13
},
"end": {
"line": 492,
"character": 17
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 492,
"character": 58
},
"end": {
"line": 492,
"character": 64
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 493,
"character": 0
},
"end": {
"line": 493,
"character": 8
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 493,
"character": 71
},
"end": {
"line": 493,
"character": 77
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 494,
"character": 0
},
"end": {
"line": 494,
"character": 7
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 494,
"character": 56
},
"end": {
"line": 494,
"character": 60
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 497,
"character": 28
},
"end": {
"line": 497,
"character": 35
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 498,
"character": 13
},
"end": {
"line": 498,
"character": 17
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 498,
"character": 33
},
"end": {
"line": 498,
"character": 37
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 498,
"character": 71
},
"end": {
"line": 498,
"character": 78
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 501,
"character": 34
},
"end": {
"line": 501,
"character": 38
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 502,
"character": 0
},
"end": {
"line": 502,
"character": 7
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 504,
"character": 47
},
"end": {
"line": 504,
"character": 51
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 506,
"character": 20
},
"end": {
"line": 506,
"character": 22
}
},
"severity": 3,
"source": "LTeX - ENGLISH_WORD_REPEAT_BEGINNING_RULE",
"message": "Three successive sentences begin with the same word. Consider rewording the sentence or use a thesaurus to find a synonym."
},
{
"range": {
"start": {
"line": 513,
"character": 28
},
"end": {
"line": 513,
"character": 30
}
},
"severity": 3,
"source": "LTeX - PUNCTUATION_PARAGRAPH_END",
"message": "Please add a punctuation mark at the end of paragraph"
},
{
"range": {
"start": {
"line": 519,
"character": 227
},
"end": {
"line": 519,
"character": 243
}
},
"severity": 3,
"source": "LTeX - EN_COMPOUNDS",
"message": "This word is normally spelled with hyphen."
},
{
"range": {
"start": {
"line": 530,
"character": 39
},
"end": {
"line": 530,
"character": 49
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 532,
"character": 0
},
"end": {
"line": 532,
"character": 10
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 539,
"character": 49
},
"end": {
"line": 539,
"character": 55
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 542,
"character": 1
},
"end": {
"line": 542,
"character": 5
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 544,
"character": 1
},
"end": {
"line": 544,
"character": 9
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 544,
"character": 24
},
"end": {
"line": 544,
"character": 34
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 545,
"character": 31
},
"end": {
"line": 545,
"character": 36
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 545,
"character": 42
},
"end": {
"line": 545,
"character": 48
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 548,
"character": 3
},
"end": {
"line": 548,
"character": 7
}
},
"severity": 3,
"source": "LTeX - UPPERCASE_SENTENCE_START",
"message": "This sentence does not start with an uppercase letter"
},
{
"range": {
"start": {
"line": 548,
"character": 12
},
"end": {
"line": 548,
"character": 16
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 548,
"character": 42
},
"end": {
"line": 548,
"character": 46
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 550,
"character": 10
},
"end": {
"line": 550,
"character": 14
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 550,
"character": 40
},
"end": {
"line": 550,
"character": 44
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 550,
"character": 47
},
"end": {
"line": 550,
"character": 54
}
},
"severity": 3,
"source": "LTeX - PUNCTUATION_PARAGRAPH_END",
"message": "Please add a punctuation mark at the end of paragraph"
},
{
"range": {
"start": {
"line": 576,
"character": 10
},
"end": {
"line": 576,
"character": 17
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 576,
"character": 22
},
"end": {
"line": 576,
"character": 27
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 578,
"character": 55
},
"end": {
"line": 578,
"character": 60
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 581,
"character": 4
},
"end": {
"line": 581,
"character": 8
}
},
"severity": 3,
"source": "LTeX - DT_PRP",
"message": "Possible typo. Did you mean 'a' or 'WE'?"
},
{
"range": {
"start": {
"line": 581,
"character": 58
},
"end": {
"line": 581,
"character": 62
}
},
"severity": 3,
"source": "LTeX - DT_PRP",
"message": "Possible typo. Did you mean 'a' or 'WE'?"
},
{
"range": {
"start": {
"line": 582,
"character": 11
},
"end": {
"line": 582,
"character": 16
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 583,
"character": 11
},
"end": {
"line": 583,
"character": 26
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 584,
"character": 17
},
"end": {
"line": 584,
"character": 22
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 584,
"character": 26
},
"end": {
"line": 584,
"character": 27
}
},
"severity": 3,
"source": "LTeX - EN_A_VS_AN",
"message": "Use 'an' instead of 'a' if the following word starts with a vowel sound, e.g. 'an article', 'an hour'"
},
{
"range": {
"start": {
"line": 599,
"character": 0
},
"end": {
"line": 599,
"character": 7
}
},
"severity": 3,
"source": "LTeX - CD_NN",
"message": "Possible agreement error. The noun 'feature' seems to be countable, so consider using: 'features'."
},
{
"range": {
"start": {
"line": 599,
"character": 14
},
"end": {
"line": 599,
"character": 18
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 604,
"character": 4
},
"end": {
"line": 604,
"character": 11
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 604,
"character": 37
},
"end": {
"line": 604,
"character": 42
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 605,
"character": 30
},
"end": {
"line": 605,
"character": 37
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 605,
"character": 63
},
"end": {
"line": 605,
"character": 68
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 606,
"character": 28
},
"end": {
"line": 606,
"character": 37
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 608,
"character": 76
},
"end": {
"line": 608,
"character": 80
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 611,
"character": 13
},
"end": {
"line": 611,
"character": 17
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 613,
"character": 11
},
"end": {
"line": 613,
"character": 15
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 614,
"character": 38
},
"end": {
"line": 614,
"character": 42
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 621,
"character": 30
},
"end": {
"line": 621,
"character": 34
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 623,
"character": 48
},
"end": {
"line": 623,
"character": 52
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 625,
"character": 21
},
"end": {
"line": 625,
"character": 25
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 635,
"character": 63
},
"end": {
"line": 635,
"character": 68
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 636,
"character": 64
},
"end": {
"line": 636,
"character": 70
}
},
"severity": 3,
"source": "LTeX - DT_PRP",
"message": "Possible typo. Did you mean 'The' or 'WE'?"
},
{
"range": {
"start": {
"line": 637,
"character": 44
},
"end": {
"line": 637,
"character": 49
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 638,
"character": 26
},
"end": {
"line": 638,
"character": 29
}
},
"severity": 3,
"source": "LTeX - ENGLISH_WORD_REPEAT_BEGINNING_RULE",
"message": "Three successive sentences begin with the same word. Consider rewording the sentence or use a thesaurus to find a synonym."
},
{
"range": {
"start": {
"line": 638,
"character": 54
},
"end": {
"line": 638,
"character": 61
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 639,
"character": 29
},
"end": {
"line": 639,
"character": 34
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 640,
"character": 29
},
"end": {
"line": 640,
"character": 33
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 640,
"character": 66
},
"end": {
"line": 640,
"character": 73
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 641,
"character": 21
},
"end": {
"line": 641,
"character": 26
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 642,
"character": 6
},
"end": {
"line": 642,
"character": 10
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 642,
"character": 71
},
"end": {
"line": 642,
"character": 75
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 644,
"character": 16
},
"end": {
"line": 644,
"character": 20
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 646,
"character": 0
},
"end": {
"line": 646,
"character": 11
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 661,
"character": 1
},
"end": {
"line": 661,
"character": 5
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 674,
"character": 42
},
"end": {
"line": 674,
"character": 46
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 686,
"character": 5
},
"end": {
"line": 686,
"character": 8
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 686,
"character": 41
},
"end": {
"line": 686,
"character": 51
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 686,
"character": 64
},
"end": {
"line": 686,
"character": 68
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 687,
"character": 67
},
"end": {
"line": 687,
"character": 77
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 688,
"character": 0
},
"end": {
"line": 688,
"character": 12
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 688,
"character": 14
},
"end": {
"line": 688,
"character": 22
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 690,
"character": 68
},
"end": {
"line": 690,
"character": 72
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 697,
"character": 55
},
"end": {
"line": 697,
"character": 59
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 715,
"character": 21
},
"end": {
"line": 715,
"character": 23
}
},
"severity": 3,
"source": "LTeX - PUNCTUATION_PARAGRAPH_END",
"message": "Please add a punctuation mark at the end of paragraph"
},
{
"range": {
"start": {
"line": 728,
"character": 73
},
"end": {
"line": 728,
"character": 77
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 729,
"character": 46
},
"end": {
"line": 729,
"character": 48
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 729,
"character": 69
},
"end": {
"line": 729,
"character": 71
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 737,
"character": 50
},
"end": {
"line": 737,
"character": 53
}
},
"severity": 3,
"source": "LTeX - ENGLISH_WORD_REPEAT_BEGINNING_RULE",
"message": "Three successive sentences begin with the same word. Consider rewording the sentence or use a thesaurus to find a synonym."
},
{
"range": {
"start": {
"line": 737,
"character": 54
},
"end": {
"line": 737,
"character": 57
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 738,
"character": 6
},
"end": {
"line": 738,
"character": 11
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 738,
"character": 43
},
"end": {
"line": 738,
"character": 53
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 739,
"character": 33
},
"end": {
"line": 739,
"character": 45
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 746,
"character": 15
},
"end": {
"line": 746,
"character": 23
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 756,
"character": 39
},
"end": {
"line": 756,
"character": 47
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 764,
"character": 4
},
"end": {
"line": 764,
"character": 16
}
},
"severity": 3,
"source": "LTeX - EXCEPTION_OF_TO",
"message": "The usual collocation for \"exception\" in this context is \"to\" not \"of\". Did you mean 'exception to'?"
},
{
"range": {
"start": {
"line": 764,
"character": 47
},
"end": {
"line": 764,
"character": 55
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 765,
"character": 48
},
"end": {
"line": 765,
"character": 51
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 770,
"character": 34
},
"end": {
"line": 770,
"character": 38
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 770,
"character": 58
},
"end": {
"line": 770,
"character": 68
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 772,
"character": 56
},
"end": {
"line": 772,
"character": 59
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 774,
"character": 26
},
"end": {
"line": 774,
"character": 30
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 774,
"character": 41
},
"end": {
"line": 774,
"character": 51
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 775,
"character": 63
},
"end": {
"line": 775,
"character": 64
}
},
"severity": 3,
"source": "LTeX - EN_UNPAIRED_BRACKETS",
"message": "Unpaired symbol: ')' seems to be missing"
},
{
"range": {
"start": {
"line": 780,
"character": 60
},
"end": {
"line": 780,
"character": 64
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 781,
"character": 22
},
"end": {
"line": 781,
"character": 28
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 781,
"character": 64
},
"end": {
"line": 781,
"character": 68
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 788,
"character": 20
},
"end": {
"line": 788,
"character": 30
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 793,
"character": 20
},
"end": {
"line": 793,
"character": 28
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 798,
"character": 20
},
"end": {
"line": 798,
"character": 32
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 803,
"character": 20
},
"end": {
"line": 803,
"character": 30
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 816,
"character": 29
},
"end": {
"line": 816,
"character": 34
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 823,
"character": 20
},
"end": {
"line": 823,
"character": 30
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 826,
"character": 0
},
"end": {
"line": 826,
"character": 5
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 829,
"character": 20
},
"end": {
"line": 829,
"character": 28
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 832,
"character": 0
},
"end": {
"line": 832,
"character": 5
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 834,
"character": 20
},
"end": {
"line": 834,
"character": 32
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 837,
"character": 0
},
"end": {
"line": 837,
"character": 5
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 839,
"character": 20
},
"end": {
"line": 839,
"character": 30
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 842,
"character": 0
},
"end": {
"line": 842,
"character": 5
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 850,
"character": 70
},
"end": {
"line": 850,
"character": 75
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 851,
"character": 62
},
"end": {
"line": 851,
"character": 72
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 864,
"character": 1
},
"end": {
"line": 864,
"character": 8
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 866,
"character": 1
},
"end": {
"line": 866,
"character": 11
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 872,
"character": 42
},
"end": {
"line": 872,
"character": 49
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 875,
"character": 46
},
"end": {
"line": 875,
"character": 51
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 877,
"character": 40
},
"end": {
"line": 877,
"character": 44
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 879,
"character": 9
},
"end": {
"line": 879,
"character": 14
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 889,
"character": 18
},
"end": {
"line": 889,
"character": 28
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 889,
"character": 30
},
"end": {
"line": 889,
"character": 38
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 889,
"character": 40
},
"end": {
"line": 889,
"character": 52
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 889,
"character": 54
},
"end": {
"line": 889,
"character": 64
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 898,
"character": 27
},
"end": {
"line": 898,
"character": 34
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 898,
"character": 44
},
"end": {
"line": 898,
"character": 56
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 899,
"character": 17
},
"end": {
"line": 899,
"character": 22
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 901,
"character": 64
},
"end": {
"line": 901,
"character": 69
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 903,
"character": 66
},
"end": {
"line": 903,
"character": 71
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 910,
"character": 66
},
"end": {
"line": 910,
"character": 76
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 918,
"character": 7
},
"end": {
"line": 918,
"character": 12
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 921,
"character": 8
},
"end": {
"line": 921,
"character": 13
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 924,
"character": 8
},
"end": {
"line": 924,
"character": 13
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 931,
"character": 66
},
"end": {
"line": 931,
"character": 74
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 939,
"character": 10
},
"end": {
"line": 939,
"character": 15
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 942,
"character": 7
},
"end": {
"line": 942,
"character": 12
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 945,
"character": 7
},
"end": {
"line": 945,
"character": 12
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 952,
"character": 66
},
"end": {
"line": 952,
"character": 78
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 960,
"character": 10
},
"end": {
"line": 960,
"character": 15
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 963,
"character": 7
},
"end": {
"line": 963,
"character": 12
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 966,
"character": 7
},
"end": {
"line": 966,
"character": 12
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 973,
"character": 66
},
"end": {
"line": 973,
"character": 76
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 981,
"character": 7
},
"end": {
"line": 981,
"character": 12
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 984,
"character": 8
},
"end": {
"line": 984,
"character": 13
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 987,
"character": 8
},
"end": {
"line": 987,
"character": 13
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 998,
"character": 25
},
"end": {
"line": 998,
"character": 33
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 999,
"character": 72
},
"end": {
"line": 999,
"character": 80
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1000,
"character": 0
},
"end": {
"line": 1000,
"character": 7
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1000,
"character": 33
},
"end": {
"line": 1000,
"character": 37
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1002,
"character": 48
},
"end": {
"line": 1002,
"character": 58
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1003,
"character": 21
},
"end": {
"line": 1003,
"character": 25
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1003,
"character": 61
},
"end": {
"line": 1003,
"character": 63
}
},
"severity": 3,
"source": "LTeX - COMP_THAN",
"message": "Comparison requires 'than', not 'then' nor 'as'."
},
{
"range": {
"start": {
"line": 1005,
"character": 56
},
"end": {
"line": 1005,
"character": 60
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1008,
"character": 37
},
"end": {
"line": 1008,
"character": 44
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1017,
"character": 51
},
"end": {
"line": 1017,
"character": 61
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1022,
"character": 0
},
"end": {
"line": 1022,
"character": 10
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1024,
"character": 65
},
"end": {
"line": 1024,
"character": 68
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1028,
"character": 17
},
"end": {
"line": 1028,
"character": 22
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1036,
"character": 49
},
"end": {
"line": 1036,
"character": 54
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1037,
"character": 34
},
"end": {
"line": 1037,
"character": 41
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1037,
"character": 55
},
"end": {
"line": 1037,
"character": 60
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1039,
"character": 71
},
"end": {
"line": 1039,
"character": 78
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1046,
"character": 0
},
"end": {
"line": 1046,
"character": 4
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1046,
"character": 33
},
"end": {
"line": 1046,
"character": 39
}
},
"severity": 3,
"source": "LTeX - DT_PRP",
"message": "Possible typo. Did you mean 'the' or 'WE'?"
},
{
"range": {
"start": {
"line": 1047,
"character": 38
},
"end": {
"line": 1047,
"character": 42
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1047,
"character": 74
},
"end": {
"line": 1047,
"character": 80
}
},
"severity": 3,
"source": "LTeX - DT_PRP",
"message": "Possible typo. Did you mean 'the' or 'WE'?"
},
{
"range": {
"start": {
"line": 1076,
"character": 13
},
"end": {
"line": 1076,
"character": 23
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1076,
"character": 26
},
"end": {
"line": 1076,
"character": 34
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1076,
"character": 37
},
"end": {
"line": 1076,
"character": 49
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1076,
"character": 52
},
"end": {
"line": 1076,
"character": 62
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1100,
"character": 48
},
"end": {
"line": 1100,
"character": 62
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1102,
"character": 47
},
"end": {
"line": 1102,
"character": 54
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1103,
"character": 8
},
"end": {
"line": 1103,
"character": 13
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1114,
"character": 24
},
"end": {
"line": 1114,
"character": 31
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1114,
"character": 38
},
"end": {
"line": 1114,
"character": 43
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1118,
"character": 8
},
"end": {
"line": 1118,
"character": 13
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1150,
"character": 52
},
"end": {
"line": 1150,
"character": 57
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1154,
"character": 9
},
"end": {
"line": 1154,
"character": 19
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1154,
"character": 40
},
"end": {
"line": 1154,
"character": 41
}
},
"severity": 3,
"source": "LTeX - EN_QUOTES",
"message": "Use a smart opening quote here: '“'."
},
{
"range": {
"start": {
"line": 1154,
"character": 47
},
"end": {
"line": 1154,
"character": 48
}
},
"severity": 3,
"source": "LTeX - EN_QUOTES",
"message": "Use a smart closing quote here: '”'."
},
{
"range": {
"start": {
"line": 1155,
"character": 51
},
"end": {
"line": 1155,
"character": 61
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1156,
"character": 79
},
"end": {
"line": 1156,
"character": 80
}
},
"severity": 3,
"source": "LTeX - DASH_RULE",
"message": "Consider using an m-dash if you do not want to join two words."
},
{
"range": {
"start": {
"line": 1157,
"character": 31
},
"end": {
"line": 1157,
"character": 32
}
},
"severity": 3,
"source": "LTeX - DASH_RULE",
"message": "Consider using an m-dash if you do not want to join two words."
},
{
"range": {
"start": {
"line": 1168,
"character": 27
},
"end": {
"line": 1168,
"character": 32
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1168,
"character": 56
},
"end": {
"line": 1168,
"character": 66
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1169,
"character": 30
},
"end": {
"line": 1169,
"character": 40
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1170,
"character": 48
},
"end": {
"line": 1170,
"character": 58
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1177,
"character": 16
},
"end": {
"line": 1177,
"character": 23
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1177,
"character": 28
},
"end": {
"line": 1177,
"character": 38
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1181,
"character": 62
},
"end": {
"line": 1181,
"character": 74
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1206,
"character": 27
},
"end": {
"line": 1206,
"character": 38
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1207,
"character": 43
},
"end": {
"line": 1207,
"character": 50
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1207,
"character": 56
},
"end": {
"line": 1207,
"character": 66
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1208,
"character": 26
},
"end": {
"line": 1208,
"character": 30
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1211,
"character": 0
},
"end": {
"line": 1211,
"character": 6
}
},
"severity": 3,
"source": "LTeX - THE_SUPERLATIVE",
"message": "A determiner is probably missing here: 'the lowest'."
},
{
"range": {
"start": {
"line": 1212,
"character": 16
},
"end": {
"line": 1212,
"character": 23
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1212,
"character": 38
},
"end": {
"line": 1212,
"character": 48
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1214,
"character": 0
},
"end": {
"line": 1214,
"character": 7
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1214,
"character": 35
},
"end": {
"line": 1214,
"character": 45
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1215,
"character": 3
},
"end": {
"line": 1215,
"character": 13
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1226,
"character": 20
},
"end": {
"line": 1226,
"character": 24
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1229,
"character": 34
},
"end": {
"line": 1229,
"character": 39
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1230,
"character": 51
},
"end": {
"line": 1230,
"character": 55
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1233,
"character": 22
},
"end": {
"line": 1233,
"character": 32
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1233,
"character": 51
},
"end": {
"line": 1233,
"character": 59
}
},
"severity": 3,
"source": "LTeX - NON3PRS_VERB",
"message": "The pronoun 'WE' must be used with a non-third-person form of a verb: 'feature'"
},
{
"range": {
"start": {
"line": 1237,
"character": 49
},
"end": {
"line": 1237,
"character": 54
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1241,
"character": 63
},
"end": {
"line": 1241,
"character": 73
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1267,
"character": 4
},
"end": {
"line": 1267,
"character": 7
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1267,
"character": 58
},
"end": {
"line": 1267,
"character": 61
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1271,
"character": 52
},
"end": {
"line": 1271,
"character": 60
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
},
{
"range": {
"start": {
"line": 1273,
"character": 19
},
"end": {
"line": 1273,
"character": 22
}
},
"severity": 3,
"source": "LTeX - MORFOLOGIK_RULE_EN_US",
"message": "Possible spelling mistake found."
}
]
}
[Trace - 12:12:26 PM] Sending request 'textDocument/codeAction - (2)'.
Params: {
"textDocument": {
"uri": "file:///media/bagustris/atmaja/s3/mypaper/2020/sc/elsarticle-bagus/sc-bagus.tex"
},
"range": {
"start": {
"line": 912,
"character": 27
},
"end": {
"line": 912,
"character": 27
}
},
"context": {
"diagnostics": []
}
}
[Trace - 12:12:26 PM] Received response 'textDocument/codeAction - (2)' in 2ms.
Result: []