From 5845b2cd572baeee26127a167d2b15348ae83757 Mon Sep 17 00:00:00 2001 From: Tristan Naumann Date: Wed, 27 Nov 2013 13:34:25 -0500 Subject: [PATCH 001/393] Adding a handful of comments and todos. --- code/evaluate.py | 1 + code/helper.py | 14 ++++++------ code/libml.py | 8 +++++++ code/model.py | 6 ++++++ code/note.py | 56 +++++++++++++++++++++++++++++++++++++++++++++++- code/runall.py | 1 + code/train.py | 11 +++++++++- 7 files changed, 89 insertions(+), 8 deletions(-) diff --git a/code/evaluate.py b/code/evaluate.py index 2a18590..3911d8b 100644 --- a/code/evaluate.py +++ b/code/evaluate.py @@ -1,3 +1,4 @@ +"""Evaluate predictions against a gold standard.""" import os import os.path import sys diff --git a/code/helper.py b/code/helper.py index 3b7b2dd..ae689e0 100644 --- a/code/helper.py +++ b/code/helper.py @@ -1,17 +1,19 @@ - +"""Utility methods.""" import os import os.path import errno def map_files(files): - output = {} - for f in files: - basename = os.path.splitext(os.path.basename(f))[0] - output[basename] = f - return output + """Maps a list of files to basename -> path.""" + output = {} + for f in files: + basename = os.path.splitext(os.path.basename(f))[0] + output[basename] = f + return output def mkpath(path): + """Alias for mkdir -p.""" try: os.makedirs(path) except OSError as exc: diff --git a/code/libml.py b/code/libml.py index c1963b4..01945d3 100644 --- a/code/libml.py +++ b/code/libml.py @@ -1,3 +1,11 @@ +"""Interface to ML libraries -- libsvm, liblinear, and crfsuite. + +Much of this code performs parameter selection a la libsvm's easy.py. However, +the remainder provides an interface for the ML libraries so that the can be +called using a single function within the remainder of the code. +""" +# TODO: this code should leverage bindings rather than fork processes +# TODO: parallelization at a high level could be obtained with task abstraction import multiprocessing import os diff --git a/code/model.py b/code/model.py index 814f08d..a18c87c 100644 --- a/code/model.py +++ b/code/model.py @@ -1,3 +1,9 @@ +"""Contains Model class and all feature generation methods. +""" +# TODO: feature generation should be separated into a separate file. +# TODO: implement feature extraction using scikit-learn +# TODO: make model serializable so that reading/writing is trivial + from __future__ import with_statement import time diff --git a/code/note.py b/code/note.py index 64eabb3..a821317 100644 --- a/code/note.py +++ b/code/note.py @@ -1,12 +1,38 @@ +"""Internal note representation. + +Note class reads in i2b2 formatted .txt and .con files and aligns them. After- +ward they can be accessed by iterating over the class. + +Three auxiliary methods, read_txt(), read_con(), and write_con() streamline +help manipulate such files as well. +""" +# TODO: clean up interface +# TODO: add support for non-i2b2 formats + from __future__ import with_statement class Note: + """Note representation. + + Notes are represented as an iterable of tuples (word, concept). + """ + # TODO: more efficient internal representation + def __init__(self, txt, con=None): + """Read in the note from file(s). + + Args: + txt: File containing i2b2-formatted text. + con: File containing i2b2-formatted concepts. 
+ """ + + # Read in list of tokens, each with "none" concept self.sents = [] with open(txt) as f: for line in f: self.sents.append([[w, "none"] for w in line.split()]) - + + # Assign appropriate concept if a concept file was given if con: with open(con) as f: for line in f: @@ -24,9 +50,20 @@ def __init__(self, txt, con=None): self.sents[l][i][1] = t def __iter__(self): + """Yield the (word, concept) pairs.""" return iter(self.sents) def read_txt(txt): + """Get a list of words from an i2b2-formatted text file. + + Ags: + txt: The i2b2-formatted text file. + + Returns: + A list of the words in the text file. + """ + # TODO: this method is misleading, should include concept for consistency + note = [] with open(txt) as f: for line in f: @@ -34,6 +71,17 @@ def read_txt(txt): return note def read_con(con, txt): + """Get a list of (word, concept) pairs from a txt and con file. + + Args: + con: File containing the i2b2-formatted concepts. + txt: File containing the i2b2-formatted text. + + Returns: + A list of (word, concept) pairs. + """ + # TODO: this method obviates the Note class, should complement + label = [['none'] * len(line) for line in txt] with open(con) as f: for line in f: @@ -52,6 +100,12 @@ def read_con(con, txt): return label def write_con(con, data, labels): + """Writes concept file. + + From data and labels, this create a .con file as output. + """ + # TODO: method should call a serialization primitive of Note class + with open(con, 'w') as f: for i, tmp in enumerate(zip(data, labels)): datum, label = tmp diff --git a/code/runall.py b/code/runall.py index e38c34d..dd32a90 100644 --- a/code/runall.py +++ b/code/runall.py @@ -1,3 +1,4 @@ +"""Run a full grid of experiments.""" import itertools import os diff --git a/code/train.py b/code/train.py index 3f6c81f..d4f547e 100644 --- a/code/train.py +++ b/code/train.py @@ -1,3 +1,11 @@ +"""Train a model on supplied data. + +It is generally necessary to specify the following parameters: + -t: the files containing the training text, and + -c: the files containing the training concepts. +However, the remainder of the options supply information about how to execute +(e.g. which ML methods to use, where to put the models, &c.) +""" import os import os.path import sys @@ -72,11 +80,12 @@ def main(): txt_files_map = helper.map_files(txt_files) con_files_map = helper.map_files(con_files) + # line up .txt with .con files for k in txt_files_map: if k in con_files_map: training_list.append((txt_files_map[k], con_files_map[k])) - type = 0 + type = 0 # type is a flag set if not args.no_svm: type = type | libml.SVM From d1646a4b7564f8084b6d3033ba8e77981d98058c Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Wed, 4 Dec 2013 00:48:54 -0500 Subject: [PATCH 002/393] I am trying to push this to a branch. It is a test. --- code/note.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/code/note.py b/code/note.py index a821317..717b232 100644 --- a/code/note.py +++ b/code/note.py @@ -11,6 +11,9 @@ from __future__ import with_statement + +# This is a test + class Note: """Note representation. 
@@ -113,4 +116,4 @@ def write_con(con, data, labels): datum, label = tmp if label != 'none': idx = "%d:%d" % (i + 1, j) - print >>f, "c=\"%s\" %s %s||t=\"%s\"" % (datum, idx, idx, label) \ No newline at end of file + print >>f, "c=\"%s\" %s %s||t=\"%s\"" % (datum, idx, idx, label) From 7aaafa7dbe0e06b31fcd421216bb8683c524ddc2 Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Wed, 4 Dec 2013 00:51:28 -0500 Subject: [PATCH 003/393] Now interacts with the Note class --- code/evaluate.py | 225 +++++++++++++++++++++++++---------------------- 1 file changed, 122 insertions(+), 103 deletions(-) diff --git a/code/evaluate.py b/code/evaluate.py index 3911d8b..3a1cd3c 100644 --- a/code/evaluate.py +++ b/code/evaluate.py @@ -1,4 +1,3 @@ -"""Evaluate predictions against a gold standard.""" import os import os.path import sys @@ -10,130 +9,150 @@ from note import * def main(): - parser = argparse.ArgumentParser() - - parser.add_argument("-t", - help = "Test files that were used to generate predictions", - dest = "txt", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_data/*') - ) - - parser.add_argument("-c", - help = "The directory that contains predicted concept files organized into subdirectories for svm, lin, srf", - dest = "con", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_predictions/') - ) - - parser.add_argument("-r", - help = "The directory that contains reference gold standard concept files", - dest = "ref", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/reference_standard_for_test_data/concepts/') - ) - - parser.add_argument("-o", - help = "Write the evaluation to a file rather than STDOUT", - dest = "output", - default = None - ) - args = parser.parse_args() + parser = argparse.ArgumentParser() + + parser.add_argument("-t", + help = "Test files that were used to generate predictions", + dest = "txt", + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_data/*') + ) + + parser.add_argument("-c", + help = "The directory that contains predicted concept files organized into subdirectories for svm, lin, srf", + dest = "con", + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_predictions/') + ) + + parser.add_argument("-r", + help = "The directory that contains reference gold standard concept files", + dest = "ref", + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/reference_standard_for_test_data/concepts/') + ) + + parser.add_argument("-o", + help = "Write the evaluation to a file rather than STDOUT", + dest = "output", + default = None + ) + + # Parse command line arguments + args = parser.parse_args() - # output - if args.output: - args.output = open(args.output, "w") - else: - args.output = sys.stdout - txt_files = glob.glob(args.txt) - ref_files = os.listdir(args.ref) - ref_files = map(lambda f: os.path.join(args.ref, f), ref_files) + # Is output destination specified + if args.output: + args.output = open(args.output, "w") + else: + args.output = sys.stdout + - txt_files_map = helper.map_files(txt_files) - ref_files_map = helper.map_files(ref_files) + txt_files = glob.glob(args.txt) + ref_files = os.listdir(args.ref) + ref_files = map(lambda f: os.path.join(args.ref, f), ref_files) - con_directories = os.listdir(args.con) + txt_files_map = helper.map_files(txt_files) + ref_files_map = helper.map_files(ref_files) - for con_directory in con_directories: - files = [] - directory_name = 
os.path.basename(con_directory) + con_directories = os.listdir(args.con) - if directory_name not in ["svm", "crf", "lin"]: - continue + for con_directory in con_directories: + files = [] + directory_name = os.path.basename(con_directory) - con_files = os.listdir(os.path.join(args.con, con_directory)) - con_files = map(lambda f: os.path.join(args.con, con_directory, f), con_files) + if directory_name not in ["svm", "crf", "lin"]: + continue + + con_files = os.listdir(os.path.join(args.con, con_directory)) + con_files = map(lambda f: os.path.join(args.con, con_directory, f), con_files) - con_files_map = helper.map_files(con_files) + con_files_map = helper.map_files(con_files) + + for k in txt_files_map: + if k in con_files_map and k in ref_files_map: + files.append((txt_files_map[k], con_files_map[k], ref_files_map[k])) + + + + # Compute the confusion matrix + labels = Model.labels # hash tabble: label -> index + confusion = [[0] * len(labels) for e in labels] + + # txt <- medical text + # con <- model predictions + # ref <- actual labels + for txt, con, ref in files: - for k in txt_files_map: - if k in con_files_map and k in ref_files_map: - files.append((txt_files_map[k], con_files_map[k], ref_files_map[k])) + # A note that represents the model's predictions + cnote = Note() + cnote.read_i2b2( txt, con ) + #cnote.read_plain( txt, con ) # in case in plain format + # A note that is the actual concept labels + rnote = Note() + rnote.read_i2b2( txt, ref ) + #rnote.read_plain(txt, ref ) # in case in plain format - # Compute the confusion matrix - labels = Model.labels - confusion = [[0] * len(labels) for e in labels] - for txt, con, ref in files: - txt = read_txt(txt) - for c, r in zip(read_con(con, txt), read_con(ref, txt)): - for c, r in zip(c, r): - confusion[labels[r]][labels[c]] += 1 + # Get corresponding concept labels (prediction vs. 
actual) + for c, r in zip( cnote.conlist(), rnote.conlist() ): + for c, r in zip(c, r): + confusion[labels[r]][labels[c]] += 1 - # Display the confusion matrix - print >>args.output, "" - print >>args.output, "" - print >>args.output, "" - print >>args.output, "================" - print >>args.output, directory_name.upper() + " RESULTS" - print >>args.output, "================" - print >>args.output, "" - print >>args.output, "Confusion Matrix" - pad = max(len(l) for l in labels) + 6 - print >>args.output, "%s %s" % (' ' * pad, "\t".join(Model.labels.keys())) - for act, act_v in labels.items(): - print >>args.output, "%s %s" % (act.rjust(pad), "\t".join([str(confusion[act_v][pre_v]) for pre, pre_v in labels.items()])) - print >>args.output, "" + # Display the confusion matrix + print >>args.output, "" + print >>args.output, "" + print >>args.output, "" + print >>args.output, "================" + print >>args.output, directory_name.upper() + " RESULTS" + print >>args.output, "================" + print >>args.output, "" + print >>args.output, "Confusion Matrix" + pad = max(len(l) for l in labels) + 6 + print >>args.output, "%s %s" % (' ' * pad, "\t".join(Model.labels.keys())) + for act, act_v in labels.items(): + print >>args.output, "%s %s" % (act.rjust(pad), "\t".join([str(confusion[act_v][pre_v]) for pre, pre_v in labels.items()])) + print >>args.output, "" - # Compute the analysis stuff - precision = [] - recall = [] - specificity = [] - f1 = [] + # Compute the analysis stuff + precision = [] + recall = [] + specificity = [] + f1 = [] - tp = 0 - fp = 0 - fn = 0 - tn = 0 + tp = 0 + fp = 0 + fn = 0 + tn = 0 - print >>args.output, "Analysis" - print >>args.output, " " * pad, "Precision\tRecall\tF1" + print >>args.output, "Analysis" + print >>args.output, " " * pad, "Precision\tRecall\tF1" - for lab, lab_v in labels.items(): - tp = confusion[lab_v][lab_v] - fp = sum(confusion[v][lab_v] for k, v in labels.items() if v != lab_v) - fn = sum(confusion[lab_v][v] for k, v in labels.items() if v != lab_v) - tn = sum(confusion[v1][v2] for k1, v1 in labels.items() - for k2, v2 in labels.items() if v1 != lab_v and v2 != lab_v) - precision += [float(tp) / (tp + fp + 1e-100)] - recall += [float(tp) / (tp + fn + 1e-100)] - specificity += [float(tn) / (tn + fp + 1e-100)] - f1 += [float(2 * tp) / (2 * tp + fp + fn + 1e-100)] - print >>args.output, "%s %.4f\t%.4f\t%.4f\t%.4f" % (lab.rjust(pad), precision[-1], recall[-1], specificity[-1], f1[-1]) - - print >>args.output, "--------" - - precision = sum(precision) / len(precision) - recall = sum(recall) / len(recall) - specificity = sum(specificity) / len(specificity) - f1 = sum(f1) / len(f1) - - print >>args.output, "Average: %.4f\t%.4f\t%.4f\t%.4f" % (precision, recall, specificity, f1) + for lab, lab_v in labels.items(): + tp = confusion[lab_v][lab_v] + fp = sum(confusion[v][lab_v] for k, v in labels.items() if v != lab_v) + fn = sum(confusion[lab_v][v] for k, v in labels.items() if v != lab_v) + tn = sum(confusion[v1][v2] for k1, v1 in labels.items() + for k2, v2 in labels.items() if v1 != lab_v and v2 != lab_v) + precision += [float(tp) / (tp + fp + 1e-100)] + recall += [float(tp) / (tp + fn + 1e-100)] + specificity += [float(tn) / (tn + fp + 1e-100)] + f1 += [float(2 * tp) / (2 * tp + fp + fn + 1e-100)] + print >>args.output, "%s %.4f\t%.4f\t%.4f\t%.4f" % (lab.rjust(pad), precision[-1], recall[-1], specificity[-1], f1[-1]) + + print >>args.output, "--------" + + precision = sum(precision) / len(precision) + recall = sum(recall) / len(recall) + 
specificity = sum(specificity) / len(specificity) + f1 = sum(f1) / len(f1) + + print >>args.output, "Average: %.4f\t%.4f\t%.4f\t%.4f" % (precision, recall, specificity, f1) if __name__ == '__main__': - main() \ No newline at end of file + main() From db0b84f77185421fe8b137d8c3cee505638f6767 Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Wed, 4 Dec 2013 00:51:37 -0500 Subject: [PATCH 004/393] Now interacts with the Note class --- code/train.py | 209 +++++++++++++++++++++++++++----------------------- 1 file changed, 112 insertions(+), 97 deletions(-) diff --git a/code/train.py b/code/train.py index d4f547e..aee4d07 100644 --- a/code/train.py +++ b/code/train.py @@ -1,11 +1,3 @@ -"""Train a model on supplied data. - -It is generally necessary to specify the following parameters: - -t: the files containing the training text, and - -c: the files containing the training concepts. -However, the remainder of the options supply information about how to execute -(e.g. which ML methods to use, where to put the models, &c.) -""" import os import os.path import sys @@ -19,102 +11,125 @@ from note import * def main(): - parser = argparse.ArgumentParser() - - parser.add_argument("-t", - dest = "txt", - help = "The files that contain the training examples", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/merged/txt/*') - ) + parser = argparse.ArgumentParser() + + parser.add_argument("-t", + dest = "txt", + help = "The files that contain the training examples", + #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/merged/txt/*') + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/beth/txt/record-33.txt') + ) - parser.add_argument("-c", - dest = "con", - help = "The files that contain the labels for the training examples", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/merged/concept/*') - ) + parser.add_argument("-c", + dest = "con", + help = "The files that contain the labels for the training examples", + #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/merged/concept/*') + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/beth/concept/record-33.txt') + ) - parser.add_argument("-m", - dest = "model", - help = "Path to the model that should be generated", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/awesome.model') - ) - - parser.add_argument("-d", - dest = "disabled_features", - help = "The features that should not be used", - nargs = "+", - default = None - ) - - parser.add_argument("-e", - dest = "enabled_features", - help = "The features that should be used. 
This option trumps -d", - nargs = "+", - default = None - ) - - parser.add_argument("--no-svm", - dest = "no_svm", - action = "store_true", - help = "Disable SVM model generation", - ) - - parser.add_argument("--no-lin", - dest = "no_lin", - action = "store_true", - help = "Disable LIN model generation", - ) - - parser.add_argument("--no-crf", - dest = "no_crf", - action = "store_true", - help = "Disable CRF model generation", - ) - - args = parser.parse_args() - - training_list = [] - txt_files = glob.glob(args.txt) - con_files = glob.glob(args.con) - - txt_files_map = helper.map_files(txt_files) - con_files_map = helper.map_files(con_files) - - # line up .txt with .con files - for k in txt_files_map: - if k in con_files_map: - training_list.append((txt_files_map[k], con_files_map[k])) - - type = 0 # type is a flag set - if not args.no_svm: - type = type | libml.SVM - - if not args.no_lin: - type = type | libml.LIN + parser.add_argument("-m", + dest = "model", + help = "Path to the model that should be generated", + #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/awesome.model') + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/run_models/run.model') + ) + + parser.add_argument("-d", + dest = "disabled_features", + help = "The features that should not be used", + nargs = "+", + default = None + ) + + parser.add_argument("-e", + dest = "enabled_features", + help = "The features that should be used. This option trumps -d", + nargs = "+", + default = None + ) + + parser.add_argument("--no-svm", + dest = "no_svm", + action = "store_true", + help = "Disable SVM model generation", + ) + + parser.add_argument("--no-lin", + dest = "no_lin", + action = "store_true", + help = "Disable LIN model generation", + ) + + parser.add_argument("--no-crf", + dest = "no_crf", + action = "store_true", + help = "Disable CRF model generation", + ) + + + # Parse the command line arguments + args = parser.parse_args() + + + # A list of text file paths + # A list of concept file paths + txt_files = glob.glob(args.txt) + con_files = glob.glob(args.con) + + + # ex. {'record-13': 'record-13.txt'} + # ex. {'record-13': 'record-13.con'} + txt_files_map = helper.map_files(txt_files) + con_files_map = helper.map_files(con_files) + + + # ex. training_list = [ ('record-13.txt', 'record-13.con') ] + training_list = [] + for k in txt_files_map: + if k in con_files_map: + training_list.append((txt_files_map[k], con_files_map[k])) + + # TEMP - useful for when I was reading in XML files + #training_list.append(txt_files_map[k]) + + + + # What kind of model should be used? (ex. SVM vs. 
CRF) + type = 0 + if not args.no_svm: + type = type | libml.SVM + if not args.no_lin: + type = type | libml.LIN + if not args.no_crf: + type = type | libml.CRF - if not args.no_crf: - type = type | libml.CRF + + # Read the data into a Note object + notes = [] + for txt, con in training_list: + #for txt in training_list: + # Alternative data formats + #note_tmp.read_plain(txt, con) # plain + #note_tmp.read_xml(txt) # xml + + note_tmp = Note() # Create Note + note_tmp.read_i2b2(txt, con) # Read data into Note + notes.append( note_tmp ) # Add the Note to the list + + + # Create a Machine Learning model + model = Model(filename = args.model, type = type) - # Get data and labels from files - data = [] - labels = [] - for txt, con in training_list: - datum = read_txt(txt) - data += datum - labels += read_con(con, datum) - - # Train a model on the data and labels - model = Model(filename = args.model, type = type) - - if args.disabled_features != None: - model.enabled_features = model.enabled_features - Set(args.disabled_features) + if args.disabled_features != None: + model.enabled_features = model.enabled_features - Set(args.disabled_features) + if args.enabled_features != None: + model.enabled_features = Set(args.enabled_features) - if args.enabled_features != None: - model.enabled_features = Set(args.enabled_features) + # Train the model using the Note's data + model.train(notes[0]) - model.train(data, labels) if __name__ == '__main__': - main() \ No newline at end of file + main() From f04de90ea85a832b26b71dee1a4446e8adfba606 Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Wed, 4 Dec 2013 00:51:54 -0500 Subject: [PATCH 005/393] Now interacts with the Note class --- code/predict.py | 155 +++++++++++++++++++++++++++--------------------- 1 file changed, 86 insertions(+), 69 deletions(-) diff --git a/code/predict.py b/code/predict.py index adedfd1..83f92bb 100644 --- a/code/predict.py +++ b/code/predict.py @@ -10,78 +10,95 @@ from note import * def main(): - parser = argparse.ArgumentParser() - parser.add_argument("-i", - dest = "input", - help = "The input files to predict", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_data/*') - ) - - parser.add_argument("-o", - dest = "output", - help = "The directory to write the output", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_predictions') - ) - - parser.add_argument("-m", - dest = "model", - help = "The model to use for prediction", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/awesome.model') - ) + parser = argparse.ArgumentParser() + parser.add_argument("-i", + dest = "input", + help = "The input files to predict", + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_data/*') + ) + + parser.add_argument("-o", + dest = "output", + help = "The directory to write the output", + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_predictions') + ) + + parser.add_argument("-m", + dest = "model", + help = "The model to use for prediction", + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/run_models/run.model') + ) - parser.add_argument("--no-svm", - dest = "no_svm", - action = "store_true", - help = "Disable SVM model generation", - ) - - parser.add_argument("--no-lin", - dest = "no_lin", - action = "store_true", - help = "Disable LIN model generation", - ) - - parser.add_argument("--no-crf", - dest = "no_crf", - action = "store_true", - help = 
"Disable CRF model generation", - ) + parser.add_argument("--no-svm", + dest = "no_svm", + action = "store_true", + help = "Disable SVM model generation", + ) + + parser.add_argument("--no-lin", + dest = "no_lin", + action = "store_true", + help = "Disable LIN model generation", + ) + + parser.add_argument("--no-crf", + dest = "no_crf", + action = "store_true", + help = "Disable CRF model generation", + ) - args = parser.parse_args() - - # Locate the test files - files = glob.glob(args.input) - - # Load a model and make a prediction for each file - path = args.output - helper.mkpath(args.output) - - model = Model.load(args.model) - if args.no_svm: - model.type &= ~libml.SVM - if args.no_lin: - model.type &= ~libml.LIN - if args.no_crf: - model.type &= ~libml.CRF + args = parser.parse_args() + + # Locate the test files + files = glob.glob(args.input) + + # Load a model and make a prediction for each file + path = args.output + helper.mkpath(args.output) + + # Determine what type of models to use (ex SVM vs. CRF) + model = Model.load(args.model) + if args.no_svm: + model.type &= ~libml.SVM + if args.no_lin: + model.type &= ~libml.LIN + if args.no_crf: + model.type &= ~libml.CRF - for txt in files: - data = read_txt(txt) - labels = model.predict(data) - con = os.path.split(txt)[-1] - con = con[:-3] + 'con' + + for txt in files: + + # Read the data into a Note object + note = Note() + note.read_i2b2(txt) + #note.read_plain(txt) # TEMP - in case of plain format + + # Use the model to predict the concept labels + labels = model.predict(note) + + # labels (above) is a hash table + # the keys are 1,2,4 (SVM, LIN, and CRF) + # each value is a list of concept labels, like from the Note class + + + con = os.path.split(txt)[-1] + con = con[:-3] + 'con' - for t in libml.bits(model.type): - if t == libml.SVM: - helper.mkpath(os.path.join(args.output, "svm")) - con_path = os.path.join(path, "svm", con) - if t == libml.LIN: - helper.mkpath(os.path.join(args.output, "lin")) - con_path = os.path.join(path, "lin", con) - if t == libml.CRF: - helper.mkpath(os.path.join(args.output, "crf")) - con_path = os.path.join(path, "crf", con) - - write_con(con_path, data, labels[t]) + for t in libml.bits(model.type): + if t == libml.SVM: + helper.mkpath(os.path.join(args.output, "svm")) + con_path = os.path.join(path, "svm", con) + if t == libml.LIN: + helper.mkpath(os.path.join(args.output, "lin")) + con_path = os.path.join(path, "lin", con) + if t == libml.CRF: + helper.mkpath(os.path.join(args.output, "crf")) + con_path = os.path.join(path, "crf", con) + + # Output the concept predictions + note.write_i2b2(con_path, labels[t]) + #note.write_plain(con_path, labels[t]) # in case of plain format + if __name__ == '__main__': - main() \ No newline at end of file + main() From 75337711640c69f5daf8bf0dc971d097c043f62a Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Wed, 4 Dec 2013 00:52:50 -0500 Subject: [PATCH 006/393] Moved functionality from helper functions to Note class. Also, added support for plain format and attempted xml --- code/note.py | 344 +++++++++++++++++++++++++++++++++++---------------- 1 file changed, 235 insertions(+), 109 deletions(-) diff --git a/code/note.py b/code/note.py index 717b232..75c773f 100644 --- a/code/note.py +++ b/code/note.py @@ -1,119 +1,245 @@ -"""Internal note representation. +from __future__ import with_statement -Note class reads in i2b2 formatted .txt and .con files and aligns them. After- -ward they can be accessed by iterating over the class. 
+import re -Three auxiliary methods, read_txt(), read_con(), and write_con() streamline -help manipulate such files as well. -""" -# TODO: clean up interface -# TODO: add support for non-i2b2 formats -from __future__ import with_statement +class Note: + # Constructor + def __init__(self): + # data - A list of lines directly from the file + # concepts - A one-to-one correspondence of each word's concept + self.data = [] + self.concepts = [] -# This is a test -class Note: - """Note representation. - - Notes are represented as an iterable of tuples (word, concept). - """ - # TODO: more efficient internal representation - - def __init__(self, txt, con=None): - """Read in the note from file(s). - - Args: - txt: File containing i2b2-formatted text. - con: File containing i2b2-formatted concepts. - """ - - # Read in list of tokens, each with "none" concept - self.sents = [] - with open(txt) as f: - for line in f: - self.sents.append([[w, "none"] for w in line.split()]) - - # Assign appropriate concept if a concept file was given - if con: - with open(con) as f: - for line in f: - c, t = line.split('||') - t = t[3:-2] - c = c.split() - start = c[-2].split(':') - end = c[-1].split(':') - assert "concept spans one line", start[0] == end[0] - l = int(start[0]) - 1 - start = int(start[1]) - end = int(end[1]) - - for i in range(start, end + 1): - self.sents[l][i][1] = t - - def __iter__(self): - """Yield the (word, concept) pairs.""" - return iter(self.sents) - -def read_txt(txt): - """Get a list of words from an i2b2-formatted text file. - - Ags: - txt: The i2b2-formatted text file. - - Returns: - A list of the words in the text file. - """ - # TODO: this method is misleading, should include concept for consistency - - note = [] + + # Note::read_i2b2() + # + # @param txt. A file path for the i2b2 tokenized medical record + # @param con. A file path for the i2b2 annotated concepts associated with txt + def read_i2b2(self, txt, con=None): + + # Read in the medical text with open(txt) as f: + for line in f: + # Add sentence to the data list + self.data.append(line) + + # For each word, store a corresponding concept label + tmp = [] + for word in line.split(): + tmp.append('none') + self.concepts.append(tmp) + + + + # If an accompanying concept file was specified, read it + if con: + with open(con) as f: for line in f: - note.append([w for w in line.split()]) - return note - -def read_con(con, txt): - """Get a list of (word, concept) pairs from a txt and con file. - - Args: - con: File containing the i2b2-formatted concepts. - txt: File containing the i2b2-formatted text. - - Returns: - A list of (word, concept) pairs. - """ - # TODO: this method obviates the Note class, should complement - - label = [['none'] * len(line) for line in txt] - with open(con) as f: - for line in f: - c, t = line.split('||') - t = t[3:-2] - c = c.split() - start = c[-2].split(':') - end = c[-1].split(':') - assert "concept spans one line", start[0] == end[0] - l = int(start[0]) - 1 - start = int(start[1]) - end = int(end[1]) - - for i in range(start, end + 1): - label[l][i] = t - return label - -def write_con(con, data, labels): - """Writes concept file. - - From data and labels, this create a .con file as output. 
- """ - # TODO: method should call a serialization primitive of Note class + c, t = line.split('||') + t = t[3:-2] + c = c.split() + start = c[-2].split(':') + end = c[-1].split(':') + assert "concept spans one line", start[0] == end[0] + l = int(start[0]) - 1 + start = int(start[1]) + end = int(end[1]) + + for i in range(start, end + 1): + self.concepts[l][i] = t + + + + # Note::write_i2b2() + # + # @param con. A path to the file of where to write the prediction. + # @param labels. A list of predictions of labels for the given text. + # + # Write the concept predictions to a given file in i2b2 format + def write_i2b2(self, con, labels): with open(con, 'w') as f: - for i, tmp in enumerate(zip(data, labels)): - datum, label = tmp - for j, tmp in enumerate(zip(datum, label)): - datum, label = tmp - if label != 'none': - idx = "%d:%d" % (i + 1, j) - print >>f, "c=\"%s\" %s %s||t=\"%s\"" % (datum, idx, idx, label) + for i, tmp in enumerate(zip(self.txtlist(), labels)): + datum, label = tmp + for j, tmp in enumerate(zip(datum, label)): + datum, label = tmp + if label != 'none': + idx = "%d:%d" % (i + 1, j) + print >>f, "c=\"%s\" %s %s||t=\"%s\"" % (datum, idx, idx, label) + + + + # Note::read_plain() + # + # @param txt. A file path for the plain tokenized medical record + # @param con. A file path for the annotated concepts associated with txt + def read_plain(self, txt, con=None): + + # Read in the medical text + with open(txt) as f: + + for line in f: + # Add sentence to the data list + self.data.append(line) + + # For each word, store a corresponding concept label + tmp = [] + for word in line.split(): + tmp.append('none') + self.concepts.append(tmp) + + + # If an accompanying concept file was specified, read it + if con: + with open(con) as f: + for line in f: + c, t = line.split('||') + t = t[3:-2] + c = c.split() + + start = c[-2].split(':') + end = c[-1].split(':') + + assert "concept spans one line", start[0] == end[0] + l = int(start[0]) - 1 + start = int(start[1]) + end = int(end[1]) + + # Tokenize the input intervals + stok = len(self.data[l][:start].split()) + etok = len(self.data[l][start:end+1].split()) + stok - 1 + + # Update the corresponding concept labels + for i in range(stok, etok + 1): + self.concepts[l][i] = t + + + + # Note::write_plain() + # + # @param con. A path to the file of where to write the prediction. + # @param labels. A list of predictions of labels for the given text. + # + # Write the concept predictions to a given file in plain format + def write_plain(self, con, labels): + + with open(con, 'w') as f: + + # Search every token + for i, line in enumerate(labels): + for j, label in enumerate(line): + + # Only print non-trivial tokens + if label != 'none': + + words = self.data[i].split() + + # Get the untokenized starting index + start = 0 + for k in range(j): + start += len(words[k]) + 1 + + # Untokenized ending index + end = start + len(words[j]) - 1 + + # Format the starting and ending indices + sidx = "%d:%d" % (i + 1, start) + eidx = "%d:%d" % (i + 1, end) + + # Get the string stored in the plaintext representation + datum = self.data[i][start:end+1] + + print >>f, "c=\"%s\" %s %s||t=\"%s\"" % (datum, sidx, eidx, label) + + + + # Note::read_xml() + # + # @param txt. 
A file path for the xml formatted medical record + def read_xml(self, txt): + + # FIXME: By storing the 'data' as a list of lines from the file + # instead of a list of list of words, xml does not fit nicely + # Possible solution: Switch back to list of list of words + # Alternative: Store edited sentences from the file to remove <> + + + # Read in the medical text + with open(txt) as f: + + for line in f: + # Add sentence to the data list + self.data.append(line) + + + # For each word, store a corresponding concept label + tmp = [] + for i, group in enumerate(line.split('<')): + # All odd groups have label info (because of line split) + # ex. 'treatment>discharge medications' + if (i%2): + # Get concept label + match = re.search('(\w+)>(.+)', group) + if not match: + print "\nUnexpected file format\n" + exit(1) + + label = match.group(1) + words = match.group(2) + + # Add the label once for every word + for word in words.split(): + tmp.append(label) + # If even group , then process with 'none' labels + else: + for word in group.split(): + # / closes the xml tag of a previous label (skip) + if word[0] == '/': + continue + else: + tmp.append('none') + # Add line of labels to the 'concepts' data member + self.concepts.append(tmp) + + + + # Note::write_xml() + # + # @param con. A path to the file of where to write the prediction. + # @param labels. A list of predictions of labels for the given text. + # + # Write the concept predictions to a given file in xml format + def write_xml(self, con, labels): + # xml formats do not have associated concept files + return + + + + + # txtlist() + # + # @return the data from the medical text broken into line-word format + def txtlist( self ): + # Goal: Break self.data sentences into lists of words + ans = [] + for sent in self.data: + ans.append(sent.split()) + + return ans + + + + # conlist() + # + # @return a list of lists of the concepts associated with each word from data + def conlist( self ): + return self.concepts + + + + # For iterating + def __iter__(self): + return iter(self.data) From b1a4403c6a6056fa73fbff1404c1b85192ca6298 Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Wed, 11 Dec 2013 15:17:29 -0500 Subject: [PATCH 007/393] passed the Notes to model.train() as one argument --- code/model.py | 108 +++++++++++++++++++++++++++++++++++++------------- 1 file changed, 81 insertions(+), 27 deletions(-) diff --git a/code/model.py b/code/model.py index a18c87c..002b162 100644 --- a/code/model.py +++ b/code/model.py @@ -1,9 +1,3 @@ -"""Contains Model class and all feature generation methods. -""" -# TODO: feature generation should be separated into a separate file. -# TODO: implement feature extraction using scikit-learn -# TODO: make model serializable so that reading/writing is trivial - from __future__ import with_statement import time @@ -47,6 +41,7 @@ def load(filename='awesome.model'): model.filename = filename return model + # Constructor def __init__(self, filename='awesome.model', type=libml.ALL): model_directory = os.path.dirname(filename) @@ -59,54 +54,104 @@ def __init__(self, filename='awesome.model', type=libml.ALL): self.enabled_features = Model.sentence_features | Model.word_features - def train(self, data, labels): + + # Model::train() + # + # @param note. 
A Note object that has data for training the model + def train(self, note): + + # Get the data and annotations from the Note object + + # data - A list of list of the medical text's words + # labels - A list of list of concepts (1:1 with data) + data = note.txtlist() + labels = note.conlist() + + + # rows is a list of a list of hash tables rows = [] for sentence in data: rows.append(self.features_for_sentence(sentence)) - + + # each list of hash tables for row in rows: + # each hash table for features in row: + # each key in hash table for feature in features: + # I think new word encountered if feature not in self.vocab: self.vocab[feature] = len(self.vocab) + 1 + # A list of a list encodings of concept labels (ex. 'none' => 0) + # [ [0, 0, 0], [0], [0, 0, 0], [0], [0, 0, 0, 0, 0, 2, 2, 0, 1] ] label_lu = lambda l: Model.labels[l] labels = [map(label_lu, x) for x in labels] - + + + # list of a list of hash tables (all keys & values now numbers) feat_lu = lambda f: {self.vocab[item]:f[item] for item in f} rows = [map(feat_lu, x) for x in rows] + libml.write_features(self.filename, rows, labels, self.type) with open(self.filename, "w") as model: pickle.dump(self, model) + # Train the model libml.train(self.filename, self.type) + - def predict(self, data): - rows = [] - for sentence in data: - rows.append(self.features_for_sentence(sentence)) + # Model::predict() + # + # @param note. A Note object that contains the training data + def predict(self, note): - feat_lu = lambda f: {self.vocab[item]:f[item] for item in f if item in self.vocab} - rows = [map(feat_lu, x) for x in rows] - libml.write_features(self.filename, rows, None, self.type); + # data - A list of list of the medical text's words + data = note.txtlist() + + + # Something to do with calibrating the model + rows = [] # rows <- list of a list of hash tables (feature vectors) + for sentence in data: + rows.append(self.features_for_sentence(sentence)) + + + feat_lu = lambda f: {self.vocab[item]:f[item] for item in f if item in self.vocab} + rows = [map(feat_lu, x) for x in rows] + libml.write_features(self.filename, rows, None, self.type); + + # Use the trained model to make predictions + libml.predict(self.filename, self.type) - libml.predict(self.filename, self.type) - labels_list = libml.read_labels(self.filename, self.type) + # A hash table + # the keys are 1,2,4 (SVM, LIN, and CRF) + # each value is a list of concept labels encodings + labels_list = libml.read_labels(self.filename, self.type) - for t, labels in labels_list.items(): - tmp = [] - for sentence in data: - tmp.append([labels.pop(0) for i in range(len(sentence))]) - tmp[-1] = map(lambda l: l.strip(), tmp[-1]) - tmp[-1] = map(lambda l: Model.reverse_labels[int(l)], tmp[-1]) - labels_list[t] = tmp - return labels_list + # translate labels_list into a readable format + # ex. 
change all occurences of 0 -> 'none' + for t, labels in labels_list.items(): + tmp = [] + for sentence in data: + tmp.append([labels.pop(0) for i in range(len(sentence))]) + tmp[-1] = map(lambda l: l.strip(), tmp[-1]) + tmp[-1] = map(lambda l: Model.reverse_labels[int(l)],tmp[-1]) + labels_list[t] = tmp + + + # The new labels_list is a translated version + return labels_list + + + + # input: A sentence from a medical text file (list of words) + # output: A list of hash tables def features_for_sentence(self, sentence): features_list = [] @@ -169,10 +214,19 @@ def features_for_sentence(self, sentence): return features_list + + + # input: a single word, like + # Admission + # output: A hash table of features + # features include: word, length, mitre, stem_porter def features_for_word(self, word): features = {'dummy':1} # always have >0 dimensions + # word_shape, word, length, mitre, stem_porter, stem_lancaster for feature in Model.word_features: + + # word_shape, test_result, word, pos, next, length, stem_wordnet, mitre, stem_porter, prev, stem_lancaster if feature not in self.enabled_features: continue @@ -319,4 +373,4 @@ def get_def_class (self, word): elif word.lower() in treatment_terms: return 3 return 0 - \ No newline at end of file + From 78058a6199e460626cb4d6ff7c57ff55e6e74ebf Mon Sep 17 00:00:00 2001 From: Tristan Naumann Date: Thu, 19 Dec 2013 13:30:30 -0500 Subject: [PATCH 008/393] Fixing leading and trailing whitespace issues per PEP8 Roughly the equivalent of the following in vim: ```vim :set tabstop=4 shiftwidth=4 expandtab :retab :%s/\s\+$// ``` --- code/evaluate.py | 187 +++++++------- code/helper.py | 11 +- code/libml.py | 538 ++++++++++++++++++++-------------------- code/model.py | 598 ++++++++++++++++++++++----------------------- code/predict.py | 88 +++---- code/runall.py | 122 ++++----- code/statistics.py | 70 +++--- code/train.py | 88 ++++--- code/wordshape.py | 168 +++++++------ 9 files changed, 938 insertions(+), 932 deletions(-) diff --git a/code/evaluate.py b/code/evaluate.py index 3a1cd3c..ed1b4b6 100644 --- a/code/evaluate.py +++ b/code/evaluate.py @@ -13,38 +13,38 @@ def main(): parser = argparse.ArgumentParser() parser.add_argument("-t", - help = "Test files that were used to generate predictions", - dest = "txt", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_data/*') + help = "Test files that were used to generate predictions", + dest = "txt", + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_data/*') ) parser.add_argument("-c", - help = "The directory that contains predicted concept files organized into subdirectories for svm, lin, srf", - dest = "con", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_predictions/') + help = "The directory that contains predicted concept files organized into subdirectories for svm, lin, srf", + dest = "con", + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_predictions/') ) parser.add_argument("-r", - help = "The directory that contains reference gold standard concept files", - dest = "ref", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/reference_standard_for_test_data/concepts/') + help = "The directory that contains reference gold standard concept files", + dest = "ref", + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/reference_standard_for_test_data/concepts/') ) - + parser.add_argument("-o", - help = "Write the evaluation 
to a file rather than STDOUT", - dest = "output", - default = None + help = "Write the evaluation to a file rather than STDOUT", + dest = "output", + default = None ) # Parse command line arguments args = parser.parse_args() - + # Is output destination specified if args.output: - args.output = open(args.output, "w") + args.output = open(args.output, "w") else: - args.output = sys.stdout + args.output = sys.stdout txt_files = glob.glob(args.txt) @@ -57,31 +57,30 @@ def main(): con_directories = os.listdir(args.con) for con_directory in con_directories: - files = [] - directory_name = os.path.basename(con_directory) + files = [] + directory_name = os.path.basename(con_directory) - if directory_name not in ["svm", "crf", "lin"]: - continue + if directory_name not in ["svm", "crf", "lin"]: + continue - con_files = os.listdir(os.path.join(args.con, con_directory)) - con_files = map(lambda f: os.path.join(args.con, con_directory, f), con_files) - - con_files_map = helper.map_files(con_files) + con_files = os.listdir(os.path.join(args.con, con_directory)) + con_files = map(lambda f: os.path.join(args.con, con_directory, f), con_files) - for k in txt_files_map: - if k in con_files_map and k in ref_files_map: - files.append((txt_files_map[k], con_files_map[k], ref_files_map[k])) + con_files_map = helper.map_files(con_files) + for k in txt_files_map: + if k in con_files_map and k in ref_files_map: + files.append((txt_files_map[k], con_files_map[k], ref_files_map[k])) - # Compute the confusion matrix - labels = Model.labels # hash tabble: label -> index - confusion = [[0] * len(labels) for e in labels] + # Compute the confusion matrix + labels = Model.labels # hash tabble: label -> index + confusion = [[0] * len(labels) for e in labels] - # txt <- medical text - # con <- model predictions - # ref <- actual labels - for txt, con, ref in files: + # txt <- medical text + # con <- model predictions + # ref <- actual labels + for txt, con, ref in files: # A note that represents the model's predictions cnote = Note() @@ -94,65 +93,65 @@ def main(): #rnote.read_plain(txt, ref ) # in case in plain format # Get corresponding concept labels (prediction vs. 
actual) - for c, r in zip( cnote.conlist(), rnote.conlist() ): - for c, r in zip(c, r): - confusion[labels[r]][labels[c]] += 1 - - - - # Display the confusion matrix - print >>args.output, "" - print >>args.output, "" - print >>args.output, "" - print >>args.output, "================" - print >>args.output, directory_name.upper() + " RESULTS" - print >>args.output, "================" - print >>args.output, "" - print >>args.output, "Confusion Matrix" - pad = max(len(l) for l in labels) + 6 - print >>args.output, "%s %s" % (' ' * pad, "\t".join(Model.labels.keys())) - for act, act_v in labels.items(): - print >>args.output, "%s %s" % (act.rjust(pad), "\t".join([str(confusion[act_v][pre_v]) for pre, pre_v in labels.items()])) - print >>args.output, "" - - - - # Compute the analysis stuff - precision = [] - recall = [] - specificity = [] - f1 = [] - - tp = 0 - fp = 0 - fn = 0 - tn = 0 - - print >>args.output, "Analysis" - print >>args.output, " " * pad, "Precision\tRecall\tF1" - - - - for lab, lab_v in labels.items(): - tp = confusion[lab_v][lab_v] - fp = sum(confusion[v][lab_v] for k, v in labels.items() if v != lab_v) - fn = sum(confusion[lab_v][v] for k, v in labels.items() if v != lab_v) - tn = sum(confusion[v1][v2] for k1, v1 in labels.items() - for k2, v2 in labels.items() if v1 != lab_v and v2 != lab_v) - precision += [float(tp) / (tp + fp + 1e-100)] - recall += [float(tp) / (tp + fn + 1e-100)] - specificity += [float(tn) / (tn + fp + 1e-100)] - f1 += [float(2 * tp) / (2 * tp + fp + fn + 1e-100)] - print >>args.output, "%s %.4f\t%.4f\t%.4f\t%.4f" % (lab.rjust(pad), precision[-1], recall[-1], specificity[-1], f1[-1]) - - print >>args.output, "--------" - - precision = sum(precision) / len(precision) - recall = sum(recall) / len(recall) - specificity = sum(specificity) / len(specificity) - f1 = sum(f1) / len(f1) - - print >>args.output, "Average: %.4f\t%.4f\t%.4f\t%.4f" % (precision, recall, specificity, f1) - + for c, r in zip( cnote.conlist(), rnote.conlist() ): + for c, r in zip(c, r): + confusion[labels[r]][labels[c]] += 1 + + + + # Display the confusion matrix + print >>args.output, "" + print >>args.output, "" + print >>args.output, "" + print >>args.output, "================" + print >>args.output, directory_name.upper() + " RESULTS" + print >>args.output, "================" + print >>args.output, "" + print >>args.output, "Confusion Matrix" + pad = max(len(l) for l in labels) + 6 + print >>args.output, "%s %s" % (' ' * pad, "\t".join(Model.labels.keys())) + for act, act_v in labels.items(): + print >>args.output, "%s %s" % (act.rjust(pad), "\t".join([str(confusion[act_v][pre_v]) for pre, pre_v in labels.items()])) + print >>args.output, "" + + + + # Compute the analysis stuff + precision = [] + recall = [] + specificity = [] + f1 = [] + + tp = 0 + fp = 0 + fn = 0 + tn = 0 + + print >>args.output, "Analysis" + print >>args.output, " " * pad, "Precision\tRecall\tF1" + + + + for lab, lab_v in labels.items(): + tp = confusion[lab_v][lab_v] + fp = sum(confusion[v][lab_v] for k, v in labels.items() if v != lab_v) + fn = sum(confusion[lab_v][v] for k, v in labels.items() if v != lab_v) + tn = sum(confusion[v1][v2] for k1, v1 in labels.items() + for k2, v2 in labels.items() if v1 != lab_v and v2 != lab_v) + precision += [float(tp) / (tp + fp + 1e-100)] + recall += [float(tp) / (tp + fn + 1e-100)] + specificity += [float(tn) / (tn + fp + 1e-100)] + f1 += [float(2 * tp) / (2 * tp + fp + fn + 1e-100)] + print >>args.output, "%s %.4f\t%.4f\t%.4f\t%.4f" % (lab.rjust(pad), precision[-1], 
recall[-1], specificity[-1], f1[-1]) + + print >>args.output, "--------" + + precision = sum(precision) / len(precision) + recall = sum(recall) / len(recall) + specificity = sum(specificity) / len(specificity) + f1 = sum(f1) / len(f1) + + print >>args.output, "Average: %.4f\t%.4f\t%.4f\t%.4f" % (precision, recall, specificity, f1) + if __name__ == '__main__': main() diff --git a/code/helper.py b/code/helper.py index ae689e0..a9edb02 100644 --- a/code/helper.py +++ b/code/helper.py @@ -3,12 +3,13 @@ import os.path import errno + def map_files(files): """Maps a list of files to basename -> path.""" output = {} - for f in files: - basename = os.path.splitext(os.path.basename(f))[0] - output[basename] = f + for f in files: #pylint: disable=invalid-name + basename = os.path.splitext(os.path.basename(f))[0] + output[basename] = f return output @@ -19,5 +20,5 @@ def mkpath(path): except OSError as exc: if exc.errno == errno.EEXIST and os.path.isdir(path): pass - else: - raise + else: + raise diff --git a/code/libml.py b/code/libml.py index 01945d3..bc5d2d1 100644 --- a/code/libml.py +++ b/code/libml.py @@ -1,7 +1,7 @@ """Interface to ML libraries -- libsvm, liblinear, and crfsuite. -Much of this code performs parameter selection a la libsvm's easy.py. However, -the remainder provides an interface for the ML libraries so that the can be +Much of this code performs parameter selection a la libsvm's easy.py. However, +the remainder provides an interface for the ML libraries so that the can be called using a single function within the remainder of the code. """ # TODO: this code should leverage bindings rather than fork processes @@ -17,9 +17,9 @@ from subprocess import * if sys.hexversion < 0x03000000: - import Queue + import Queue else: - import queue as Queue + import queue as Queue # Library locations this_path = os.path.dirname(os.path.realpath(__file__)) @@ -29,11 +29,11 @@ is_win32 = (sys.platform == 'win32') if is_win32: - libsvm_path = os.path.join(libsvm_path, "windows") - liblinear_path = os.path.join(liblinear_path, "windows") - crfsuite_path = os.path.join(crfsuite_path, "windows") + libsvm_path = os.path.join(libsvm_path, "windows") + liblinear_path = os.path.join(liblinear_path, "windows") + crfsuite_path = os.path.join(crfsuite_path, "windows") else: - crfsuite_path = os.path.join(crfsuite_path, "frontend") + crfsuite_path = os.path.join(crfsuite_path, "frontend") # File locations svm_train = os.path.join(libsvm_path, "svm-train") @@ -50,202 +50,202 @@ ############################################################################### def range_f(begin,end,step): - # like range, but works on non-integer too - seq = [] - while True: - if step > 0 and begin > end: break - if step < 0 and begin < end: break - seq.append(begin) - begin = begin + step - return seq + # like range, but works on non-integer too + seq = [] + while True: + if step > 0 and begin > end: break + if step < 0 and begin < end: break + seq.append(begin) + begin = begin + step + return seq def permute_sequence(seq): - n = len(seq) - if n <= 1: return seq + n = len(seq) + if n <= 1: return seq - mid = int(n/2) - left = permute_sequence(seq[:mid]) - right = permute_sequence(seq[mid+1:]) + mid = int(n/2) + left = permute_sequence(seq[:mid]) + right = permute_sequence(seq[mid+1:]) - ret = [seq[mid]] - while left or right: - if left: ret.append(left.pop(0)) - if right: ret.append(right.pop(0)) + ret = [seq[mid]] + while left or right: + if left: ret.append(left.pop(0)) + if right: ret.append(right.pop(0)) - return ret + return 
ret def redraw(db,best_param,tofile=False): - if len(db) == 0: return - begin_level = round(max(x[2] for x in db)) - 3 - step_size = 0.5 - - best_log2c,best_log2g,best_rate = best_param - - # if newly obtained c, g, or cv values are the same, - # then stop redrawing the contour. - if all(x[0] == db[0][0] for x in db): return - if all(x[1] == db[0][1] for x in db): return - if all(x[2] == db[0][2] for x in db): return - - if tofile: - gnuplot.write(b"set term png transparent small linewidth 2 medium enhanced\n") - gnuplot.write("set output \"{0}\"\n".format(png_filename.replace('\\','\\\\')).encode()) - #gnuplot.write(b"set term postscript color solid\n") - #gnuplot.write("set output \"{0}.ps\"\n".format(dataset_title).encode().encode()) - elif is_win32: - gnuplot.write(b"set term windows\n") - else: - gnuplot.write( b"set term x11\n") - gnuplot.write(b"set xlabel \"log2(C)\"\n") - gnuplot.write(b"set ylabel \"log2(gamma)\"\n") - gnuplot.write("set xrange [{0}:{1}]\n".format(c_begin,c_end).encode()) - gnuplot.write("set yrange [{0}:{1}]\n".format(g_begin,g_end).encode()) - gnuplot.write(b"set contour\n") - gnuplot.write("set cntrparam levels incremental {0},{1},100\n".format(begin_level,step_size).encode()) - gnuplot.write(b"unset surface\n") - gnuplot.write(b"unset ztics\n") - gnuplot.write(b"set view 0,0\n") - gnuplot.write("set title \"{0}\"\n".format(dataset_title).encode()) - gnuplot.write(b"unset label\n") - gnuplot.write("set label \"Best log2(C) = {0} log2(gamma) = {1} accuracy = {2}%\" \ - at screen 0.5,0.85 center\n". \ - format(best_log2c, best_log2g, best_rate).encode()) - gnuplot.write("set label \"C = {0} gamma = {1}\"" - " at screen 0.5,0.8 center\n".format(2**best_log2c, 2**best_log2g).encode()) - gnuplot.write(b"set key at screen 0.9,0.9\n") - gnuplot.write(b"splot \"-\" with lines\n") - - - - - db.sort(key = lambda x:(x[0], -x[1])) - - prevc = db[0][0] - for line in db: - if prevc != line[0]: - gnuplot.write(b"\n") - prevc = line[0] - gnuplot.write("{0[0]} {0[1]} {0[2]}\n".format(line).encode()) - gnuplot.write(b"e\n") - gnuplot.write(b"\n") # force gnuplot back to prompt when term set failure - gnuplot.flush() + if len(db) == 0: return + begin_level = round(max(x[2] for x in db)) - 3 + step_size = 0.5 + + best_log2c,best_log2g,best_rate = best_param + + # if newly obtained c, g, or cv values are the same, + # then stop redrawing the contour. 
+ if all(x[0] == db[0][0] for x in db): return + if all(x[1] == db[0][1] for x in db): return + if all(x[2] == db[0][2] for x in db): return + + if tofile: + gnuplot.write(b"set term png transparent small linewidth 2 medium enhanced\n") + gnuplot.write("set output \"{0}\"\n".format(png_filename.replace('\\','\\\\')).encode()) + #gnuplot.write(b"set term postscript color solid\n") + #gnuplot.write("set output \"{0}.ps\"\n".format(dataset_title).encode().encode()) + elif is_win32: + gnuplot.write(b"set term windows\n") + else: + gnuplot.write( b"set term x11\n") + gnuplot.write(b"set xlabel \"log2(C)\"\n") + gnuplot.write(b"set ylabel \"log2(gamma)\"\n") + gnuplot.write("set xrange [{0}:{1}]\n".format(c_begin,c_end).encode()) + gnuplot.write("set yrange [{0}:{1}]\n".format(g_begin,g_end).encode()) + gnuplot.write(b"set contour\n") + gnuplot.write("set cntrparam levels incremental {0},{1},100\n".format(begin_level,step_size).encode()) + gnuplot.write(b"unset surface\n") + gnuplot.write(b"unset ztics\n") + gnuplot.write(b"set view 0,0\n") + gnuplot.write("set title \"{0}\"\n".format(dataset_title).encode()) + gnuplot.write(b"unset label\n") + gnuplot.write("set label \"Best log2(C) = {0} log2(gamma) = {1} accuracy = {2}%\" \ + at screen 0.5,0.85 center\n". \ + format(best_log2c, best_log2g, best_rate).encode()) + gnuplot.write("set label \"C = {0} gamma = {1}\"" + " at screen 0.5,0.8 center\n".format(2**best_log2c, 2**best_log2g).encode()) + gnuplot.write(b"set key at screen 0.9,0.9\n") + gnuplot.write(b"splot \"-\" with lines\n") + + + + + db.sort(key = lambda x:(x[0], -x[1])) + + prevc = db[0][0] + for line in db: + if prevc != line[0]: + gnuplot.write(b"\n") + prevc = line[0] + gnuplot.write("{0[0]} {0[1]} {0[2]}\n".format(line).encode()) + gnuplot.write(b"e\n") + gnuplot.write(b"\n") # force gnuplot back to prompt when term set failure + gnuplot.flush() def calculate_jobs(): - c_seq = permute_sequence(range_f(c_begin,c_end,c_step)) - g_seq = permute_sequence(range_f(g_begin,g_end,g_step)) - nr_c = float(len(c_seq)) - nr_g = float(len(g_seq)) - i = 0 - j = 0 - jobs = [] - - while i < nr_c or j < nr_g: - if i/nr_c < j/nr_g: - # increase C resolution - line = [] - for k in range(0,j): - line.append((c_seq[i],g_seq[k])) - i = i + 1 - jobs.append(line) - else: - # increase g resolution - line = [] - for k in range(0,i): - line.append((c_seq[k],g_seq[j])) - j = j + 1 - jobs.append(line) - return jobs + c_seq = permute_sequence(range_f(c_begin,c_end,c_step)) + g_seq = permute_sequence(range_f(g_begin,g_end,g_step)) + nr_c = float(len(c_seq)) + nr_g = float(len(g_seq)) + i = 0 + j = 0 + jobs = [] + + while i < nr_c or j < nr_g: + if i/nr_c < j/nr_g: + # increase C resolution + line = [] + for k in range(0,j): + line.append((c_seq[i],g_seq[k])) + i = i + 1 + jobs.append(line) + else: + # increase g resolution + line = [] + for k in range(0,i): + line.append((c_seq[k],g_seq[j])) + j = j + 1 + jobs.append(line) + return jobs class WorkerStopToken: # used to notify the worker to stop - pass + pass class Worker(Thread): - def __init__(self,name,job_queue,result_queue): - Thread.__init__(self) - self.name = name - self.job_queue = job_queue - self.result_queue = result_queue - def run(self): - while True: - (cexp,gexp) = self.job_queue.get() - if cexp is WorkerStopToken: - self.job_queue.put((cexp,gexp)) - # print('worker {0} stop.'.format(self.name)) - break - try: - rate = self.run_one(2.0**cexp,2.0**gexp) - if rate is None: raise RuntimeError("get no rate") - except: - # we failed, let others do that 
and we just quit - - traceback.print_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]) - - self.job_queue.put((cexp,gexp)) - print('worker {0} quit.'.format(self.name)) - break - else: - self.result_queue.put((self.name,cexp,gexp,rate)) + def __init__(self,name,job_queue,result_queue): + Thread.__init__(self) + self.name = name + self.job_queue = job_queue + self.result_queue = result_queue + def run(self): + while True: + (cexp,gexp) = self.job_queue.get() + if cexp is WorkerStopToken: + self.job_queue.put((cexp,gexp)) + # print('worker {0} stop.'.format(self.name)) + break + try: + rate = self.run_one(2.0**cexp,2.0**gexp) + if rate is None: raise RuntimeError("get no rate") + except: + # we failed, let others do that and we just quit + + traceback.print_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]) + + self.job_queue.put((cexp,gexp)) + print('worker {0} quit.'.format(self.name)) + break + else: + self.result_queue.put((self.name,cexp,gexp,rate)) class LocalWorker(Worker): - def run_one(self,c,g): - cmdline = '{0} -c {1} -g {2} -v {3} {4} {5}'.format \ - (svmtrain_exe,c,g,fold,pass_through_string,dataset_pathname) - result = Popen(cmdline,shell=True,stdout=PIPE).stdout - for line in result.readlines(): - if str(line).find("Cross") != -1: - return float(line.split()[-1][0:-1]) + def run_one(self,c,g): + cmdline = '{0} -c {1} -g {2} -v {3} {4} {5}'.format \ + (svmtrain_exe,c,g,fold,pass_through_string,dataset_pathname) + result = Popen(cmdline,shell=True,stdout=PIPE).stdout + for line in result.readlines(): + if str(line).find("Cross") != -1: + return float(line.split()[-1][0:-1]) class SSHWorker(Worker): - def __init__(self,name,job_queue,result_queue,host): - Worker.__init__(self,name,job_queue,result_queue) - self.host = host - self.cwd = os.getcwd() - def run_one(self,c,g): - cmdline = 'ssh -x {0} "cd {1}; {2} -c {3} -g {4} -v {5} {6} {7}"'.format \ - (self.host,self.cwd, \ - svmtrain_exe,c,g,fold,pass_through_string,dataset_pathname) - result = Popen(cmdline,shell=True,stdout=PIPE).stdout - for line in result.readlines(): - if str(line).find("Cross") != -1: - return float(line.split()[-1][0:-1]) + def __init__(self,name,job_queue,result_queue,host): + Worker.__init__(self,name,job_queue,result_queue) + self.host = host + self.cwd = os.getcwd() + def run_one(self,c,g): + cmdline = 'ssh -x {0} "cd {1}; {2} -c {3} -g {4} -v {5} {6} {7}"'.format \ + (self.host,self.cwd, \ + svmtrain_exe,c,g,fold,pass_through_string,dataset_pathname) + result = Popen(cmdline,shell=True,stdout=PIPE).stdout + for line in result.readlines(): + if str(line).find("Cross") != -1: + return float(line.split()[-1][0:-1]) class TelnetWorker(Worker): - def __init__(self,name,job_queue,result_queue,host,username,password): - Worker.__init__(self,name,job_queue,result_queue) - self.host = host - self.username = username - self.password = password - def run(self): - import telnetlib - self.tn = tn = telnetlib.Telnet(self.host) - tn.read_until("login: ") - tn.write(self.username + "\n") - tn.read_until("Password: ") - tn.write(self.password + "\n") - - # XXX: how to know whether login is successful? 
- tn.read_until(self.username) - # - print('login ok', self.host) - tn.write("cd "+os.getcwd()+"\n") - Worker.run(self) - tn.write("exit\n") - def run_one(self,c,g): - cmdline = '{0} -c {1} -g {2} -v {3} {4} {5}'.format \ - (svmtrain_exe,c,g,fold,pass_through_string,dataset_pathname) - result = self.tn.write(cmdline+'\n') - (idx,matchm,output) = self.tn.expect(['Cross.*\n']) - for line in output.split('\n'): - if str(line).find("Cross") != -1: - return float(line.split()[-1][0:-1]) + def __init__(self,name,job_queue,result_queue,host,username,password): + Worker.__init__(self,name,job_queue,result_queue) + self.host = host + self.username = username + self.password = password + def run(self): + import telnetlib + self.tn = tn = telnetlib.Telnet(self.host) + tn.read_until("login: ") + tn.write(self.username + "\n") + tn.read_until("Password: ") + tn.write(self.password + "\n") + + # XXX: how to know whether login is successful? + tn.read_until(self.username) + # + print('login ok', self.host) + tn.write("cd "+os.getcwd()+"\n") + Worker.run(self) + tn.write("exit\n") + def run_one(self,c,g): + cmdline = '{0} -c {1} -g {2} -v {3} {4} {5}'.format \ + (svmtrain_exe,c,g,fold,pass_through_string,dataset_pathname) + result = self.tn.write(cmdline+'\n') + (idx,matchm,output) = self.tn.expect(['Cross.*\n']) + for line in output.split('\n'): + if str(line).find("Cross") != -1: + return float(line.split()[-1][0:-1]) def bits(n): - while n: - b = n & (~n+1) - yield b - n ^= b - + while n: + b = n & (~n+1) + yield b + n ^= b + ############################################################################### # Learning Interface ############################################################################### @@ -255,93 +255,93 @@ def bits(n): ALL = sum(2**i for i in range(3)) def train(model_filename, type=ALL): - for t in bits(type): - if t == SVM: - filename = model_filename + ".svm" - command = [svm_train, "-c", "50", "-g", "0.03", "-w0", "0.5", filename, filename + ".trained"] - - if t == LIN: - filename = model_filename + ".lin" - command = [lin_train, "-c", "50", "-w0", "0.5", filename, filename + ".trained"] - - if t == CRF: - filename = model_filename + ".crf" - command = [crf_suite, "learn", "-m", filename + ".trained", filename] - - output, error = Popen(command, stdout = PIPE, stderr = PIPE).communicate() - #print output - #print error - + for t in bits(type): + if t == SVM: + filename = model_filename + ".svm" + command = [svm_train, "-c", "50", "-g", "0.03", "-w0", "0.5", filename, filename + ".trained"] + + if t == LIN: + filename = model_filename + ".lin" + command = [lin_train, "-c", "50", "-w0", "0.5", filename, filename + ".trained"] + + if t == CRF: + filename = model_filename + ".crf" + command = [crf_suite, "learn", "-m", filename + ".trained", filename] + + output, error = Popen(command, stdout = PIPE, stderr = PIPE).communicate() + #print output + #print error + def predict(model_filename, type=ALL): - for t in bits(type): - if t == SVM: - filename = model_filename + ".svm" - command = [svm_predict, filename + ".test.in", filename + ".trained", filename + ".test.out"] - - if t == LIN: - filename = model_filename + ".lin" - command = [lin_predict, filename + ".test.in", filename + ".trained", filename + ".test.out"] - - if t == CRF: - filename = model_filename + ".crf" - command = [crf_suite, "tag", "-m", filename + ".trained" , filename + ".test.in"] # NEEDS OUTPUT - - output, error = Popen(command, stdout = PIPE, stderr = PIPE).communicate() - - if t == CRF: - with open(filename + 
".test.out", "w") as f: - for line in output.split(): - f.write(line + "\n") - + for t in bits(type): + if t == SVM: + filename = model_filename + ".svm" + command = [svm_predict, filename + ".test.in", filename + ".trained", filename + ".test.out"] + + if t == LIN: + filename = model_filename + ".lin" + command = [lin_predict, filename + ".test.in", filename + ".trained", filename + ".test.out"] + + if t == CRF: + filename = model_filename + ".crf" + command = [crf_suite, "tag", "-m", filename + ".trained" , filename + ".test.in"] # NEEDS OUTPUT + + output, error = Popen(command, stdout = PIPE, stderr = PIPE).communicate() + + if t == CRF: + with open(filename + ".test.out", "w") as f: + for line in output.split(): + f.write(line + "\n") + def write_features(model_filename, rows, labels, type=ALL): - for t in bits(type): - if t == SVM: - file_suffix = ".svm" + (".test.in" if not labels else "") - null_label, feature_sep, sentence_sep = "-1", ":", "" - - if t == LIN: - file_suffix = ".lin" + (".test.in" if not labels else "") - null_label, feature_sep, sentence_sep = "-1", ":", "" - - if t == CRF: - file_suffix = ".crf" + (".test.in" if not labels else "") - null_label, feature_sep, sentence_sep = "", "=", "\n" - - filename = model_filename + file_suffix - with open(filename, "w") as f: - for sentence_index, sentence in enumerate(rows): - if labels: - sentence_labels = labels[sentence_index] - assert "Dimension mismatch", len(sentence) == len(sentence_labels) - - for word_index, features in enumerate(sentence): - if labels: - label = sentence_labels[word_index] - line = [str(label)] - else: - line = [null_label] - - for k,v in sorted(features.items()): - line.append(str(k) + feature_sep + str(v)) - - f.write("\t".join(line).strip() + "\n") - - f.write(sentence_sep) - + for t in bits(type): + if t == SVM: + file_suffix = ".svm" + (".test.in" if not labels else "") + null_label, feature_sep, sentence_sep = "-1", ":", "" + + if t == LIN: + file_suffix = ".lin" + (".test.in" if not labels else "") + null_label, feature_sep, sentence_sep = "-1", ":", "" + + if t == CRF: + file_suffix = ".crf" + (".test.in" if not labels else "") + null_label, feature_sep, sentence_sep = "", "=", "\n" + + filename = model_filename + file_suffix + with open(filename, "w") as f: + for sentence_index, sentence in enumerate(rows): + if labels: + sentence_labels = labels[sentence_index] + assert "Dimension mismatch", len(sentence) == len(sentence_labels) + + for word_index, features in enumerate(sentence): + if labels: + label = sentence_labels[word_index] + line = [str(label)] + else: + line = [null_label] + + for k,v in sorted(features.items()): + line.append(str(k) + feature_sep + str(v)) + + f.write("\t".join(line).strip() + "\n") + + f.write(sentence_sep) + def read_labels(model_filename, type=ALL): - labels = {} - for t in bits(type): - if t == SVM: - filename = model_filename + ".svm.test.out" - - if t == LIN: - filename = model_filename + ".lin.test.out" - - if t == CRF: - filename = model_filename + ".crf.test.out" - - with open(filename) as f: - lines = f.readlines() - labels[t] = [line.strip() for line in lines] - - return labels \ No newline at end of file + labels = {} + for t in bits(type): + if t == SVM: + filename = model_filename + ".svm.test.out" + + if t == LIN: + filename = model_filename + ".lin.test.out" + + if t == CRF: + filename = model_filename + ".crf.test.out" + + with open(filename) as f: + lines = f.readlines() + labels[t] = [line.strip() for line in lines] + + return labels \ No newline 
at end of file diff --git a/code/model.py b/code/model.py index 002b162..782b462 100644 --- a/code/model.py +++ b/code/model.py @@ -19,358 +19,356 @@ import libml class Model: - sentence_features = ImmutableSet(["pos", "stem_wordnet", "test_result", "prev", "next"]) - word_features = ImmutableSet(["word", "length", "mitre", "stem_porter", "stem_lancaster", "word_shape"]) - # THESE ARE FEATURES I TRIED THAT DON'T LOOK THAT PROMISING - # I have some faith in "metric_unit" and "has_problem_form" - # "radial_loc" may be too rare and "def_class" could be over fitting - # "metric_unit", "radial_loc", "has_problem_form", "def_class" - - labels = { - "none":0, - "treatment":1, - "problem":2, - "test":3 - } - reverse_labels = {v:k for k, v in labels.items()} - - @staticmethod - def load(filename='awesome.model'): - with open(filename) as model: - model = pickle.load(model) - model.filename = filename - return model + sentence_features = ImmutableSet(["pos", "stem_wordnet", "test_result", "prev", "next"]) + word_features = ImmutableSet(["word", "length", "mitre", "stem_porter", "stem_lancaster", "word_shape"]) + # THESE ARE FEATURES I TRIED THAT DON'T LOOK THAT PROMISING + # I have some faith in "metric_unit" and "has_problem_form" + # "radial_loc" may be too rare and "def_class" could be over fitting + # "metric_unit", "radial_loc", "has_problem_form", "def_class" + + labels = { + "none": 0, + "treatment": 1, + "problem": 2, + "test": 3 + } + reverse_labels = {v: k for k, v in labels.items()} + + @staticmethod + def load(filename='awesome.model'): + with open(filename) as model: + model = pickle.load(model) + model.filename = filename + return model # Constructor - def __init__(self, filename='awesome.model', type=libml.ALL): - model_directory = os.path.dirname(filename) - - if model_directory != "": - helper.mkpath(model_directory) - - self.filename = os.path.realpath(filename) - self.type = type - self.vocab = {} - - self.enabled_features = Model.sentence_features | Model.word_features - - + def __init__(self, filename='awesome.model', type=libml.ALL): + model_directory = os.path.dirname(filename) + + if model_directory != "": + helper.mkpath(model_directory) + + self.filename = os.path.realpath(filename) + self.type = type + self.vocab = {} + + self.enabled_features = Model.sentence_features | Model.word_features + + # Model::train() # # @param note. 
A Note object that has data for training the model - def train(self, note): + def train(self, note): - # Get the data and annotations from the Note object + # Get the data and annotations from the Note object - # data - A list of list of the medical text's words - # labels - A list of list of concepts (1:1 with data) - data = note.txtlist() - labels = note.conlist() + # data - A list of list of the medical text's words + # labels - A list of list of concepts (1:1 with data) + data = note.txtlist() + labels = note.conlist() # rows is a list of a list of hash tables - rows = [] - for sentence in data: - rows.append(self.features_for_sentence(sentence)) - + rows = [] + for sentence in data: + rows.append(self.features_for_sentence(sentence)) + # each list of hash tables - for row in rows: + for row in rows: # each hash table - for features in row: + for features in row: # each key in hash table - for feature in features: + for feature in features: # I think new word encountered - if feature not in self.vocab: - self.vocab[feature] = len(self.vocab) + 1 + if feature not in self.vocab: + self.vocab[feature] = len(self.vocab) + 1 # A list of a list encodings of concept labels (ex. 'none' => 0) # [ [0, 0, 0], [0], [0, 0, 0], [0], [0, 0, 0, 0, 0, 2, 2, 0, 1] ] - label_lu = lambda l: Model.labels[l] - labels = [map(label_lu, x) for x in labels] + label_lu = lambda l: Model.labels[l] + labels = [map(label_lu, x) for x in labels] + - # list of a list of hash tables (all keys & values now numbers) - feat_lu = lambda f: {self.vocab[item]:f[item] for item in f} - rows = [map(feat_lu, x) for x in rows] - + feat_lu = lambda f: {self.vocab[item]: f[item] for item in f} + rows = [map(feat_lu, x) for x in rows] + - libml.write_features(self.filename, rows, labels, self.type) + libml.write_features(self.filename, rows, labels, self.type) - with open(self.filename, "w") as model: - pickle.dump(self, model) + with open(self.filename, "w") as model: + pickle.dump(self, model) # Train the model - libml.train(self.filename, self.type) + libml.train(self.filename, self.type) + - # Model::predict() # # @param note. 
A Note object that contains the training data - def predict(self, note): + def predict(self, note): - # data - A list of list of the medical text's words - data = note.txtlist() - + # data - A list of list of the medical text's words + data = note.txtlist() - # Something to do with calibrating the model - rows = [] # rows <- list of a list of hash tables (feature vectors) - for sentence in data: - rows.append(self.features_for_sentence(sentence)) + # Something to do with calibrating the model + rows = [] # rows <- list of a list of hash tables (feature vectors) + for sentence in data: + rows.append(self.features_for_sentence(sentence)) - feat_lu = lambda f: {self.vocab[item]:f[item] for item in f if item in self.vocab} - rows = [map(feat_lu, x) for x in rows] - libml.write_features(self.filename, rows, None, self.type); + feat_lu = lambda f: {self.vocab[item]: f[item] for item in f if item in self.vocab} + rows = [map(feat_lu, x) for x in rows] + libml.write_features(self.filename, rows, None, self.type) # Use the trained model to make predictions - libml.predict(self.filename, self.type) + libml.predict(self.filename, self.type) + - # A hash table - # the keys are 1,2,4 (SVM, LIN, and CRF) - # each value is a list of concept labels encodings - labels_list = libml.read_labels(self.filename, self.type) - + # the keys are 1,2,4 (SVM, LIN, and CRF) + # each value is a list of concept labels encodings + labels_list = libml.read_labels(self.filename, self.type) + # translate labels_list into a readable format # ex. change all occurences of 0 -> 'none' - for t, labels in labels_list.items(): - tmp = [] - for sentence in data: - tmp.append([labels.pop(0) for i in range(len(sentence))]) - tmp[-1] = map(lambda l: l.strip(), tmp[-1]) - tmp[-1] = map(lambda l: Model.reverse_labels[int(l)],tmp[-1]) - labels_list[t] = tmp + for t, labels in labels_list.items(): + tmp = [] + for sentence in data: + tmp.append([labels.pop(0) for i in range(len(sentence))]) + tmp[-1] = map(lambda l: l.strip(), tmp[-1]) + tmp[-1] = map(lambda l: Model.reverse_labels[int(l)], tmp[-1]) + labels_list[t] = tmp # The new labels_list is a translated version - return labels_list + return labels_list # input: A sentence from a medical text file (list of words) # output: A list of hash tables - def features_for_sentence(self, sentence): - features_list = [] - - for word in sentence: - features_list.append(self.features_for_word(word)) - - tags = None - for feature in Model.sentence_features: - if feature not in self.enabled_features: - continue - - if feature == "pos": - tags = tags or nltk.pos_tag(sentence) - for i, features in enumerate(features_list): - tag = tags[i][1] - features[(feature, tag)] = 1 - - if feature == "stem_wordnet": - tags = tags or nltk.pos_tag(sentence) - morphy_tags = { - 'NN':nltk.corpus.reader.wordnet.NOUN, - 'JJ':nltk.corpus.reader.wordnet.ADJ, - 'VB':nltk.corpus.reader.wordnet.VERB, - 'RB':nltk.corpus.reader.wordnet.ADV} - morphy_tags = [(w, morphy_tags.setdefault(t[:2], nltk.corpus.reader.wordnet.NOUN)) for w,t in tags] - st = nltk.stem.WordNetLemmatizer() - for i, features in enumerate(features_list): - tag = morphy_tags[i] - features[(feature, st.lemmatize(*tag))] = 1 - - if feature == "test_result": - for index, features in enumerate(features_list): - right = " ".join([w for w in sentence[index:]]) - if self.is_test_result(right): - features[(feature, None)] = 1 - - - ngram_features = [{} for i in range(len(features_list))] - if "prev" in self.enabled_features: - prev = lambda f: {("prev_"+k[0], 
k[1]): v for k,v in f.items()} - prev_list = map(prev, features_list) - for i in range(len(features_list)): - if i == 0: - ngram_features[i][("prev", "*")] = 1 - else: - ngram_features[i].update(prev_list[i-1]) - - if "next" in self.enabled_features: - next = lambda f: {("next_"+k[0], k[1]): v for k,v in f.items()} - next_list = map(next, features_list) - for i in range(len(features_list)): - if i == len(features_list) - 1: - ngram_features[i][("next", "*")] = 1 - else: - ngram_features[i].update(next_list[i+1]) - - merged = lambda d1, d2: dict(d1.items() + d2.items()) - features_list = [merged(features_list[i], ngram_features[i]) - for i in range(len(features_list))] - - return features_list - - - - # input: a single word, like + def features_for_sentence(self, sentence): + features_list = [] + + for word in sentence: + features_list.append(self.features_for_word(word)) + + tags = None + for feature in Model.sentence_features: + if feature not in self.enabled_features: + continue + + if feature == "pos": + tags = tags or nltk.pos_tag(sentence) + for i, features in enumerate(features_list): + tag = tags[i][1] + features[(feature, tag)] = 1 + + if feature == "stem_wordnet": + tags = tags or nltk.pos_tag(sentence) + morphy_tags = { + 'NN': nltk.corpus.reader.wordnet.NOUN, + 'JJ': nltk.corpus.reader.wordnet.ADJ, + 'VB': nltk.corpus.reader.wordnet.VERB, + 'RB': nltk.corpus.reader.wordnet.ADV} + morphy_tags = [(w, morphy_tags.setdefault(t[:2], nltk.corpus.reader.wordnet.NOUN)) for w, t in tags] + st = nltk.stem.WordNetLemmatizer() + for i, features in enumerate(features_list): + tag = morphy_tags[i] + features[(feature, st.lemmatize(*tag))] = 1 + + if feature == "test_result": + for index, features in enumerate(features_list): + right = " ".join([w for w in sentence[index:]]) + if self.is_test_result(right): + features[(feature, None)] = 1 + + + ngram_features = [{} for i in range(len(features_list))] + if "prev" in self.enabled_features: + prev = lambda f: {("prev_"+k[0], k[1]): v for k, v in f.items()} + prev_list = map(prev, features_list) + for i in range(len(features_list)): + if i == 0: + ngram_features[i][("prev", "*")] = 1 + else: + ngram_features[i].update(prev_list[i-1]) + + if "next" in self.enabled_features: + next = lambda f: {("next_"+k[0], k[1]): v for k, v in f.items()} + next_list = map(next, features_list) + for i in range(len(features_list)): + if i == len(features_list) - 1: + ngram_features[i][("next", "*")] = 1 + else: + ngram_features[i].update(next_list[i+1]) + + merged = lambda d1, d2: dict(d1.items() + d2.items()) + features_list = [merged(features_list[i], ngram_features[i]) + for i in range(len(features_list))] + + return features_list + + + + # input: a single word, like # Admission # output: A hash table of features # features include: word, length, mitre, stem_porter - def features_for_word(self, word): - features = {'dummy':1} # always have >0 dimensions + def features_for_word(self, word): + features = {'dummy': 1} # always have >0 dimensions # word_shape, word, length, mitre, stem_porter, stem_lancaster - for feature in Model.word_features: + for feature in Model.word_features: # word_shape, test_result, word, pos, next, length, stem_wordnet, mitre, stem_porter, prev, stem_lancaster - if feature not in self.enabled_features: - continue - - if feature == "word": - features[(feature, word)] = 1 - - if feature == "length": - features[(feature, None)] = len(word) - - if feature == "mitre": - for f in Model.mitre_features: - if re.search(Model.mitre_features[f], 
word): - features[(feature, f)] = 1 - - if feature == "stem_porter": - st = nltk.stem.PorterStemmer() - features[(feature, st.stem(word))] = 1 - - if feature == "stem_lancaster": - st = nltk.stem.LancasterStemmer() - features[(feature, st.stem(word))] = 1 - - if feature == "stem_snowball": - st = nltk.stem.SnowballStemmer("english") - #features[(feature, st.stem(word))] = 1 - - if feature == "word_shape": - wordShapes = getWordShapes(word) - for i, shape in enumerate(wordShapes): - features[(feature + str(i), shape)] = 1 - - if feature == "metric_unit": - unit = 0 - if self.is_weight(word): - unit = 1 - elif self.is_size(word): - unit = 2 - features[(feature, None)] = unit - - # look for prognosis locaiton - #if feature == "radial_loc": - # THIS MIGHT BE BUGGED - # if self.is_prognosis_location(word): - # features[(feature, None)] = 1 - - if feature == "has_problem_form": - if self.has_problem_form(word): - features[(feature, None)] = 1 - - if feature == "def_class": - features[(feature, None)] = self.get_def_class(word) - - return features - - mitre_features = { - "INITCAP" : r"^[A-Z].*$", - "ALLCAPS" : r"^[A-Z]+$", - "CAPSMIX" : r"^[A-Za-z]+$", - "HASDIGIT" : r"^.*[0-9].*$", - "SINGLEDIGIT" : r"^[0-9]$", - "DOUBLEDIGIT" : r"^[0-9][0-9]$", - "FOURDIGITS" : r"^[0-9][0-9][0-9][0-9]$", - "NATURALNUM" : r"^[0-9]+$", - "REALNUM" : r"^[0-9]+.[0-9]+$", - "ALPHANUM" : r"^[0-9A-Za-z]+$", - "HASDASH" : r"^.*-.*$", - "PUNCTUATION" : r"^[^A-Za-z0-9]+$", - "PHONE1" : r"^[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]$", - "PHONE2" : r"^[0-9][0-9][0-9]-[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]$", - "FIVEDIGIT" : r"^[0-9][0-9][0-9][0-9][0-9]", - "NOVOWELS" : r"^[^AaEeIiOoUu]+$", - "HASDASHNUMALPHA" : r"^.*[A-z].*-.*[0-9].*$ | *.[0-9].*-.*[0-9].*$", - "DATESEPERATOR" : r"^[-/]$", - } - - def is_test_result (self, context): - # note: make spaces optional? 
- regex = r"^[A-Za-z]+( )*(-|--|:|was|of|\*|>|<|more than|less than)( )*[0-9]+(%)*" - if not re.search(regex, context): - return re.search(r"^[A-Za-z]+ was (positive|negative)", context) - return True - - def is_weight (self, word): - regex = r"^[0-9]*(mg|g|milligrams|grams)$" - return re.search(regex, word) - - def is_size (self, word): - regex = r"^[0-9]*(mm|cm|millimeters|centimeters)$" - return re.search(regex, word) - - def is_prognosis_location (self, word): - regex = r"^(c|C)[0-9]+(-(c|C)[0-9]+)*$" - return re.search(regex, word) - - def has_problem_form (self, word): - regex = r".*(ic|is)$" - return re.search(regex, word) - - # checks for a definitive classification at the word level - def get_def_class (self, word): - test_terms = { - "eval", "evaluation", "evaluations", - "sat", "sats", "saturation", - "exam", "exams", - "rate", "rates", - "test", "tests", - "xray", "xrays", - "screen", "screens", - "level", "levels", - "tox" - } - problem_terms = { - "swelling", - "wound", "wounds", - "symptom", "symptoms", - "shifts", "failure", - "insufficiency", "insufficiencies", - "mass", "masses", - "aneurysm", "aneurysms", - "ulcer", "ulcers", - "trama", "cancer", - "disease", "diseased", - "bacterial", "viral", - "syndrome", "syndromes", - "pain", "pains" - "burns", "burned", - "broken", "fractured" - } - treatment_terms = { - "therapy", - "replacement", - "anesthesia", - "supplement", "supplemental", - "vaccine", "vaccines" - "dose", "doses", - "shot", "shots", - "medication", "medicine", - "treament", "treatments" - } - if word.lower() in test_terms: - return 1 - elif word.lower() in problem_terms: - return 2 - elif word.lower() in treatment_terms: - return 3 - return 0 - + if feature not in self.enabled_features: + continue + + if feature == "word": + features[(feature, word)] = 1 + + if feature == "length": + features[(feature, None)] = len(word) + + if feature == "mitre": + for f in Model.mitre_features: + if re.search(Model.mitre_features[f], word): + features[(feature, f)] = 1 + + if feature == "stem_porter": + st = nltk.stem.PorterStemmer() + features[(feature, st.stem(word))] = 1 + + if feature == "stem_lancaster": + st = nltk.stem.LancasterStemmer() + features[(feature, st.stem(word))] = 1 + + if feature == "stem_snowball": + st = nltk.stem.SnowballStemmer("english") + #features[(feature, st.stem(word))] = 1 + + if feature == "word_shape": + wordShapes = getWordShapes(word) + for i, shape in enumerate(wordShapes): + features[(feature + str(i), shape)] = 1 + + if feature == "metric_unit": + unit = 0 + if self.is_weight(word): + unit = 1 + elif self.is_size(word): + unit = 2 + features[(feature, None)] = unit + + # look for prognosis locaiton + #if feature == "radial_loc": + # THIS MIGHT BE BUGGED + # if self.is_prognosis_location(word): + # features[(feature, None)] = 1 + + if feature == "has_problem_form": + if self.has_problem_form(word): + features[(feature, None)] = 1 + + if feature == "def_class": + features[(feature, None)] = self.get_def_class(word) + + return features + + mitre_features = { + "INITCAP": r"^[A-Z].*$", + "ALLCAPS": r"^[A-Z]+$", + "CAPSMIX": r"^[A-Za-z]+$", + "HASDIGIT": r"^.*[0-9].*$", + "SINGLEDIGIT": r"^[0-9]$", + "DOUBLEDIGIT": r"^[0-9][0-9]$", + "FOURDIGITS": r"^[0-9][0-9][0-9][0-9]$", + "NATURALNUM": r"^[0-9]+$", + "REALNUM": r"^[0-9]+.[0-9]+$", + "ALPHANUM": r"^[0-9A-Za-z]+$", + "HASDASH": r"^.*-.*$", + "PUNCTUATION": r"^[^A-Za-z0-9]+$", + "PHONE1": r"^[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]$", + "PHONE2": 
r"^[0-9][0-9][0-9]-[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]$", + "FIVEDIGIT": r"^[0-9][0-9][0-9][0-9][0-9]", + "NOVOWELS": r"^[^AaEeIiOoUu]+$", + "HASDASHNUMALPHA": r"^.*[A-z].*-.*[0-9].*$ | *.[0-9].*-.*[0-9].*$", + "DATESEPERATOR": r"^[-/]$", + } + + def is_test_result(self, context): + # note: make spaces optional? + regex = r"^[A-Za-z]+( )*(-|--|:|was|of|\*|>|<|more than|less than)( )*[0-9]+(%)*" + if not re.search(regex, context): + return re.search(r"^[A-Za-z]+ was (positive|negative)", context) + return True + + def is_weight(self, word): + regex = r"^[0-9]*(mg|g|milligrams|grams)$" + return re.search(regex, word) + + def is_size(self, word): + regex = r"^[0-9]*(mm|cm|millimeters|centimeters)$" + return re.search(regex, word) + + def is_prognosis_location(self, word): + regex = r"^(c|C)[0-9]+(-(c|C)[0-9]+)*$" + return re.search(regex, word) + + def has_problem_form(self, word): + regex = r".*(ic|is)$" + return re.search(regex, word) + + # checks for a definitive classification at the word level + def get_def_class(self, word): + test_terms = { + "eval", "evaluation", "evaluations", + "sat", "sats", "saturation", + "exam", "exams", + "rate", "rates", + "test", "tests", + "xray", "xrays", + "screen", "screens", + "level", "levels", + "tox" + } + problem_terms = { + "swelling", + "wound", "wounds", + "symptom", "symptoms", + "shifts", "failure", + "insufficiency", "insufficiencies", + "mass", "masses", + "aneurysm", "aneurysms", + "ulcer", "ulcers", + "trama", "cancer", + "disease", "diseased", + "bacterial", "viral", + "syndrome", "syndromes", + "pain", "pains" + "burns", "burned", + "broken", "fractured" + } + treatment_terms = { + "therapy", + "replacement", + "anesthesia", + "supplement", "supplemental", + "vaccine", "vaccines" + "dose", "doses", + "shot", "shots", + "medication", "medicine", + "treament", "treatments" + } + if word.lower() in test_terms: + return 1 + elif word.lower() in problem_terms: + return 2 + elif word.lower() in treatment_terms: + return 3 + return 0 diff --git a/code/predict.py b/code/predict.py index 83f92bb..77737e2 100644 --- a/code/predict.py +++ b/code/predict.py @@ -11,42 +11,42 @@ def main(): parser = argparse.ArgumentParser() - parser.add_argument("-i", - dest = "input", - help = "The input files to predict", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_data/*') + parser.add_argument("-i", + dest = "input", + help = "The input files to predict", + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_data/*') ) - parser.add_argument("-o", - dest = "output", - help = "The directory to write the output", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_predictions') + parser.add_argument("-o", + dest = "output", + help = "The directory to write the output", + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_predictions') ) parser.add_argument("-m", - dest = "model", - help = "The model to use for prediction", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/run_models/run.model') + dest = "model", + help = "The model to use for prediction", + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/run_models/run.model') ) - + parser.add_argument("--no-svm", - dest = "no_svm", - action = "store_true", - help = "Disable SVM model generation", + dest = "no_svm", + action = "store_true", + help = "Disable SVM model generation", ) parser.add_argument("--no-lin", - dest = 
"no_lin", - action = "store_true", - help = "Disable LIN model generation", + dest = "no_lin", + action = "store_true", + help = "Disable LIN model generation", ) parser.add_argument("--no-crf", - dest = "no_crf", - action = "store_true", - help = "Disable CRF model generation", + dest = "no_crf", + action = "store_true", + help = "Disable CRF model generation", ) - + args = parser.parse_args() # Locate the test files @@ -59,45 +59,45 @@ def main(): # Determine what type of models to use (ex SVM vs. CRF) model = Model.load(args.model) if args.no_svm: - model.type &= ~libml.SVM + model.type &= ~libml.SVM if args.no_lin: - model.type &= ~libml.LIN + model.type &= ~libml.LIN if args.no_crf: - model.type &= ~libml.CRF - + model.type &= ~libml.CRF + for txt in files: - # Read the data into a Note object + # Read the data into a Note object note = Note() note.read_i2b2(txt) #note.read_plain(txt) # TEMP - in case of plain format # Use the model to predict the concept labels - labels = model.predict(note) + labels = model.predict(note) # labels (above) is a hash table # the keys are 1,2,4 (SVM, LIN, and CRF) # each value is a list of concept labels, like from the Note class - con = os.path.split(txt)[-1] - con = con[:-3] + 'con' - - for t in libml.bits(model.type): - if t == libml.SVM: - helper.mkpath(os.path.join(args.output, "svm")) - con_path = os.path.join(path, "svm", con) - if t == libml.LIN: - helper.mkpath(os.path.join(args.output, "lin")) - con_path = os.path.join(path, "lin", con) - if t == libml.CRF: - helper.mkpath(os.path.join(args.output, "crf")) - con_path = os.path.join(path, "crf", con) - + con = os.path.split(txt)[-1] + con = con[:-3] + 'con' + + for t in libml.bits(model.type): + if t == libml.SVM: + helper.mkpath(os.path.join(args.output, "svm")) + con_path = os.path.join(path, "svm", con) + if t == libml.LIN: + helper.mkpath(os.path.join(args.output, "lin")) + con_path = os.path.join(path, "lin", con) + if t == libml.CRF: + helper.mkpath(os.path.join(args.output, "crf")) + con_path = os.path.join(path, "crf", con) + # Output the concept predictions - note.write_i2b2(con_path, labels[t]) - #note.write_plain(con_path, labels[t]) # in case of plain format + note.write_i2b2(con_path, labels[t]) + #note.write_plain(con_path, labels[t]) # in case of plain format if __name__ == '__main__': diff --git a/code/runall.py b/code/runall.py index dd32a90..733b55a 100644 --- a/code/runall.py +++ b/code/runall.py @@ -35,54 +35,54 @@ # Calculate commands def get_cmds(function): - commands = [] - for i in range(1,n+1): - for c in itertools.combinations(features, i): - # commands from single feature up - cmd_features = [f if f in c else 'X' for f in features] - commands.append(function(cmd_features)) - - # commands from full feature down - cmd_features = ['X' if f in c else f for f in features] - commands.append(function(cmd_features)) - - # commands for full feature only - commands.append(function(features)) - return commands + commands = [] + for i in range(1, n+1): + for c in itertools.combinations(features, i): + # commands from single feature up + cmd_features = [f if f in c else 'X' for f in features] + commands.append(function(cmd_features)) + + # commands from full feature down + cmd_features = ['X' if f in c else f for f in features] + commands.append(function(cmd_features)) + + # commands for full feature only + commands.append(function(features)) + return commands # Drive process execution def execute(commands, sleep=1): - ps = [] - for cmd in commands: - p = Popen(cmd) - ps.append(p) 
- print "Done" - - was_finished = -1 - while True: - ps_status = [p.poll() for p in ps] - now_finished = len(ps_status) - ps_status.count(None) - if now_finished > was_finished: - print "\tCompleted: %d/%d\r" % (now_finished, len(ps_status)), - sys.stdout.flush() - if now_finished == len(ps_status): - break - was_finished = now_finished - time.sleep(sleep) + ps = [] + for cmd in commands: + p = Popen(cmd) + ps.append(p) + print "Done" + + was_finished = -1 + while True: + ps_status = [p.poll() for p in ps] + now_finished = len(ps_status) - ps_status.count(None) + if now_finished > was_finished: + print "\tCompleted: %d/%d\r" % (now_finished, len(ps_status)), + sys.stdout.flush() + if now_finished == len(ps_status): + break + was_finished = now_finished + time.sleep(sleep) ############################################################################### # Train ############################################################################### def train(cmd_features): - modelname = os.path.join(model_path, "-".join(cmd_features), "model") - cmd = ["python", our_train, "-m", modelname] - cmd += ["--no-svm"] - cmd += ["-e"] + [f for f in cmd_features if f != 'X'] - if _TEST: - cmd += ["-t", os.path.join(data_path, "concept_assertion_relation_training_data", "merged", "txt", "record-105.txt")] - cmd += ["-c", os.path.join(data_path, "concept_assertion_relation_training_data", "merged", "concept", "record-105.con")] - return cmd - + modelname = os.path.join(model_path, "-".join(cmd_features), "model") + cmd = ["python", our_train, "-m", modelname] + cmd += ["--no-svm"] + cmd += ["-e"] + [f for f in cmd_features if f != 'X'] + if _TEST: + cmd += ["-t", os.path.join(data_path, "concept_assertion_relation_training_data", "merged", "txt", "record-105.txt")] + cmd += ["-c", os.path.join(data_path, "concept_assertion_relation_training_data", "merged", "concept", "record-105.con")] + return cmd + print "BEGIN train" print "\tCalculating training commands...", commands = get_cmds(train) @@ -97,15 +97,15 @@ def train(cmd_features): # Predict ############################################################################### def predict(cmd_features): - modelname = os.path.join(model_path, "-".join(cmd_features), "model") - cmd = ["python", our_predict, "-m", modelname] - cmd += ["-o", os.path.join(model_path, "-".join(cmd_features), "test_predictions")] - if _TEST: - cmd += ["-i", os.path.join(data_path, "concept_assertion_relation_training_data", "merged", "txt", "record-105.txt")] - else: - cmd += ["-i", os.path.join(data_path, "test_data", "*")] - return cmd - + modelname = os.path.join(model_path, "-".join(cmd_features), "model") + cmd = ["python", our_predict, "-m", modelname] + cmd += ["-o", os.path.join(model_path, "-".join(cmd_features), "test_predictions")] + if _TEST: + cmd += ["-i", os.path.join(data_path, "concept_assertion_relation_training_data", "merged", "txt", "record-105.txt")] + else: + cmd += ["-i", os.path.join(data_path, "test_data", "*")] + return cmd + print "BEGIN predict" print "\tCalculating prediction commands...", commands = get_cmds(predict) @@ -120,17 +120,17 @@ def predict(cmd_features): # Evaluate ############################################################################### def evaluate(cmd_features): - cmd = ["python", our_evaluate] - cmd += ["-c", os.path.join(model_path, "-".join(cmd_features), "test_predictions")] - cmd += ["-o", os.path.join(model_path, "-".join(cmd_features), "evaluation.txt")] - if _TEST: - cmd += ["-t", os.path.join(data_path, 
"concept_assertion_relation_training_data", "merged", "txt", "record-105.txt")] - cmd += ["-r", os.path.join(data_path, "concept_assertion_relation_training_data", "merged", "concept")] - else: - cmd += ["-t", os.path.join(data_path, "test_data", "*")] - cmd += ["-r", os.path.join(data_path, "reference_standard_for_test_data", "concepts")] - return cmd - + cmd = ["python", our_evaluate] + cmd += ["-c", os.path.join(model_path, "-".join(cmd_features), "test_predictions")] + cmd += ["-o", os.path.join(model_path, "-".join(cmd_features), "evaluation.txt")] + if _TEST: + cmd += ["-t", os.path.join(data_path, "concept_assertion_relation_training_data", "merged", "txt", "record-105.txt")] + cmd += ["-r", os.path.join(data_path, "concept_assertion_relation_training_data", "merged", "concept")] + else: + cmd += ["-t", os.path.join(data_path, "test_data", "*")] + cmd += ["-r", os.path.join(data_path, "reference_standard_for_test_data", "concepts")] + return cmd + print "BEGIN evaluate" print "\tCalculating evaluation commands...", commands = get_cmds(evaluate) @@ -139,4 +139,4 @@ def evaluate(cmd_features): execute(commands) print "\tCompleted evaluation." print "END evaluate" -print \ No newline at end of file +print diff --git a/code/statistics.py b/code/statistics.py index 4cb0285..7aa86d2 100644 --- a/code/statistics.py +++ b/code/statistics.py @@ -7,40 +7,40 @@ from note import * + def main(): - data_paths = { - 'test': ('../data/test_data/*', '../data/reference_standard_for_test_data/concepts/'), - 'train': ('../data/concept_assertion_relation_training_data/merged/txt/*', '../data/concept_assertion_relation_training_data/merged/concept') - } - - for type, paths in data_paths.items(): - full_path = lambda f: os.path.join(os.path.dirname(os.path.realpath(__file__)), f) - args_txt = full_path(paths[0]) - args_ref = full_path(paths[1]) - - txt_files = glob.glob(args_txt) - ref_files = os.listdir(args_ref) - ref_files = map(lambda f: os.path.join(args_ref, f), ref_files) - - txt_files_map = helper.map_files(txt_files) - ref_files_map = helper.map_files(ref_files) - - files = [] - for k in txt_files_map: - if k in ref_files_map: - files.append((txt_files_map[k], ref_files_map[k])) - - labels = {} - for txt, ref in files: - txt = read_txt(txt) - for r in read_con(ref, txt): - for r in r: - if r not in labels: - labels[r] = 0 - labels[r] += 1 - - print type, labels - + data_paths = { + 'test': ('../data/test_data/*', '../data/reference_standard_for_test_data/concepts/'), + 'train': ('../data/concept_assertion_relation_training_data/merged/txt/*', '../data/concept_assertion_relation_training_data/merged/concept') + } + + for type, paths in data_paths.items(): + full_path = lambda f: os.path.join(os.path.dirname(os.path.realpath(__file__)), f) + args_txt = full_path(paths[0]) + args_ref = full_path(paths[1]) + + txt_files = glob.glob(args_txt) + ref_files = os.listdir(args_ref) + ref_files = map(lambda f: os.path.join(args_ref, f), ref_files) + + txt_files_map = helper.map_files(txt_files) + ref_files_map = helper.map_files(ref_files) + + files = [] + for k in txt_files_map: + if k in ref_files_map: + files.append((txt_files_map[k], ref_files_map[k])) + + labels = {} + for txt, ref in files: + txt = read_txt(txt) + for r in read_con(ref, txt): + for r in r: + if r not in labels: + labels[r] = 0 + labels[r] += 1 + + print type, labels + if __name__ == "__main__": - main() - \ No newline at end of file + main() diff --git a/code/train.py b/code/train.py index aee4d07..6ab964f 100644 --- a/code/train.py 
+++ b/code/train.py @@ -1,6 +1,5 @@ import os import os.path -import sys import glob import argparse import helper @@ -10,60 +9,61 @@ from model import Model from note import * + def main(): parser = argparse.ArgumentParser() - parser.add_argument("-t", - dest = "txt", - help = "The files that contain the training examples", - #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/merged/txt/*') - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/beth/txt/record-33.txt') + parser.add_argument("-t", + dest = "txt", + help = "The files that contain the training examples", + #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/merged/txt/*') + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/beth/txt/record-33.txt') ) - - parser.add_argument("-c", - dest = "con", - help = "The files that contain the labels for the training examples", - #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/merged/concept/*') - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/beth/concept/record-33.txt') + + parser.add_argument("-c", + dest = "con", + help = "The files that contain the labels for the training examples", + #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/merged/concept/*') + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/beth/concept/record-33.txt') ) - + parser.add_argument("-m", - dest = "model", - help = "Path to the model that should be generated", - #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/awesome.model') - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/run_models/run.model') + dest = "model", + help = "Path to the model that should be generated", + #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/awesome.model') + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/run_models/run.model') ) parser.add_argument("-d", - dest = "disabled_features", - help = "The features that should not be used", - nargs = "+", - default = None + dest = "disabled_features", + help = "The features that should not be used", + nargs = "+", + default = None ) parser.add_argument("-e", - dest = "enabled_features", - help = "The features that should be used. This option trumps -d", - nargs = "+", - default = None + dest = "enabled_features", + help = "The features that should be used. 
This option trumps -d", + nargs = "+", + default = None ) parser.add_argument("--no-svm", - dest = "no_svm", - action = "store_true", - help = "Disable SVM model generation", + dest = "no_svm", + action = "store_true", + help = "Disable SVM model generation", ) parser.add_argument("--no-lin", - dest = "no_lin", - action = "store_true", - help = "Disable LIN model generation", + dest = "no_lin", + action = "store_true", + help = "Disable LIN model generation", ) parser.add_argument("--no-crf", - dest = "no_crf", - action = "store_true", - help = "Disable CRF model generation", + dest = "no_crf", + action = "store_true", + help = "Disable CRF model generation", ) @@ -86,23 +86,21 @@ def main(): # ex. training_list = [ ('record-13.txt', 'record-13.con') ] training_list = [] for k in txt_files_map: - if k in con_files_map: - training_list.append((txt_files_map[k], con_files_map[k])) + if k in con_files_map: + training_list.append((txt_files_map[k], con_files_map[k])) # TEMP - useful for when I was reading in XML files #training_list.append(txt_files_map[k]) - # What kind of model should be used? (ex. SVM vs. CRF) type = 0 if not args.no_svm: - type = type | libml.SVM + type = type | libml.SVM if not args.no_lin: - type = type | libml.LIN + type = type | libml.LIN if not args.no_crf: - type = type | libml.CRF - + type = type | libml.CRF # Read the data into a Note object @@ -115,16 +113,16 @@ def main(): note_tmp = Note() # Create Note note_tmp.read_i2b2(txt, con) # Read data into Note - notes.append( note_tmp ) # Add the Note to the list + notes.append(note_tmp) # Add the Note to the list # Create a Machine Learning model model = Model(filename = args.model, type = type) - + if args.disabled_features != None: - model.enabled_features = model.enabled_features - Set(args.disabled_features) + model.enabled_features = model.enabled_features - Set(args.disabled_features) if args.enabled_features != None: - model.enabled_features = Set(args.enabled_features) + model.enabled_features = Set(args.enabled_features) # Train the model using the Note's data diff --git a/code/wordshape.py b/code/wordshape.py index 57d46cc..b536f49 100644 --- a/code/wordshape.py +++ b/code/wordshape.py @@ -1,7 +1,7 @@ import re from sets import Set -BOUNDARY_SIZE = 2; +BOUNDARY_SIZE = 2 NOWORDSHAPE = -1 WORDSHAPEDAN1 = 0 @@ -20,6 +20,7 @@ greek = ["alpha", "beta", "gamma", "delta", "epsilon", "zeta", "theta", "iota", "kappa", "lambda", "omicron", "rho", "sigma", "tau", "upsilon", "omega"] biogreek = r"alpha|beta|gamma|delta|epsilon|zeta|theta|iota|kappa|lambda|omicron|rho|sigma|tau|upsilon|omega" + def lookupShaper(name): if name is None: return NOWORDSHAPE @@ -50,16 +51,19 @@ def lookupShaper(name): else: return NOWORDSHAPE + def dontUseLC(shape): return shape == WORDSHAPEDAN2 or shape == WORDSHAPEDAN2BIO or shape == WORDSHAPEJENNY1 or shape == WORDSHAPECHRIS2 or shape == WORDSHAPECHRIS3 + def wordShape(inStr, wordShaper): return wordShape(inStr, wordShaper, None) -def wordShape(inStr, wordShaper, knownLCWords): + +def wordShape(inStr, wordShaper, knownLCWords): if knownLCWords is not None and dontUseLC(wordShaper): knownLCWords = None - + if wordShaper == NOWORDSHAPE: return inStr elif wordShaper == WORDSHAPEDAN1: @@ -87,6 +91,7 @@ def wordShape(inStr, wordShaper, knownLCWords): elif wordShaper == WORDSHAPECHRIS3USELC: return wordShapeChris2(inStr, True, knownLCWords) + def wordShapeDan1(s): digit = True upper = True @@ -122,17 +127,17 @@ def wordShapeDan2(s, knownLCWords): for c in s: m = c if c.isdigit(): - m = 'd' + m = 
'd' if c.islower() or c == '_': - m = 'x' + m = 'x' if c.isupper(): - m = 'X' + m = 'X' if m != 'x' and m != 'X': nonLetters = True if m != lastM: sb += m lastM = m - + if length <= 3: sb += ':' + str(length) @@ -140,7 +145,8 @@ def wordShapeDan2(s, knownLCWords): if not nonLetters and knownLCWords.contains(s.lower()): sb += 'k' return sb - + + def wordShapeJenny1(s): sb = "WT-" lastM = '~' @@ -159,18 +165,18 @@ def wordShapeJenny1(s): for gr in greek: if s.startswith(gr): - m = 'g' - i = i + len(gr) - 1 - break + m = 'g' + i = i + len(gr) - 1 + break if m != 'x' and m != 'X': nonLetters = True - + if m != lastM: sb += m lastM = m - + if length <= 3: sb += ':' + str(length) @@ -179,47 +185,49 @@ def wordShapeJenny1(s): # sb += 'k' return sb + def wordShapeChris2(s, omitIfInBoundary, knownLCWords): - length = len(s) - if length <= BOUNDARY_SIZE * 2: - return wordShapeChris2Short(s, length, knownLCWords) - else: - return wordShapeChris2Long(s, omitIfInBoundary, length, knownLCWords) + length = len(s) + if length <= BOUNDARY_SIZE * 2: + return wordShapeChris2Short(s, length, knownLCWords) + else: + return wordShapeChris2Long(s, omitIfInBoundary, length, knownLCWords) + def wordShapeChris2Short(s, length, knownLCWords): - sblength = length - sb = "" - - if knownLCWords is not None: - sblength = length + 1 - - nonLetters = False - - for i in range (0, length): - c = s[i] - m = c - if c.isdigit(): - m = 'd' - if c.islower(): - m = 'x' - if c.isupper() or c.istitle(): - m = 'X' - - for gr in greek: - if s.startswith(gr): - m = 'g' - i = i + len(gr) - 1 - break - - if m != 'x' and m != 'X': - nonLetters = True - - sb += m - - if knownLCWords is not None: - if not nonLetters and knownLCWords.contains(s.lower()): - sb += 'k' - return sb + sblength = length + sb = "" + + if knownLCWords is not None: + sblength = length + 1 + + nonLetters = False + + for i in range (0, length): + c = s[i] + m = c + if c.isdigit(): + m = 'd' + if c.islower(): + m = 'x' + if c.isupper() or c.istitle(): + m = 'X' + + for gr in greek: + if s.startswith(gr): + m = 'g' + i = i + len(gr) - 1 + break + + if m != 'x' and m != 'X': + nonLetters = True + + sb += m + + if knownLCWords is not None: + if not nonLetters and knownLCWords.contains(s.lower()): + sb += 'k' + return sb def wordShapeChris2Long (s, omitIfInBoundary, length, knownLCWords): @@ -261,9 +269,9 @@ def wordShapeChris2Long (s, omitIfInBoundary, length, knownLCWords): sbSize = beginUpto + endUpto + len(seenSet) - if knownLCWords is not None: + if knownLCWords is not None: sbSize += 1 - + sb = "" sb += beginChars if omitIfInBoundary: @@ -273,7 +281,7 @@ def wordShapeChris2Long (s, omitIfInBoundary, length, knownLCWords): if beginChars[i] == ch: insert = False break - + for i in range (0, endUpto): if endChars[i] == ch: insert = False @@ -295,9 +303,11 @@ def wordShapeChris2Long (s, omitIfInBoundary, length, knownLCWords): # else: # return wordShapeDan2(s, knownLCWords) + def containsGreekLetter (s): return re.search(biogreek, s) + def wordShapeChris1 (s): length = len(s) if length == 0: @@ -307,13 +317,13 @@ def wordShapeChris1 (s): number = True seenDigit = False seenNonDigit = False - + for i in range(0, length): ch = s[i] digit = ch.isdigit() if digit: seenDigit = True - else: + else: seenNonDigit = True digit = digit or ch == '.' 
or ch == ',' or (i == 0 and (ch == '-' or ch == '+')) if not digit: @@ -337,7 +347,7 @@ def wordShapeChris1 (s): seenLower = False seenUpper = False allCaps = True - allLower = True + allLower = True initCap = False dash = False period = False @@ -363,50 +373,50 @@ def wordShapeChris1 (s): elif (let): seenLower = True allCaps = False - + if i == 0 and (up or tit): initCap = True if length == 2 and initCap and period: - return "ACRONYM1" + return "ACRONYM1" elif seenUpper and allCaps and not seenDigit and period: - return "ACRONYM" + return "ACRONYM" elif seenDigit and dash and not seenUpper and not seenLower: - return "DIGIT-DASH" + return "DIGIT-DASH" elif initCap and seenLower and seenDigit and dash: - return "CAPITALIZED-DIGIT-DASH" + return "CAPITALIZED-DIGIT-DASH" elif initCap and seenLower and seenDigit: - return "CAPITALIZED-DIGIT" + return "CAPITALIZED-DIGIT" elif initCap and seenLower & dash: - return "CAPITALIZED-DASH" + return "CAPITALIZED-DASH" elif initCap and seenLower: - return "CAPITALIZED" + return "CAPITALIZED" elif seenUpper and allCaps and seenDigit and dash: - return "ALLCAPS-DIGIT-DASH" + return "ALLCAPS-DIGIT-DASH" elif seenUpper and allCaps and seenDigit: - return "ALLCAPS-DIGIT" + return "ALLCAPS-DIGIT" elif seenUpper and allCaps and dash: - return "ALLCAPS" + return "ALLCAPS" elif seenUpper and allCaps: - return "ALLCAPS" + return "ALLCAPS" elif seenLower and allLower and seenDigit and dash: - return "LOWERif wordShaper ==-DIGIT-DASH" + return "LOWERif wordShaper ==-DIGIT-DASH" elif seenLower and allLower and seenDigit: - return "LOWERif wordShaper ==-DIGIT" + return "LOWERif wordShaper ==-DIGIT" elif seenLower and allLower and dash: - return "LOWERif wordShaper ==-DASH" + return "LOWERif wordShaper ==-DASH" elif seenLower and allLower: - return "LOWERif wordShaper ==" + return "LOWERif wordShaper ==" elif seenLower and seenDigit: - return "MIXEDif wordShaper ==-DIGIT" + return "MIXEDif wordShaper ==-DIGIT" elif seenLower: - return "MIXEDif wordShaper ==" + return "MIXEDif wordShaper ==" elif seenDigit: - return "SYMBOL-DIGIT" + return "SYMBOL-DIGIT" else: - return "SYMBOL" - + return "SYMBOL" + # gets Chris1, Dan1, Jenny1, Chris2 and Dan2 word shapes def getWordShapes(word): - return [wordShapeChris1(word), wordShapeDan1(word), wordShapeJenny1(word), wordShapeChris2(word, False, None), wordShapeDan2(word, None)] \ No newline at end of file + return [wordShapeChris1(word), wordShapeDan1(word), wordShapeJenny1(word), wordShapeChris2(word, False, None), wordShapeDan2(word, None)] From 852e3543a9eb8d6174453a5b45323eadbad38b0d Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Tue, 7 Jan 2014 20:38:25 -0500 Subject: [PATCH 009/393] Fixed the tab/indentation issue. 
tested for 'i2b2' and 'plain' formats --- code/note.py | 116 +++++++++++++++++++++++++-------------------------- 1 file changed, 58 insertions(+), 58 deletions(-) diff --git a/code/note.py b/code/note.py index 75c773f..2c95fb2 100644 --- a/code/note.py +++ b/code/note.py @@ -7,8 +7,8 @@ class Note: # Constructor def __init__(self): - # data - A list of lines directly from the file - # concepts - A one-to-one correspondence of each word's concept + # data - A list of lines directly from the file + # concepts - A one-to-one correspondence of each word's concept self.data = [] self.concepts = [] @@ -21,35 +21,35 @@ def __init__(self): def read_i2b2(self, txt, con=None): # Read in the medical text - with open(txt) as f: - for line in f: - # Add sentence to the data list + with open(txt) as f: + for line in f: + # Add sentence to the data list self.data.append(line) - # For each word, store a corresponding concept label - tmp = [] - for word in line.split(): - tmp.append('none') - self.concepts.append(tmp) + # For each word, store a corresponding concept label + tmp = [] + for word in line.split(): + tmp.append('none') + self.concepts.append(tmp) # If an accompanying concept file was specified, read it - if con: - with open(con) as f: - for line in f: - c, t = line.split('||') - t = t[3:-2] - c = c.split() - start = c[-2].split(':') - end = c[-1].split(':') - assert "concept spans one line", start[0] == end[0] - l = int(start[0]) - 1 - start = int(start[1]) - end = int(end[1]) + if con: + with open(con) as f: + for line in f: + c, t = line.split('||') + t = t[3:-2] + c = c.split() + start = c[-2].split(':') + end = c[-1].split(':') + assert "concept spans one line", start[0] == end[0] + l = int(start[0]) - 1 + start = int(start[1]) + end = int(end[1]) - for i in range(start, end + 1): - self.concepts[l][i] = t + for i in range( len(self.concepts[l]) ): + self.concepts[l][i] = t @@ -61,14 +61,14 @@ def read_i2b2(self, txt, con=None): # Write the concept predictions to a given file in i2b2 format def write_i2b2(self, con, labels): - with open(con, 'w') as f: - for i, tmp in enumerate(zip(self.txtlist(), labels)): - datum, label = tmp - for j, tmp in enumerate(zip(datum, label)): - datum, label = tmp - if label != 'none': - idx = "%d:%d" % (i + 1, j) - print >>f, "c=\"%s\" %s %s||t=\"%s\"" % (datum, idx, idx, label) + with open(con, 'w') as f: + for i, tmp in enumerate(zip(self.txtlist(), labels)): + datum, label = tmp + for j, tmp in enumerate(zip(datum, label)): + datum, label = tmp + if label != 'none': + idx = "%d:%d" % (i + 1, j) + print >>f, "c=\"%s\" %s %s||t=\"%s\"" % (datum, idx, idx, label) @@ -79,42 +79,42 @@ def write_i2b2(self, con, labels): def read_plain(self, txt, con=None): # Read in the medical text - with open(txt) as f: + with open(txt) as f: for line in f: - # Add sentence to the data list - self.data.append(line) + # Add sentence to the data list + self.data.append(line) - # For each word, store a corresponding concept label - tmp = [] - for word in line.split(): - tmp.append('none') - self.concepts.append(tmp) + # For each word, store a corresponding concept label + tmp = [] + for word in line.split(): + tmp.append('none') + self.concepts.append(tmp) # If an accompanying concept file was specified, read it - if con: - with open(con) as f: - for line in f: - c, t = line.split('||') - t = t[3:-2] - c = c.split() + if con: + with open(con) as f: + for line in f: + c, t = line.split('||') + t = t[3:-2] + c = c.split() - start = c[-2].split(':') - end = c[-1].split(':') + start = 
c[-2].split(':') + end = c[-1].split(':') - assert "concept spans one line", start[0] == end[0] - l = int(start[0]) - 1 - start = int(start[1]) - end = int(end[1]) + assert "concept spans one line", start[0] == end[0] + l = int(start[0]) - 1 + start = int(start[1]) + end = int(end[1]) # Tokenize the input intervals stok = len(self.data[l][:start].split()) etok = len(self.data[l][start:end+1].split()) + stok - 1 # Update the corresponding concept labels - for i in range(stok, etok + 1): - self.concepts[l][i] = t + for i in range(stok, etok + 1): + self.concepts[l][i] = t @@ -179,7 +179,7 @@ def read_xml(self, txt): tmp = [] for i, group in enumerate(line.split('<')): # All odd groups have label info (because of line split) - # ex. 'treatment>discharge medications' + # ex. 'treatment>discharge medications' if (i%2): # Get concept label match = re.search('(\w+)>(.+)', group) @@ -196,7 +196,7 @@ def read_xml(self, txt): # If even group , then process with 'none' labels else: for word in group.split(): - # / closes the xml tag of a previous label (skip) + # / closes the xml tag of a previous label (skip) if word[0] == '/': continue else: @@ -236,10 +236,10 @@ def txtlist( self ): # # @return a list of lists of the concepts associated with each word from data def conlist( self ): - return self.concepts + return self.concepts # For iterating def __iter__(self): - return iter(self.data) + return iter(self.data) From 71920ddfa00bd0ecf5a9427416c81f8100fbd117 Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Wed, 8 Jan 2014 01:05:59 -0500 Subject: [PATCH 010/393] Fixed tab/indentation issues for files. --- code/evaluate.py | 182 ++++++------- code/model.py | 692 +++++++++++++++++++++++------------------------ code/note.py | 2 +- code/predict.py | 85 +++--- code/train.py | 93 +++---- 5 files changed, 527 insertions(+), 527 deletions(-) diff --git a/code/evaluate.py b/code/evaluate.py index 3a1cd3c..4c3e3a6 100644 --- a/code/evaluate.py +++ b/code/evaluate.py @@ -13,38 +13,38 @@ def main(): parser = argparse.ArgumentParser() parser.add_argument("-t", - help = "Test files that were used to generate predictions", - dest = "txt", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_data/*') + help = "Test files that were used to generate predictions", + dest = "txt", + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_data/*') ) parser.add_argument("-c", - help = "The directory that contains predicted concept files organized into subdirectories for svm, lin, srf", - dest = "con", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_predictions/') + help = "The directory that contains predicted concept files organized into subdirectories for svm, lin, srf", + dest = "con", + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_predictions/') ) parser.add_argument("-r", - help = "The directory that contains reference gold standard concept files", - dest = "ref", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/reference_standard_for_test_data/concepts/') + help = "The directory that contains reference gold standard concept files", + dest = "ref", + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/reference_standard_for_test_data/concepts/') ) - + parser.add_argument("-o", - help = "Write the evaluation to a file rather than STDOUT", - dest = "output", - default = None + help = "Write the evaluation to a file rather than STDOUT", + 
dest = "output", + default = None ) # Parse command line arguments args = parser.parse_args() - + # Is output destination specified if args.output: - args.output = open(args.output, "w") + args.output = open(args.output, "w") else: - args.output = sys.stdout + args.output = sys.stdout txt_files = glob.glob(args.txt) @@ -57,31 +57,31 @@ def main(): con_directories = os.listdir(args.con) for con_directory in con_directories: - files = [] - directory_name = os.path.basename(con_directory) + files = [] + directory_name = os.path.basename(con_directory) - if directory_name not in ["svm", "crf", "lin"]: - continue + if directory_name not in ["svm", "crf", "lin"]: + continue - con_files = os.listdir(os.path.join(args.con, con_directory)) - con_files = map(lambda f: os.path.join(args.con, con_directory, f), con_files) - - con_files_map = helper.map_files(con_files) + con_files = os.listdir(os.path.join(args.con, con_directory)) + con_files = map(lambda f: os.path.join(args.con, con_directory, f), con_files) + + con_files_map = helper.map_files(con_files) - for k in txt_files_map: - if k in con_files_map and k in ref_files_map: - files.append((txt_files_map[k], con_files_map[k], ref_files_map[k])) + for k in txt_files_map: + if k in con_files_map and k in ref_files_map: + files.append((txt_files_map[k], con_files_map[k], ref_files_map[k])) - # Compute the confusion matrix - labels = Model.labels # hash tabble: label -> index - confusion = [[0] * len(labels) for e in labels] + # Compute the confusion matrix + labels = Model.labels # hash tabble: label -> index + confusion = [[0] * len(labels) for e in labels] # txt <- medical text # con <- model predictions # ref <- actual labels - for txt, con, ref in files: + for txt, con, ref in files: # A note that represents the model's predictions cnote = Note() @@ -94,65 +94,65 @@ def main(): #rnote.read_plain(txt, ref ) # in case in plain format # Get corresponding concept labels (prediction vs. 
actual) - for c, r in zip( cnote.conlist(), rnote.conlist() ): - for c, r in zip(c, r): - confusion[labels[r]][labels[c]] += 1 - - - - # Display the confusion matrix - print >>args.output, "" - print >>args.output, "" - print >>args.output, "" - print >>args.output, "================" - print >>args.output, directory_name.upper() + " RESULTS" - print >>args.output, "================" - print >>args.output, "" - print >>args.output, "Confusion Matrix" - pad = max(len(l) for l in labels) + 6 - print >>args.output, "%s %s" % (' ' * pad, "\t".join(Model.labels.keys())) - for act, act_v in labels.items(): - print >>args.output, "%s %s" % (act.rjust(pad), "\t".join([str(confusion[act_v][pre_v]) for pre, pre_v in labels.items()])) - print >>args.output, "" - - - - # Compute the analysis stuff - precision = [] - recall = [] - specificity = [] - f1 = [] - - tp = 0 - fp = 0 - fn = 0 - tn = 0 - - print >>args.output, "Analysis" - print >>args.output, " " * pad, "Precision\tRecall\tF1" - - - - for lab, lab_v in labels.items(): - tp = confusion[lab_v][lab_v] - fp = sum(confusion[v][lab_v] for k, v in labels.items() if v != lab_v) - fn = sum(confusion[lab_v][v] for k, v in labels.items() if v != lab_v) - tn = sum(confusion[v1][v2] for k1, v1 in labels.items() - for k2, v2 in labels.items() if v1 != lab_v and v2 != lab_v) - precision += [float(tp) / (tp + fp + 1e-100)] - recall += [float(tp) / (tp + fn + 1e-100)] - specificity += [float(tn) / (tn + fp + 1e-100)] - f1 += [float(2 * tp) / (2 * tp + fp + fn + 1e-100)] - print >>args.output, "%s %.4f\t%.4f\t%.4f\t%.4f" % (lab.rjust(pad), precision[-1], recall[-1], specificity[-1], f1[-1]) - - print >>args.output, "--------" - - precision = sum(precision) / len(precision) - recall = sum(recall) / len(recall) - specificity = sum(specificity) / len(specificity) - f1 = sum(f1) / len(f1) - - print >>args.output, "Average: %.4f\t%.4f\t%.4f\t%.4f" % (precision, recall, specificity, f1) - + for c, r in zip( cnote.conlist(), rnote.conlist() ): + for c, r in zip(c, r): + confusion[labels[r]][labels[c]] += 1 + + + + # Display the confusion matrix + print >>args.output, "" + print >>args.output, "" + print >>args.output, "" + print >>args.output, "================" + print >>args.output, directory_name.upper() + " RESULTS" + print >>args.output, "================" + print >>args.output, "" + print >>args.output, "Confusion Matrix" + pad = max(len(l) for l in labels) + 6 + print >>args.output, "%s %s" % (' ' * pad, "\t".join(Model.labels.keys())) + for act, act_v in labels.items(): + print >>args.output, "%s %s" % (act.rjust(pad), "\t".join([str(confusion[act_v][pre_v]) for pre, pre_v in labels.items()])) + print >>args.output, "" + + + + # Compute the analysis stuff + precision = [] + recall = [] + specificity = [] + f1 = [] + + tp = 0 + fp = 0 + fn = 0 + tn = 0 + + print >>args.output, "Analysis" + print >>args.output, " " * pad, "Precision\tRecall\tF1" + + + + for lab, lab_v in labels.items(): + tp = confusion[lab_v][lab_v] + fp = sum(confusion[v][lab_v] for k, v in labels.items() if v != lab_v) + fn = sum(confusion[lab_v][v] for k, v in labels.items() if v != lab_v) + tn = sum(confusion[v1][v2] for k1, v1 in labels.items() + for k2, v2 in labels.items() if v1 != lab_v and v2 != lab_v) + precision += [float(tp) / (tp + fp + 1e-100)] + recall += [float(tp) / (tp + fn + 1e-100)] + specificity += [float(tn) / (tn + fp + 1e-100)] + f1 += [float(2 * tp) / (2 * tp + fp + fn + 1e-100)] + print >>args.output, "%s %.4f\t%.4f\t%.4f\t%.4f" % (lab.rjust(pad), precision[-1], 
recall[-1], specificity[-1], f1[-1]) + + print >>args.output, "--------" + + precision = sum(precision) / len(precision) + recall = sum(recall) / len(recall) + specificity = sum(specificity) / len(specificity) + f1 = sum(f1) / len(f1) + + print >>args.output, "Average: %.4f\t%.4f\t%.4f\t%.4f" % (precision, recall, specificity, f1) + if __name__ == '__main__': main() diff --git a/code/model.py b/code/model.py index 002b162..c04f43b 100644 --- a/code/model.py +++ b/code/model.py @@ -19,358 +19,358 @@ import libml class Model: - sentence_features = ImmutableSet(["pos", "stem_wordnet", "test_result", "prev", "next"]) - word_features = ImmutableSet(["word", "length", "mitre", "stem_porter", "stem_lancaster", "word_shape"]) - # THESE ARE FEATURES I TRIED THAT DON'T LOOK THAT PROMISING - # I have some faith in "metric_unit" and "has_problem_form" - # "radial_loc" may be too rare and "def_class" could be over fitting - # "metric_unit", "radial_loc", "has_problem_form", "def_class" - - labels = { - "none":0, - "treatment":1, - "problem":2, - "test":3 - } - reverse_labels = {v:k for k, v in labels.items()} - - @staticmethod - def load(filename='awesome.model'): - with open(filename) as model: - model = pickle.load(model) - model.filename = filename - return model - - # Constructor - def __init__(self, filename='awesome.model', type=libml.ALL): - model_directory = os.path.dirname(filename) - - if model_directory != "": - helper.mkpath(model_directory) - - self.filename = os.path.realpath(filename) - self.type = type - self.vocab = {} - - self.enabled_features = Model.sentence_features | Model.word_features - + sentence_features = ImmutableSet(["pos", "stem_wordnet", "test_result", "prev", "next"]) + word_features = ImmutableSet(["word", "length", "mitre", "stem_porter", "stem_lancaster", "word_shape"]) + # THESE ARE FEATURES I TRIED THAT DON'T LOOK THAT PROMISING + # I have some faith in "metric_unit" and "has_problem_form" + # "radial_loc" may be too rare and "def_class" could be over fitting + # "metric_unit", "radial_loc", "has_problem_form", "def_class" + + labels = { + "none":0, + "treatment":1, + "problem":2, + "test":3 + } + reverse_labels = {v:k for k, v in labels.items()} + + @staticmethod + def load(filename='awesome.model'): + with open(filename) as model: + model = pickle.load(model) + model.filename = filename + return model + + # Constructor + def __init__(self, filename='awesome.model', type=libml.ALL): + model_directory = os.path.dirname(filename) + + if model_directory != "": + helper.mkpath(model_directory) + + self.filename = os.path.realpath(filename) + self.type = type + self.vocab = {} - # Model::train() - # - # @param note. A Note object that has data for training the model - def train(self, note): + self.enabled_features = Model.sentence_features | Model.word_features + + + # Model::train() + # + # @param note. 
A Note object that has data for training the model + def train(self, note): - # Get the data and annotations from the Note object + # Get the data and annotations from the Note object - # data - A list of list of the medical text's words - # labels - A list of list of concepts (1:1 with data) - data = note.txtlist() - labels = note.conlist() + # data - A list of list of the medical text's words + # labels - A list of list of concepts (1:1 with data) + data = note.txtlist() + labels = note.conlist() - # rows is a list of a list of hash tables - rows = [] - for sentence in data: - rows.append(self.features_for_sentence(sentence)) + # rows is a list of a list of hash tables + rows = [] + for sentence in data: + rows.append(self.features_for_sentence(sentence)) - # each list of hash tables - for row in rows: - # each hash table - for features in row: - # each key in hash table - for feature in features: - # I think new word encountered - if feature not in self.vocab: - self.vocab[feature] = len(self.vocab) + 1 - - # A list of a list encodings of concept labels (ex. 'none' => 0) - # [ [0, 0, 0], [0], [0, 0, 0], [0], [0, 0, 0, 0, 0, 2, 2, 0, 1] ] - label_lu = lambda l: Model.labels[l] - labels = [map(label_lu, x) for x in labels] - - - # list of a list of hash tables (all keys & values now numbers) - feat_lu = lambda f: {self.vocab[item]:f[item] for item in f} - rows = [map(feat_lu, x) for x in rows] - - - libml.write_features(self.filename, rows, labels, self.type) - - with open(self.filename, "w") as model: - pickle.dump(self, model) - - # Train the model - libml.train(self.filename, self.type) - - - - # Model::predict() - # - # @param note. A Note object that contains the training data - def predict(self, note): - - # data - A list of list of the medical text's words - data = note.txtlist() + # each list of hash tables + for row in rows: + # each hash table + for features in row: + # each key in hash table + for feature in features: + # I think new word encountered + if feature not in self.vocab: + self.vocab[feature] = len(self.vocab) + 1 + + # A list of a list encodings of concept labels (ex. 'none' => 0) + # [ [0, 0, 0], [0], [0, 0, 0], [0], [0, 0, 0, 0, 0, 2, 2, 0, 1] ] + label_lu = lambda l: Model.labels[l] + labels = [map(label_lu, x) for x in labels] + + + # list of a list of hash tables (all keys & values now numbers) + feat_lu = lambda f: {self.vocab[item]:f[item] for item in f} + rows = [map(feat_lu, x) for x in rows] + + + libml.write_features(self.filename, rows, labels, self.type) + + with open(self.filename, "w") as model: + pickle.dump(self, model) + + # Train the model + libml.train(self.filename, self.type) + + + + # Model::predict() + # + # @param note. 
A Note object that contains the training data + def predict(self, note): + + # data - A list of list of the medical text's words + data = note.txtlist() - # Something to do with calibrating the model - rows = [] # rows <- list of a list of hash tables (feature vectors) - for sentence in data: - rows.append(self.features_for_sentence(sentence)) - - - feat_lu = lambda f: {self.vocab[item]:f[item] for item in f if item in self.vocab} - rows = [map(feat_lu, x) for x in rows] - libml.write_features(self.filename, rows, None, self.type); - - # Use the trained model to make predictions - libml.predict(self.filename, self.type) - - - # A hash table - # the keys are 1,2,4 (SVM, LIN, and CRF) - # each value is a list of concept labels encodings - labels_list = libml.read_labels(self.filename, self.type) - - - # translate labels_list into a readable format - # ex. change all occurences of 0 -> 'none' - for t, labels in labels_list.items(): - tmp = [] - for sentence in data: - tmp.append([labels.pop(0) for i in range(len(sentence))]) - tmp[-1] = map(lambda l: l.strip(), tmp[-1]) - tmp[-1] = map(lambda l: Model.reverse_labels[int(l)],tmp[-1]) - labels_list[t] = tmp - - - # The new labels_list is a translated version - return labels_list - - - - - # input: A sentence from a medical text file (list of words) - # output: A list of hash tables - def features_for_sentence(self, sentence): - features_list = [] - - for word in sentence: - features_list.append(self.features_for_word(word)) - - tags = None - for feature in Model.sentence_features: - if feature not in self.enabled_features: - continue - - if feature == "pos": - tags = tags or nltk.pos_tag(sentence) - for i, features in enumerate(features_list): - tag = tags[i][1] - features[(feature, tag)] = 1 - - if feature == "stem_wordnet": - tags = tags or nltk.pos_tag(sentence) - morphy_tags = { - 'NN':nltk.corpus.reader.wordnet.NOUN, - 'JJ':nltk.corpus.reader.wordnet.ADJ, - 'VB':nltk.corpus.reader.wordnet.VERB, - 'RB':nltk.corpus.reader.wordnet.ADV} - morphy_tags = [(w, morphy_tags.setdefault(t[:2], nltk.corpus.reader.wordnet.NOUN)) for w,t in tags] - st = nltk.stem.WordNetLemmatizer() - for i, features in enumerate(features_list): - tag = morphy_tags[i] - features[(feature, st.lemmatize(*tag))] = 1 - - if feature == "test_result": - for index, features in enumerate(features_list): - right = " ".join([w for w in sentence[index:]]) - if self.is_test_result(right): - features[(feature, None)] = 1 - - - ngram_features = [{} for i in range(len(features_list))] - if "prev" in self.enabled_features: - prev = lambda f: {("prev_"+k[0], k[1]): v for k,v in f.items()} - prev_list = map(prev, features_list) - for i in range(len(features_list)): - if i == 0: - ngram_features[i][("prev", "*")] = 1 - else: - ngram_features[i].update(prev_list[i-1]) - - if "next" in self.enabled_features: - next = lambda f: {("next_"+k[0], k[1]): v for k,v in f.items()} - next_list = map(next, features_list) - for i in range(len(features_list)): - if i == len(features_list) - 1: - ngram_features[i][("next", "*")] = 1 - else: - ngram_features[i].update(next_list[i+1]) - - merged = lambda d1, d2: dict(d1.items() + d2.items()) - features_list = [merged(features_list[i], ngram_features[i]) - for i in range(len(features_list))] - - return features_list - - - - # input: a single word, like - # Admission - # output: A hash table of features - # features include: word, length, mitre, stem_porter - def features_for_word(self, word): - features = {'dummy':1} # always have >0 dimensions - - # 
word_shape, word, length, mitre, stem_porter, stem_lancaster - for feature in Model.word_features: - - # word_shape, test_result, word, pos, next, length, stem_wordnet, mitre, stem_porter, prev, stem_lancaster - if feature not in self.enabled_features: - continue - - if feature == "word": - features[(feature, word)] = 1 - - if feature == "length": - features[(feature, None)] = len(word) - - if feature == "mitre": - for f in Model.mitre_features: - if re.search(Model.mitre_features[f], word): - features[(feature, f)] = 1 - - if feature == "stem_porter": - st = nltk.stem.PorterStemmer() - features[(feature, st.stem(word))] = 1 - - if feature == "stem_lancaster": - st = nltk.stem.LancasterStemmer() - features[(feature, st.stem(word))] = 1 - - if feature == "stem_snowball": - st = nltk.stem.SnowballStemmer("english") - #features[(feature, st.stem(word))] = 1 + # Something to do with calibrating the model + rows = [] # rows <- list of a list of hash tables (feature vectors) + for sentence in data: + rows.append(self.features_for_sentence(sentence)) + + + feat_lu = lambda f: {self.vocab[item]:f[item] for item in f if item in self.vocab} + rows = [map(feat_lu, x) for x in rows] + libml.write_features(self.filename, rows, None, self.type); + + # Use the trained model to make predictions + libml.predict(self.filename, self.type) + + + # A hash table + # the keys are 1,2,4 (SVM, LIN, and CRF) + # each value is a list of concept labels encodings + labels_list = libml.read_labels(self.filename, self.type) + + + # translate labels_list into a readable format + # ex. change all occurences of 0 -> 'none' + for t, labels in labels_list.items(): + tmp = [] + for sentence in data: + tmp.append([labels.pop(0) for i in range(len(sentence))]) + tmp[-1] = map(lambda l: l.strip(), tmp[-1]) + tmp[-1] = map(lambda l: Model.reverse_labels[int(l)],tmp[-1]) + labels_list[t] = tmp + + + # The new labels_list is a translated version + return labels_list + + + + + # input: A sentence from a medical text file (list of words) + # output: A list of hash tables + def features_for_sentence(self, sentence): + features_list = [] + + for word in sentence: + features_list.append(self.features_for_word(word)) + + tags = None + for feature in Model.sentence_features: + if feature not in self.enabled_features: + continue + + if feature == "pos": + tags = tags or nltk.pos_tag(sentence) + for i, features in enumerate(features_list): + tag = tags[i][1] + features[(feature, tag)] = 1 + + if feature == "stem_wordnet": + tags = tags or nltk.pos_tag(sentence) + morphy_tags = { + 'NN':nltk.corpus.reader.wordnet.NOUN, + 'JJ':nltk.corpus.reader.wordnet.ADJ, + 'VB':nltk.corpus.reader.wordnet.VERB, + 'RB':nltk.corpus.reader.wordnet.ADV} + morphy_tags = [(w, morphy_tags.setdefault(t[:2], nltk.corpus.reader.wordnet.NOUN)) for w,t in tags] + st = nltk.stem.WordNetLemmatizer() + for i, features in enumerate(features_list): + tag = morphy_tags[i] + features[(feature, st.lemmatize(*tag))] = 1 + + if feature == "test_result": + for index, features in enumerate(features_list): + right = " ".join([w for w in sentence[index:]]) + if self.is_test_result(right): + features[(feature, None)] = 1 + + + ngram_features = [{} for i in range(len(features_list))] + if "prev" in self.enabled_features: + prev = lambda f: {("prev_"+k[0], k[1]): v for k,v in f.items()} + prev_list = map(prev, features_list) + for i in range(len(features_list)): + if i == 0: + ngram_features[i][("prev", "*")] = 1 + else: + ngram_features[i].update(prev_list[i-1]) - if feature == 
"word_shape": - wordShapes = getWordShapes(word) - for i, shape in enumerate(wordShapes): - features[(feature + str(i), shape)] = 1 - - if feature == "metric_unit": - unit = 0 - if self.is_weight(word): - unit = 1 - elif self.is_size(word): - unit = 2 - features[(feature, None)] = unit - - # look for prognosis locaiton - #if feature == "radial_loc": - # THIS MIGHT BE BUGGED - # if self.is_prognosis_location(word): - # features[(feature, None)] = 1 - - if feature == "has_problem_form": - if self.has_problem_form(word): - features[(feature, None)] = 1 - - if feature == "def_class": - features[(feature, None)] = self.get_def_class(word) - - return features - - mitre_features = { - "INITCAP" : r"^[A-Z].*$", - "ALLCAPS" : r"^[A-Z]+$", - "CAPSMIX" : r"^[A-Za-z]+$", - "HASDIGIT" : r"^.*[0-9].*$", - "SINGLEDIGIT" : r"^[0-9]$", - "DOUBLEDIGIT" : r"^[0-9][0-9]$", - "FOURDIGITS" : r"^[0-9][0-9][0-9][0-9]$", - "NATURALNUM" : r"^[0-9]+$", - "REALNUM" : r"^[0-9]+.[0-9]+$", - "ALPHANUM" : r"^[0-9A-Za-z]+$", - "HASDASH" : r"^.*-.*$", - "PUNCTUATION" : r"^[^A-Za-z0-9]+$", - "PHONE1" : r"^[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]$", - "PHONE2" : r"^[0-9][0-9][0-9]-[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]$", - "FIVEDIGIT" : r"^[0-9][0-9][0-9][0-9][0-9]", - "NOVOWELS" : r"^[^AaEeIiOoUu]+$", - "HASDASHNUMALPHA" : r"^.*[A-z].*-.*[0-9].*$ | *.[0-9].*-.*[0-9].*$", - "DATESEPERATOR" : r"^[-/]$", - } - - def is_test_result (self, context): - # note: make spaces optional? - regex = r"^[A-Za-z]+( )*(-|--|:|was|of|\*|>|<|more than|less than)( )*[0-9]+(%)*" - if not re.search(regex, context): - return re.search(r"^[A-Za-z]+ was (positive|negative)", context) - return True - - def is_weight (self, word): - regex = r"^[0-9]*(mg|g|milligrams|grams)$" - return re.search(regex, word) - - def is_size (self, word): - regex = r"^[0-9]*(mm|cm|millimeters|centimeters)$" - return re.search(regex, word) - - def is_prognosis_location (self, word): - regex = r"^(c|C)[0-9]+(-(c|C)[0-9]+)*$" - return re.search(regex, word) - - def has_problem_form (self, word): - regex = r".*(ic|is)$" - return re.search(regex, word) - - # checks for a definitive classification at the word level - def get_def_class (self, word): - test_terms = { - "eval", "evaluation", "evaluations", - "sat", "sats", "saturation", - "exam", "exams", - "rate", "rates", - "test", "tests", - "xray", "xrays", - "screen", "screens", - "level", "levels", - "tox" - } - problem_terms = { - "swelling", - "wound", "wounds", - "symptom", "symptoms", - "shifts", "failure", - "insufficiency", "insufficiencies", - "mass", "masses", - "aneurysm", "aneurysms", - "ulcer", "ulcers", - "trama", "cancer", - "disease", "diseased", - "bacterial", "viral", - "syndrome", "syndromes", - "pain", "pains" - "burns", "burned", - "broken", "fractured" - } - treatment_terms = { - "therapy", - "replacement", - "anesthesia", - "supplement", "supplemental", - "vaccine", "vaccines" - "dose", "doses", - "shot", "shots", - "medication", "medicine", - "treament", "treatments" - } - if word.lower() in test_terms: - return 1 - elif word.lower() in problem_terms: - return 2 - elif word.lower() in treatment_terms: - return 3 - return 0 - + if "next" in self.enabled_features: + next = lambda f: {("next_"+k[0], k[1]): v for k,v in f.items()} + next_list = map(next, features_list) + for i in range(len(features_list)): + if i == len(features_list) - 1: + ngram_features[i][("next", "*")] = 1 + else: + ngram_features[i].update(next_list[i+1]) + + merged = lambda d1, d2: dict(d1.items() + d2.items()) + features_list = 
[merged(features_list[i], ngram_features[i]) + for i in range(len(features_list))] + + return features_list + + + + # input: a single word, like + # Admission + # output: A hash table of features + # features include: word, length, mitre, stem_porter + def features_for_word(self, word): + features = {'dummy':1} # always have >0 dimensions + + # word_shape, word, length, mitre, stem_porter, stem_lancaster + for feature in Model.word_features: + + # word_shape, test_result, word, pos, next, length, stem_wordnet, mitre, stem_porter, prev, stem_lancaster + if feature not in self.enabled_features: + continue + + if feature == "word": + features[(feature, word)] = 1 + + if feature == "length": + features[(feature, None)] = len(word) + + if feature == "mitre": + for f in Model.mitre_features: + if re.search(Model.mitre_features[f], word): + features[(feature, f)] = 1 + + if feature == "stem_porter": + st = nltk.stem.PorterStemmer() + features[(feature, st.stem(word))] = 1 + + if feature == "stem_lancaster": + st = nltk.stem.LancasterStemmer() + features[(feature, st.stem(word))] = 1 + + if feature == "stem_snowball": + st = nltk.stem.SnowballStemmer("english") + #features[(feature, st.stem(word))] = 1 + + if feature == "word_shape": + wordShapes = getWordShapes(word) + for i, shape in enumerate(wordShapes): + features[(feature + str(i), shape)] = 1 + + if feature == "metric_unit": + unit = 0 + if self.is_weight(word): + unit = 1 + elif self.is_size(word): + unit = 2 + features[(feature, None)] = unit + + # look for prognosis locaiton + #if feature == "radial_loc": + # THIS MIGHT BE BUGGED + # if self.is_prognosis_location(word): + # features[(feature, None)] = 1 + + if feature == "has_problem_form": + if self.has_problem_form(word): + features[(feature, None)] = 1 + + if feature == "def_class": + features[(feature, None)] = self.get_def_class(word) + + return features + + mitre_features = { + "INITCAP" : r"^[A-Z].*$", + "ALLCAPS" : r"^[A-Z]+$", + "CAPSMIX" : r"^[A-Za-z]+$", + "HASDIGIT" : r"^.*[0-9].*$", + "SINGLEDIGIT" : r"^[0-9]$", + "DOUBLEDIGIT" : r"^[0-9][0-9]$", + "FOURDIGITS" : r"^[0-9][0-9][0-9][0-9]$", + "NATURALNUM" : r"^[0-9]+$", + "REALNUM" : r"^[0-9]+.[0-9]+$", + "ALPHANUM" : r"^[0-9A-Za-z]+$", + "HASDASH" : r"^.*-.*$", + "PUNCTUATION" : r"^[^A-Za-z0-9]+$", + "PHONE1" : r"^[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]$", + "PHONE2" : r"^[0-9][0-9][0-9]-[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]$", + "FIVEDIGIT" : r"^[0-9][0-9][0-9][0-9][0-9]", + "NOVOWELS" : r"^[^AaEeIiOoUu]+$", + "HASDASHNUMALPHA" : r"^.*[A-z].*-.*[0-9].*$ | *.[0-9].*-.*[0-9].*$", + "DATESEPERATOR" : r"^[-/]$", + } + + def is_test_result (self, context): + # note: make spaces optional? 
+ regex = r"^[A-Za-z]+( )*(-|--|:|was|of|\*|>|<|more than|less than)( )*[0-9]+(%)*" + if not re.search(regex, context): + return re.search(r"^[A-Za-z]+ was (positive|negative)", context) + return True + + def is_weight (self, word): + regex = r"^[0-9]*(mg|g|milligrams|grams)$" + return re.search(regex, word) + + def is_size (self, word): + regex = r"^[0-9]*(mm|cm|millimeters|centimeters)$" + return re.search(regex, word) + + def is_prognosis_location (self, word): + regex = r"^(c|C)[0-9]+(-(c|C)[0-9]+)*$" + return re.search(regex, word) + + def has_problem_form (self, word): + regex = r".*(ic|is)$" + return re.search(regex, word) + + # checks for a definitive classification at the word level + def get_def_class (self, word): + test_terms = { + "eval", "evaluation", "evaluations", + "sat", "sats", "saturation", + "exam", "exams", + "rate", "rates", + "test", "tests", + "xray", "xrays", + "screen", "screens", + "level", "levels", + "tox" + } + problem_terms = { + "swelling", + "wound", "wounds", + "symptom", "symptoms", + "shifts", "failure", + "insufficiency", "insufficiencies", + "mass", "masses", + "aneurysm", "aneurysms", + "ulcer", "ulcers", + "trama", "cancer", + "disease", "diseased", + "bacterial", "viral", + "syndrome", "syndromes", + "pain", "pains" + "burns", "burned", + "broken", "fractured" + } + treatment_terms = { + "therapy", + "replacement", + "anesthesia", + "supplement", "supplemental", + "vaccine", "vaccines" + "dose", "doses", + "shot", "shots", + "medication", "medicine", + "treament", "treatments" + } + if word.lower() in test_terms: + return 1 + elif word.lower() in problem_terms: + return 2 + elif word.lower() in treatment_terms: + return 3 + return 0 + diff --git a/code/note.py b/code/note.py index 2c95fb2..0d404b0 100644 --- a/code/note.py +++ b/code/note.py @@ -48,7 +48,7 @@ def read_i2b2(self, txt, con=None): start = int(start[1]) end = int(end[1]) - for i in range( len(self.concepts[l]) ): + for i in range( start, end+1 ): self.concepts[l][i] = t diff --git a/code/predict.py b/code/predict.py index 83f92bb..89f9b3a 100644 --- a/code/predict.py +++ b/code/predict.py @@ -12,41 +12,41 @@ def main(): parser = argparse.ArgumentParser() parser.add_argument("-i", - dest = "input", - help = "The input files to predict", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_data/*') + dest = "input", + help = "The input files to predict", + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_data/*') ) parser.add_argument("-o", - dest = "output", - help = "The directory to write the output", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_predictions') + dest = "output", + help = "The directory to write the output", + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_predictions') ) parser.add_argument("-m", - dest = "model", - help = "The model to use for prediction", - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/run_models/run.model') + dest = "model", + help = "The model to use for prediction", + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/run_models/run.model') ) - + parser.add_argument("--no-svm", - dest = "no_svm", - action = "store_true", - help = "Disable SVM model generation", + dest = "no_svm", + action = "store_true", + help = "Disable SVM model generation", ) parser.add_argument("--no-lin", - dest = "no_lin", - action = "store_true", - help = "Disable LIN model 
generation", + dest = "no_lin", + action = "store_true", + help = "Disable LIN model generation", ) parser.add_argument("--no-crf", - dest = "no_crf", - action = "store_true", - help = "Disable CRF model generation", + dest = "no_crf", + action = "store_true", + help = "Disable CRF model generation", ) - + args = parser.parse_args() # Locate the test files @@ -59,45 +59,44 @@ def main(): # Determine what type of models to use (ex SVM vs. CRF) model = Model.load(args.model) if args.no_svm: - model.type &= ~libml.SVM + model.type &= ~libml.SVM if args.no_lin: - model.type &= ~libml.LIN + model.type &= ~libml.LIN if args.no_crf: - model.type &= ~libml.CRF - + model.type &= ~libml.CRF + for txt in files: - # Read the data into a Note object + # Read the data into a Note object note = Note() note.read_i2b2(txt) - #note.read_plain(txt) # TEMP - in case of plain format # Use the model to predict the concept labels - labels = model.predict(note) + labels = model.predict(note) # labels (above) is a hash table # the keys are 1,2,4 (SVM, LIN, and CRF) # each value is a list of concept labels, like from the Note class - con = os.path.split(txt)[-1] - con = con[:-3] + 'con' - - for t in libml.bits(model.type): - if t == libml.SVM: - helper.mkpath(os.path.join(args.output, "svm")) - con_path = os.path.join(path, "svm", con) - if t == libml.LIN: - helper.mkpath(os.path.join(args.output, "lin")) - con_path = os.path.join(path, "lin", con) - if t == libml.CRF: - helper.mkpath(os.path.join(args.output, "crf")) - con_path = os.path.join(path, "crf", con) - + con = os.path.split(txt)[-1] + con = con[:-3] + 'con' + + for t in libml.bits(model.type): + if t == libml.SVM: + helper.mkpath(os.path.join(args.output, "svm")) + con_path = os.path.join(path, "svm", con) + if t == libml.LIN: + helper.mkpath(os.path.join(args.output, "lin")) + con_path = os.path.join(path, "lin", con) + if t == libml.CRF: + helper.mkpath(os.path.join(args.output, "crf")) + con_path = os.path.join(path, "crf", con) + # Output the concept predictions - note.write_i2b2(con_path, labels[t]) - #note.write_plain(con_path, labels[t]) # in case of plain format + note.write_i2b2(con_path, labels[t]) + #note.write_plain(con_path, labels[t]) # in case of plain format if __name__ == '__main__': diff --git a/code/train.py b/code/train.py index aee4d07..42697b7 100644 --- a/code/train.py +++ b/code/train.py @@ -14,56 +14,56 @@ def main(): parser = argparse.ArgumentParser() parser.add_argument("-t", - dest = "txt", - help = "The files that contain the training examples", - #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/merged/txt/*') - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/beth/txt/record-33.txt') + dest = "txt", + help = "The files that contain the training examples", + #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/merged/txt/*') + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/beth/txt/record-33.txt') ) - + parser.add_argument("-c", - dest = "con", - help = "The files that contain the labels for the training examples", - #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/merged/concept/*') - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), 
'../data/concept_assertion_relation_training_data/beth/concept/record-33.txt') + dest = "con", + help = "The files that contain the labels for the training examples", + #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/merged/concept/*') + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/beth/concept/record-33.txt') ) - + parser.add_argument("-m", - dest = "model", - help = "Path to the model that should be generated", - #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/awesome.model') - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/run_models/run.model') + dest = "model", + help = "Path to the model that should be generated", + #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/awesome.model') + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/run_models/run.model') ) parser.add_argument("-d", - dest = "disabled_features", - help = "The features that should not be used", - nargs = "+", - default = None + dest = "disabled_features", + help = "The features that should not be used", + nargs = "+", + default = None ) parser.add_argument("-e", - dest = "enabled_features", - help = "The features that should be used. This option trumps -d", - nargs = "+", - default = None + dest = "enabled_features", + help = "The features that should be used. This option trumps -d", + nargs = "+", + default = None ) parser.add_argument("--no-svm", - dest = "no_svm", - action = "store_true", - help = "Disable SVM model generation", + dest = "no_svm", + action = "store_true", + help = "Disable SVM model generation", ) parser.add_argument("--no-lin", - dest = "no_lin", - action = "store_true", - help = "Disable LIN model generation", + dest = "no_lin", + action = "store_true", + help = "Disable LIN model generation", ) parser.add_argument("--no-crf", - dest = "no_crf", - action = "store_true", - help = "Disable CRF model generation", + dest = "no_crf", + action = "store_true", + help = "Disable CRF model generation", ) @@ -86,45 +86,46 @@ def main(): # ex. training_list = [ ('record-13.txt', 'record-13.con') ] training_list = [] for k in txt_files_map: - if k in con_files_map: - training_list.append((txt_files_map[k], con_files_map[k])) + if k in con_files_map: + training_list.append((txt_files_map[k], con_files_map[k])) - # TEMP - useful for when I was reading in XML files - #training_list.append(txt_files_map[k]) + # TEMP - useful for when I was reading in XML files + #training_list.append(txt_files_map[k]) # What kind of model should be used? (ex. SVM vs. 
CRF) type = 0 if not args.no_svm: - type = type | libml.SVM + type = type | libml.SVM if not args.no_lin: - type = type | libml.LIN + type = type | libml.LIN if not args.no_crf: - type = type | libml.CRF - + type = type | libml.CRF + # Read the data into a Note object notes = [] for txt, con in training_list: #for txt in training_list: - # Alternative data formats + note_tmp = Note() # Create Note + + # --- Alternative data formats --- #note_tmp.read_plain(txt, con) # plain #note_tmp.read_xml(txt) # xml + note_tmp.read_i2b2(txt, con) # i2b2 (normal) - note_tmp = Note() # Create Note - note_tmp.read_i2b2(txt, con) # Read data into Note - notes.append( note_tmp ) # Add the Note to the list + notes.append( note_tmp ) # Add the Note to the list # Create a Machine Learning model model = Model(filename = args.model, type = type) - + if args.disabled_features != None: - model.enabled_features = model.enabled_features - Set(args.disabled_features) + model.enabled_features = model.enabled_features - Set(args.disabled_features) if args.enabled_features != None: - model.enabled_features = Set(args.enabled_features) + model.enabled_features = Set(args.enabled_features) # Train the model using the Note's data From 4360936efc558a34049460cbabcbf0faba1715c1 Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Thu, 16 Jan 2014 20:11:19 -0500 Subject: [PATCH 011/393] rephrased comment --- code/predict.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/code/predict.py b/code/predict.py index 89f9b3a..66767ec 100644 --- a/code/predict.py +++ b/code/predict.py @@ -73,12 +73,11 @@ def main(): note.read_i2b2(txt) # Use the model to predict the concept labels + # Returns a hash table with: + # keys as 1,2,4 (SVM, LIN, CRF) + # values as list of list of concept tokens (one-to-one with dat_list) labels = model.predict(note) - # labels (above) is a hash table - # the keys are 1,2,4 (SVM, LIN, and CRF) - # each value is a list of concept labels, like from the Note class - con = os.path.split(txt)[-1] con = con[:-3] + 'con' From a25d8d4e34d07a40f8e2bfe85a590b05ce04a4ef Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Thu, 16 Jan 2014 20:11:59 -0500 Subject: [PATCH 012/393] Added greedy adjacent boundary detection --- code/model.py | 170 +++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 156 insertions(+), 14 deletions(-) diff --git a/code/model.py b/code/model.py index c04f43b..4913cb7 100644 --- a/code/model.py +++ b/code/model.py @@ -18,6 +18,17 @@ import libml + +#count = 0 +#count2 = 0 +#def my_sort(pair1, pair2): +# a = pair1[0][0] +# b = pair2[0][0] +# if a == b: return 0 +# if a < b: return -1 +# return 1 + + class Model: sentence_features = ImmutableSet(["pos", "stem_wordnet", "test_result", "prev", "next"]) word_features = ImmutableSet(["word", "length", "mitre", "stem_porter", "stem_lancaster", "word_shape"]) @@ -73,16 +84,56 @@ def train(self, note): for sentence in data: rows.append(self.features_for_sentence(sentence)) - # each list of hash tables + #print "\n" + "-" * 80 + "\n" + #print "len(rows)" + #print len(rows) + + #print "\n" + "-" * 80 + "\n" + #print "rows[0]" + #for elem in rows[0]: + # for it in sorted(elem.items(),my_sort) : print it + # print "\n" + #print "\n" + "-" * 80 + "\n" + + #print "\n" + "-" * 80 + "\n" + #print "len(rows[0])" + #print len(rows[0]) + #print "\n" + "-" * 80 + "\n" + + #print "rows[0][0]" + #print rows[0][0] + #print "\n" + "-" * 80 + "\n" + + #print "self.vocab" + #print self.vocab + #print "\n" + "-" * 80 + "\n" + #print "\n" * 4 
+ + + # each list of hash tables (one list per line in file) for row in rows: - # each hash table + # each hash table (one hash table per word in the line) for features in row: - # each key in hash table + # each key (tuple) pair in hash table (one key per feature) for feature in features: - # I think new word encountered + # assigning a unique number to each feature + # ex. Here are three key,value pairs that go into self.vocab + # (('word_shape4', 'WT-Xx'), 2) + # (('next_pos', 'NNP'), 3) + # (('next_word', 'Date'), 4) if feature not in self.vocab: self.vocab[feature] = len(self.vocab) + 1 + #def tmp_sort(pair1, pair2): + # return pair1[1] - pair2[1] + + #print "self.vocab" + #print self.vocab + #print "\n" + "-" * 80 + "\n" + #for elem in sorted( self.vocab.items(), tmp_sort ) : print elem + #print "\n" + "-" * 80 + "\n" + + # A list of a list encodings of concept labels (ex. 'none' => 0) # [ [0, 0, 0], [0], [0, 0, 0], [0], [0, 0, 0, 0, 0, 2, 2, 0, 1] ] label_lu = lambda l: Model.labels[l] @@ -93,7 +144,6 @@ def train(self, note): feat_lu = lambda f: {self.vocab[item]:f[item] for item in f} rows = [map(feat_lu, x) for x in rows] - libml.write_features(self.filename, rows, labels, self.type) with open(self.filename, "w") as model: @@ -106,15 +156,17 @@ def train(self, note): # Model::predict() # - # @param note. A Note object that contains the training data + # @param note. A Note object that contains the data def predict(self, note): # data - A list of list of the medical text's words data = note.txtlist() - - # Something to do with calibrating the model - rows = [] # rows <- list of a list of hash tables (feature vectors) + # rows <- list of a list of hash tables + # each list of hash tables (one list per line in file) + # each hash table (one hash table per word in the line) + # each key (tuple) pair in hash table (one key per feature) + rows = [] for sentence in data: rows.append(self.features_for_sentence(sentence)) @@ -128,11 +180,16 @@ def predict(self, note): # A hash table - # the keys are 1,2,4 (SVM, LIN, and CRF) + # the keys are 1,2,4 (LIN, CRF, and SVM) # each value is a list of concept labels encodings labels_list = libml.read_labels(self.filename, self.type) + print "labels_list" + print labels_list + print "\n" + "-" * 80 + + # translate labels_list into a readable format # ex. change all occurences of 0 -> 'none' for t, labels in labels_list.items(): @@ -143,31 +200,92 @@ def predict(self, note): tmp[-1] = map(lambda l: Model.reverse_labels[int(l)],tmp[-1]) labels_list[t] = tmp + print "labels_list" + print labels_list + print "\n" + "-" * 80 - # The new labels_list is a translated version - return labels_list + # Group classified tokens based on adjacency + nontrivial_concepts = ImmutableSet( ['treatment', 'problem', 'test'] ) + tmp_hash = {} + for t,labels in labels_list.items(): + + tmp = [] # Stores a list of classifications + # A classification is a 4-tuple: + # (concept, lineno, starttok, endtok) + start_ind = 0 + end_ind = 0 + streak = 0 # To keep count of the streak of classified tokens + for i, concept_line in enumerate(labels): + + # C-style array indexing. 
Probably could be done a better way + # used because I needed the ability of lookahead + for j in range(len(concept_line)): + + # Non-trivial classification + if concept_line[j] in nontrivial_concepts: + + # Increase size of current streak + streak += 1 + + # lookahead (check if streak will continue) + if (j+1 == len(concept_line))or \ + (concept_line[j] != concept_line[j+1]): + # Add streak + tmp.append((concept_line[j], i+1, j-streak+1, j)) + # Reset count + streak = 0 + + tmp_hash[t] = tmp + print tmp_hash + print "\n" + "-" * 80 - # input: A sentence from a medical text file (list of words) - # output: A list of hash tables + # The new labels_list is a translated version + #return labels_list + return tmp_hash + + + + # Model::feature_for_sentence + # + # input: A sentence/line from a medical text file (list of words) + # output: A list of hash tables (one hash table per word) def features_for_sentence(self, sentence): + + # Question! - What do the values of each key,value pair represent? + + #global count + features_list = [] for word in sentence: features_list.append(self.features_for_word(word)) + tags = None for feature in Model.sentence_features: if feature not in self.enabled_features: continue if feature == "pos": + tags = tags or nltk.pos_tag(sentence) + for i, features in enumerate(features_list): tag = tags[i][1] features[(feature, tag)] = 1 + + #if count == 0: + # print "tags" + # print tags + # print "\n" + "-" * 80 + "\n" + # print "feature_list\n" + # for it in features_list: + # for elem in sorted(it.items(),my_sort) : print elem + # print "" + # print "\n" + "-" * 80 + "\n" if feature == "stem_wordnet": tags = tags or nltk.pos_tag(sentence) @@ -211,6 +329,13 @@ def features_for_sentence(self, sentence): merged = lambda d1, d2: dict(d1.items() + d2.items()) features_list = [merged(features_list[i], ngram_features[i]) for i in range(len(features_list))] + + #if count == 0: + # #print "\n" + "-" * 80 + # #print "features_list" + # #for elem in features_list : print elem + # #print "\n" + "-" * 80 + # count = 1 return features_list @@ -221,6 +346,9 @@ def features_for_sentence(self, sentence): # output: A hash table of features # features include: word, length, mitre, stem_porter def features_for_word(self, word): + + #global count2 + features = {'dummy':1} # always have >0 dimensions # word_shape, word, length, mitre, stem_porter, stem_lancaster @@ -255,6 +383,13 @@ def features_for_word(self, word): if feature == "word_shape": wordShapes = getWordShapes(word) + + #if count2 < 3: + # #print "word: ", word + # #print "wordShapes: ", wordShapes + # #print "\n" + "-" * 80 + # print '' + for i, shape in enumerate(wordShapes): features[(feature + str(i), shape)] = 1 @@ -279,8 +414,13 @@ def features_for_word(self, word): if feature == "def_class": features[(feature, None)] = self.get_def_class(word) + #if count2 < 3: + # count2 += 1 + return features + + mitre_features = { "INITCAP" : r"^[A-Z].*$", "ALLCAPS" : r"^[A-Z]+$", @@ -302,6 +442,8 @@ def features_for_word(self, word): "DATESEPERATOR" : r"^[-/]$", } + + def is_test_result (self, context): # note: make spaces optional? 
regex = r"^[A-Za-z]+( )*(-|--|:|was|of|\*|>|<|more than|less than)( )*[0-9]+(%)*" From 68eb6efa5f792e4b6332a228a39d6817350d6a9c Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Thu, 16 Jan 2014 20:12:32 -0500 Subject: [PATCH 013/393] Changed internal representation of concepts to allow support for boundaries --- code/note.py | 119 +++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 96 insertions(+), 23 deletions(-) diff --git a/code/note.py b/code/note.py index 0d404b0..98915d7 100644 --- a/code/note.py +++ b/code/note.py @@ -9,8 +9,10 @@ class Note: def __init__(self): # data - A list of lines directly from the file # concepts - A one-to-one correspondence of each word's concept + # classifications - A list of tuples that convey concept labels self.data = [] self.concepts = [] + self.classifications = [] @@ -26,49 +28,87 @@ def read_i2b2(self, txt, con=None): # Add sentence to the data list self.data.append(line) - # For each word, store a corresponding concept label - tmp = [] - for word in line.split(): - tmp.append('none') - self.concepts.append(tmp) - - # If an accompanying concept file was specified, read it if con: with open(con) as f: for line in f: - c, t = line.split('||') - t = t[3:-2] - c = c.split() - start = c[-2].split(':') - end = c[-1].split(':') + + # concept + prefix, suffix = line.split('||') + txt = prefix.split() + con = suffix[3:-2] + + start = txt[-2].split(':') + end = txt[-1].split(':') + assert "concept spans one line", start[0] == end[0] - l = int(start[0]) - 1 + + # lineno + l = int(start[0]) + + # starttok + # endtok start = int(start[1]) end = int(end[1]) - for i in range( start, end+1 ): - self.concepts[l][i] = t + # Add the classification to the Note object + self.classifications.append( (con,l,start,end) ) + + #print "\n" + "-" * 80 # Note::write_i2b2() # # @param con. A path to the file of where to write the prediction. - # @param labels. A list of predictions of labels for the given text. + # @param labels. 
A list of classifications # # Write the concept predictions to a given file in i2b2 format def write_i2b2(self, con, labels): + # List of list of words (line-by-line) + tlist = self.txtlist() + + #for i, elem in enumerate(self.data): + # print i, ": ", elem + with open(con, 'w') as f: - for i, tmp in enumerate(zip(self.txtlist(), labels)): - datum, label = tmp - for j, tmp in enumerate(zip(datum, label)): - datum, label = tmp - if label != 'none': - idx = "%d:%d" % (i + 1, j) - print >>f, "c=\"%s\" %s %s||t=\"%s\"" % (datum, idx, idx, label) + + for classification in labels: + + # Ensure 'none' classifications are skipped + if classification[0] == 'none': + continue + + concept = classification[0] + lineno = classification[1] + start = classification[2] + end = classification[3] + + # A list of words (corresponding line from the text file) + text = tlist[lineno-1] + + #print "\n" + "-" * 80 + #print "start: ", start + #print "text: ", text + #print "text[start]: ", text[start] + + # The text string of words that has been classified + datum = text[start] + for j in range(start, end): + datum += " " + text[j+1] + + # Line:TokenNumber of where the concept starts and ends + idx1 = "%d:%d" % (lineno, start) + idx2 = "%d:%d" % (lineno, end ) + + # Classification + label = concept + + # Print format + print >>f, "c=\"%s\" %s %s||t=\"%s\"" % (datum, idx1, idx2, label) + print "c=\"%s\" %s %s||t=\"%s\"" % (datum, idx1, idx2, label) @@ -236,6 +276,39 @@ def txtlist( self ): # # @return a list of lists of the concepts associated with each word from data def conlist( self ): + + # Cached for later calls + if self.concepts: return self.concepts + + # For each word, store a corresponding concept label + # Initially, all labels will be stored as 'none' + for line in self.data: + tmp = [] + for word in line.split(): + tmp.append('none') + self.concepts.append(tmp) + + #for i, elem in enumerate(self.data): + # print i, ": ", elem + + # Use the classifications to correct all mislabled 'none's + for classification in self.classifications: + + #print "classification: ", classification + #print "classification[0]: ", classification[0] + #print "classification[1]: ", classification[1] + #print "classification[2]: ", classification[2] + #print "classification[3]: ", classification[3] + + concept = classification[0] + lineno = classification[1] - 1 + start = classification[2] + end = classification[3] + + self.concepts[lineno][start] = concept + for i in range(start, end): + self.concepts[lineno][i+1] = concept + return self.concepts From 7d25c659cd6f92b6bc844b639304f2f9791dbd09 Mon Sep 17 00:00:00 2001 From: Tristan Naumann Date: Mon, 10 Feb 2014 21:06:41 -0500 Subject: [PATCH 014/393] Isolating features in separate file --- code/features.py | 233 ++++++++++++++++++++++++++++++++++++++++++++++ code/model.py | 238 ++--------------------------------------------- 2 files changed, 239 insertions(+), 232 deletions(-) create mode 100644 code/features.py diff --git a/code/features.py b/code/features.py new file mode 100644 index 0000000..8c1ea40 --- /dev/null +++ b/code/features.py @@ -0,0 +1,233 @@ +import nltk + +from sets import ImmutableSet + +from wordshape import * + +sentence_features = ImmutableSet(["pos", "stem_wordnet", "test_result", "prev", "next"]) +word_features = ImmutableSet(["word", "length", "mitre", "stem_porter", "stem_lancaster", "word_shape"]) +enabled_features = sentence_features | word_features + + # input: A sentence from a medical text file (list of words) + # output: A list of hash tables 
+def features_for_sentence(sentence): + features_list = [] + + for word in sentence: + features_list.append(features_for_word(word)) + + tags = None + for feature in sentence_features: + if feature not in enabled_features: + continue + + if feature == "pos": + tags = tags or nltk.pos_tag(sentence) + for i, features in enumerate(features_list): + tag = tags[i][1] + features[(feature, tag)] = 1 + + if feature == "stem_wordnet": + tags = tags or nltk.pos_tag(sentence) + morphy_tags = { + 'NN': nltk.corpus.reader.wordnet.NOUN, + 'JJ': nltk.corpus.reader.wordnet.ADJ, + 'VB': nltk.corpus.reader.wordnet.VERB, + 'RB': nltk.corpus.reader.wordnet.ADV} + morphy_tags = [(w, morphy_tags.setdefault(t[:2], nltk.corpus.reader.wordnet.NOUN)) for w, t in tags] + st = nltk.stem.WordNetLemmatizer() + for i, features in enumerate(features_list): + tag = morphy_tags[i] + features[(feature, st.lemmatize(*tag))] = 1 + + if feature == "test_result": + for index, features in enumerate(features_list): + right = " ".join([w for w in sentence[index:]]) + if is_test_result(right): + features[(feature, None)] = 1 + + + ngram_features = [{} for i in range(len(features_list))] + if "prev" in enabled_features: + prev = lambda f: {("prev_"+k[0], k[1]): v for k, v in f.items()} + prev_list = map(prev, features_list) + for i in range(len(features_list)): + if i == 0: + ngram_features[i][("prev", "*")] = 1 + else: + ngram_features[i].update(prev_list[i-1]) + + if "next" in enabled_features: + next = lambda f: {("next_"+k[0], k[1]): v for k, v in f.items()} + next_list = map(next, features_list) + for i in range(len(features_list)): + if i == len(features_list) - 1: + ngram_features[i][("next", "*")] = 1 + else: + ngram_features[i].update(next_list[i+1]) + + merged = lambda d1, d2: dict(d1.items() + d2.items()) + features_list = [merged(features_list[i], ngram_features[i]) + for i in range(len(features_list))] + + return features_list + + + + # input: a single word, like + # Admission + # output: A hash table of features + # features include: word, length, mitre, stem_porter +def features_for_word( word): + features = {'dummy': 1} # always have >0 dimensions + + # word_shape, word, length, mitre, stem_porter, stem_lancaster + for feature in word_features: + + # word_shape, test_result, word, pos, next, length, stem_wordnet, mitre, stem_porter, prev, stem_lancaster + if feature not in enabled_features: + continue + + if feature == "word": + features[(feature, word)] = 1 + + if feature == "length": + features[(feature, None)] = len(word) + + if feature == "mitre": + for f in mitre_features: + if re.search(mitre_features[f], word): + features[(feature, f)] = 1 + + if feature == "stem_porter": + st = nltk.stem.PorterStemmer() + features[(feature, st.stem(word))] = 1 + + if feature == "stem_lancaster": + st = nltk.stem.LancasterStemmer() + features[(feature, st.stem(word))] = 1 + + if feature == "stem_snowball": + st = nltk.stem.SnowballStemmer("english") + #features[(feature, st.stem(word))] = 1 + + if feature == "word_shape": + wordShapes = getWordShapes(word) + for i, shape in enumerate(wordShapes): + features[(feature + str(i), shape)] = 1 + + if feature == "metric_unit": + unit = 0 + if is_weight(word): + unit = 1 + elif is_size(word): + unit = 2 + features[(feature, None)] = unit + + # look for prognosis locaiton + #if feature == "radial_loc": + # THIS MIGHT BE BUGGED + # if is_prognosis_location(word): + # features[(feature, None)] = 1 + + if feature == "has_problem_form": + if has_problem_form(word): + features[(feature, 
None)] = 1 + + if feature == "def_class": + features[(feature, None)] = get_def_class(word) + + return features + +mitre_features = { + "INITCAP": r"^[A-Z].*$", + "ALLCAPS": r"^[A-Z]+$", + "CAPSMIX": r"^[A-Za-z]+$", + "HASDIGIT": r"^.*[0-9].*$", + "SINGLEDIGIT": r"^[0-9]$", + "DOUBLEDIGIT": r"^[0-9][0-9]$", + "FOURDIGITS": r"^[0-9][0-9][0-9][0-9]$", + "NATURALNUM": r"^[0-9]+$", + "REALNUM": r"^[0-9]+.[0-9]+$", + "ALPHANUM": r"^[0-9A-Za-z]+$", + "HASDASH": r"^.*-.*$", + "PUNCTUATION": r"^[^A-Za-z0-9]+$", + "PHONE1": r"^[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]$", + "PHONE2": r"^[0-9][0-9][0-9]-[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]$", + "FIVEDIGIT": r"^[0-9][0-9][0-9][0-9][0-9]", + "NOVOWELS": r"^[^AaEeIiOoUu]+$", + "HASDASHNUMALPHA": r"^.*[A-z].*-.*[0-9].*$ | *.[0-9].*-.*[0-9].*$", + "DATESEPERATOR": r"^[-/]$", +} + +def is_test_result( context): + # note: make spaces optional? + regex = r"^[A-Za-z]+( )*(-|--|:|was|of|\*|>|<|more than|less than)( )*[0-9]+(%)*" + if not re.search(regex, context): + return re.search(r"^[A-Za-z]+ was (positive|negative)", context) + return True + +def is_weight( word): + regex = r"^[0-9]*(mg|g|milligrams|grams)$" + return re.search(regex, word) + +def is_size( word): + regex = r"^[0-9]*(mm|cm|millimeters|centimeters)$" + return re.search(regex, word) + +def is_prognosis_location( word): + regex = r"^(c|C)[0-9]+(-(c|C)[0-9]+)*$" + return re.search(regex, word) + +def has_problem_form( word): + regex = r".*(ic|is)$" + return re.search(regex, word) + +# checks for a definitive classification at the word level +def get_def_class( word): + test_terms = { + "eval", "evaluation", "evaluations", + "sat", "sats", "saturation", + "exam", "exams", + "rate", "rates", + "test", "tests", + "xray", "xrays", + "screen", "screens", + "level", "levels", + "tox" + } + problem_terms = { + "swelling", + "wound", "wounds", + "symptom", "symptoms", + "shifts", "failure", + "insufficiency", "insufficiencies", + "mass", "masses", + "aneurysm", "aneurysms", + "ulcer", "ulcers", + "trama", "cancer", + "disease", "diseased", + "bacterial", "viral", + "syndrome", "syndromes", + "pain", "pains" + "burns", "burned", + "broken", "fractured" + } + treatment_terms = { + "therapy", + "replacement", + "anesthesia", + "supplement", "supplemental", + "vaccine", "vaccines" + "dose", "doses", + "shot", "shots", + "medication", "medicine", + "treament", "treatments" + } + if word.lower() in test_terms: + return 1 + elif word.lower() in problem_terms: + return 2 + elif word.lower() in treatment_terms: + return 3 + return 0 diff --git a/code/model.py b/code/model.py index 782b462..e695358 100644 --- a/code/model.py +++ b/code/model.py @@ -17,6 +17,7 @@ from wordshape import * import libml +import features class Model: sentence_features = ImmutableSet(["pos", "stem_wordnet", "test_result", "prev", "next"]) @@ -71,14 +72,14 @@ def train(self, note): # rows is a list of a list of hash tables rows = [] for sentence in data: - rows.append(self.features_for_sentence(sentence)) + rows.append(features.features_for_sentence(sentence)) # each list of hash tables for row in rows: # each hash table - for features in row: + for feature_names in row: # each key in hash table - for feature in features: + for feature in feature_names: # I think new word encountered if feature not in self.vocab: self.vocab[feature] = len(self.vocab) + 1 @@ -115,7 +116,7 @@ def predict(self, note): # Something to do with calibrating the model rows = [] # rows <- list of a list of hash tables (feature vectors) for sentence in data: - 
rows.append(self.features_for_sentence(sentence)) + rows.append(features.features_for_sentence(sentence)) feat_lu = lambda f: {self.vocab[item]: f[item] for item in f if item in self.vocab} @@ -144,231 +145,4 @@ def predict(self, note): # The new labels_list is a translated version - return labels_list - - - - - # input: A sentence from a medical text file (list of words) - # output: A list of hash tables - def features_for_sentence(self, sentence): - features_list = [] - - for word in sentence: - features_list.append(self.features_for_word(word)) - - tags = None - for feature in Model.sentence_features: - if feature not in self.enabled_features: - continue - - if feature == "pos": - tags = tags or nltk.pos_tag(sentence) - for i, features in enumerate(features_list): - tag = tags[i][1] - features[(feature, tag)] = 1 - - if feature == "stem_wordnet": - tags = tags or nltk.pos_tag(sentence) - morphy_tags = { - 'NN': nltk.corpus.reader.wordnet.NOUN, - 'JJ': nltk.corpus.reader.wordnet.ADJ, - 'VB': nltk.corpus.reader.wordnet.VERB, - 'RB': nltk.corpus.reader.wordnet.ADV} - morphy_tags = [(w, morphy_tags.setdefault(t[:2], nltk.corpus.reader.wordnet.NOUN)) for w, t in tags] - st = nltk.stem.WordNetLemmatizer() - for i, features in enumerate(features_list): - tag = morphy_tags[i] - features[(feature, st.lemmatize(*tag))] = 1 - - if feature == "test_result": - for index, features in enumerate(features_list): - right = " ".join([w for w in sentence[index:]]) - if self.is_test_result(right): - features[(feature, None)] = 1 - - - ngram_features = [{} for i in range(len(features_list))] - if "prev" in self.enabled_features: - prev = lambda f: {("prev_"+k[0], k[1]): v for k, v in f.items()} - prev_list = map(prev, features_list) - for i in range(len(features_list)): - if i == 0: - ngram_features[i][("prev", "*")] = 1 - else: - ngram_features[i].update(prev_list[i-1]) - - if "next" in self.enabled_features: - next = lambda f: {("next_"+k[0], k[1]): v for k, v in f.items()} - next_list = map(next, features_list) - for i in range(len(features_list)): - if i == len(features_list) - 1: - ngram_features[i][("next", "*")] = 1 - else: - ngram_features[i].update(next_list[i+1]) - - merged = lambda d1, d2: dict(d1.items() + d2.items()) - features_list = [merged(features_list[i], ngram_features[i]) - for i in range(len(features_list))] - - return features_list - - - - # input: a single word, like - # Admission - # output: A hash table of features - # features include: word, length, mitre, stem_porter - def features_for_word(self, word): - features = {'dummy': 1} # always have >0 dimensions - - # word_shape, word, length, mitre, stem_porter, stem_lancaster - for feature in Model.word_features: - - # word_shape, test_result, word, pos, next, length, stem_wordnet, mitre, stem_porter, prev, stem_lancaster - if feature not in self.enabled_features: - continue - - if feature == "word": - features[(feature, word)] = 1 - - if feature == "length": - features[(feature, None)] = len(word) - - if feature == "mitre": - for f in Model.mitre_features: - if re.search(Model.mitre_features[f], word): - features[(feature, f)] = 1 - - if feature == "stem_porter": - st = nltk.stem.PorterStemmer() - features[(feature, st.stem(word))] = 1 - - if feature == "stem_lancaster": - st = nltk.stem.LancasterStemmer() - features[(feature, st.stem(word))] = 1 - - if feature == "stem_snowball": - st = nltk.stem.SnowballStemmer("english") - #features[(feature, st.stem(word))] = 1 - - if feature == "word_shape": - wordShapes = getWordShapes(word) - 
for i, shape in enumerate(wordShapes): - features[(feature + str(i), shape)] = 1 - - if feature == "metric_unit": - unit = 0 - if self.is_weight(word): - unit = 1 - elif self.is_size(word): - unit = 2 - features[(feature, None)] = unit - - # look for prognosis locaiton - #if feature == "radial_loc": - # THIS MIGHT BE BUGGED - # if self.is_prognosis_location(word): - # features[(feature, None)] = 1 - - if feature == "has_problem_form": - if self.has_problem_form(word): - features[(feature, None)] = 1 - - if feature == "def_class": - features[(feature, None)] = self.get_def_class(word) - - return features - - mitre_features = { - "INITCAP": r"^[A-Z].*$", - "ALLCAPS": r"^[A-Z]+$", - "CAPSMIX": r"^[A-Za-z]+$", - "HASDIGIT": r"^.*[0-9].*$", - "SINGLEDIGIT": r"^[0-9]$", - "DOUBLEDIGIT": r"^[0-9][0-9]$", - "FOURDIGITS": r"^[0-9][0-9][0-9][0-9]$", - "NATURALNUM": r"^[0-9]+$", - "REALNUM": r"^[0-9]+.[0-9]+$", - "ALPHANUM": r"^[0-9A-Za-z]+$", - "HASDASH": r"^.*-.*$", - "PUNCTUATION": r"^[^A-Za-z0-9]+$", - "PHONE1": r"^[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]$", - "PHONE2": r"^[0-9][0-9][0-9]-[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]$", - "FIVEDIGIT": r"^[0-9][0-9][0-9][0-9][0-9]", - "NOVOWELS": r"^[^AaEeIiOoUu]+$", - "HASDASHNUMALPHA": r"^.*[A-z].*-.*[0-9].*$ | *.[0-9].*-.*[0-9].*$", - "DATESEPERATOR": r"^[-/]$", - } - - def is_test_result(self, context): - # note: make spaces optional? - regex = r"^[A-Za-z]+( )*(-|--|:|was|of|\*|>|<|more than|less than)( )*[0-9]+(%)*" - if not re.search(regex, context): - return re.search(r"^[A-Za-z]+ was (positive|negative)", context) - return True - - def is_weight(self, word): - regex = r"^[0-9]*(mg|g|milligrams|grams)$" - return re.search(regex, word) - - def is_size(self, word): - regex = r"^[0-9]*(mm|cm|millimeters|centimeters)$" - return re.search(regex, word) - - def is_prognosis_location(self, word): - regex = r"^(c|C)[0-9]+(-(c|C)[0-9]+)*$" - return re.search(regex, word) - - def has_problem_form(self, word): - regex = r".*(ic|is)$" - return re.search(regex, word) - - # checks for a definitive classification at the word level - def get_def_class(self, word): - test_terms = { - "eval", "evaluation", "evaluations", - "sat", "sats", "saturation", - "exam", "exams", - "rate", "rates", - "test", "tests", - "xray", "xrays", - "screen", "screens", - "level", "levels", - "tox" - } - problem_terms = { - "swelling", - "wound", "wounds", - "symptom", "symptoms", - "shifts", "failure", - "insufficiency", "insufficiencies", - "mass", "masses", - "aneurysm", "aneurysms", - "ulcer", "ulcers", - "trama", "cancer", - "disease", "diseased", - "bacterial", "viral", - "syndrome", "syndromes", - "pain", "pains" - "burns", "burned", - "broken", "fractured" - } - treatment_terms = { - "therapy", - "replacement", - "anesthesia", - "supplement", "supplemental", - "vaccine", "vaccines" - "dose", "doses", - "shot", "shots", - "medication", "medicine", - "treament", "treatments" - } - if word.lower() in test_terms: - return 1 - elif word.lower() in problem_terms: - return 2 - elif word.lower() in treatment_terms: - return 3 - return 0 + return labels_list \ No newline at end of file From 3c0513fcfbe2c5e3ca5e68ead92af421b88c4d4f Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Tue, 18 Feb 2014 14:35:25 -0500 Subject: [PATCH 015/393] Separated features from model.py - features are currently incomplete --- code/clicon_features.py | 372 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 372 insertions(+) create mode 100644 code/clicon_features.py diff --git a/code/clicon_features.py 
b/code/clicon_features.py new file mode 100644 index 0000000..4d624a3 --- /dev/null +++ b/code/clicon_features.py @@ -0,0 +1,372 @@ +###################################################################### +# CliCon - clicon_features.py # +# # +# Willie Boag wboag@cs.uml.edu # +# # +# Purpose: Isolate the model's features from model.py # +###################################################################### + + +__author__ = 'Willie Boag' +__date__ = 'Jan. 27, 2014' + + + +import nltk +import re + +import clicon_genia_interface + + +class FeatureWrapper: + + + # Run the GENIA tagger on the given data + def __init__(self, data): + #self.GENIA_features = clicon_genia_interface.genia(data) + self.GENIA_counter = 0 + + + + # Iterate through GENIA Tagger features + def next_GENIA_line(self): + + # End of list - reset counter & return None + if self.GENIA_counter == len(self.GENIA_features): + self.GENIA_counter = 0 + return None + + # Advance to next line + self.GENIA_counter += 1 + + return self.GENIA_features[self.GENIA_counter-1] + + + + + # IOB_features() + # + # input: A sentence + # output: A hash table of features + def IOB_features_for_sentence(self, sentence): + + isProse = self.prose_sentence(sentence) + + # Different features depending on whether sentence is 'prose' + if isProse: + line_features = self.IOB_prose_features_for_sentence(sentence) + else: + line_features = self.IOB_nonprose_features_for_sentence(sentence) + + # Return features as well as indication of whether it is prose or not + return (isProse, line_features) + + + + # IOB_prose_features_for_sentence() + # + # input: A sentence + # output: A hash table of features + def IOB_prose_features_for_sentence(self, sentence): + + + # List of dictionaries of features + line_features = [ {('dummy',1):1} for _ in sentence ] + + # Feature: Generic# stemmed word + for i,word in enumerate(sentence): + generic = re.sub('[0-9]','0',word) + line_features[i].update( { ('Generic#',generic) : 1}) + + # Feature: Previous word + line_features[0].update( { ( 'prev_word', '' ) : 1} ) + for i in range(1,len(sentence)): + line_features[i].update( { ('uncased_prev_word',sentence[i-1].lower()) : 1} ) + + # Feature Uncased previous word + line_features[0].update( { ('uncased_prev_word', '' ) : 1} ) + for i in range(1,len(sentence)): + line_features[i].update( { ( 'prev_word',sentence[i-1] ) : 1} ) + + # Feature: Last two leters of word + for word in sentence: + line_features[i].update( { ('last_two_letters',word[-2:]) : 1} ) + + # Feature: Previous POS + pos_tagged = nltk.pos_tag(sentence) + line_features[0].update( { ('prev_POS','') : 1} ) + for i in range(1,len(sentence)): + line_features[i].update( { ('prev_POS',pos_tagged[i-1]) : 1} ) + + # Feature: 1-token part-of-speech context + for (i,(_,pos)) in enumerate(pos_tagged): + line_features[i].update( { ('pos',pos) : 1} ) + + # Feature: UMLS concept hypernyms + + # GENIA features + for i in range(len(sentence)): + + # FIXME - Do not call GENIA features right now + # (to speed up runtime during development) + continue + + # Get the GENIA features of the current sentence + genia_feats = self.next_GENIA_line() + if not genia_feats: genia_feats = self.next_GENIA_line() + + + # Feature: Current word's GENIA features + keys = ['GENIA-stem','GENIA-POS','GENIA-chunktag'] + curr = genia_feats[i] + output = dict( (('curr-'+k, curr[k]), 1) for k in keys if k in curr) + + # Feature: Previous word's GENIA features + if i: + prev = genia_feats[i] + output = dict( (('prev-'+k, prev[k]), 1) for k in keys if k in 
prev) + else: + output = dict( (('prev-'+k, ''), 1) for k in keys if k in curr) + + # Feature: Next word's GENIA stem + # Note: This is done retroactively, updating the previous token + if i > 0: + line_features[i-1].update( {('next-GENIA-stem',curr['GENIA-stem']) : 1} ) + # Do not accidentally skip the final token + if i == (len(sentence) - 1): + line_features[i].update( { ('next-GENIA-stem','') : 1} ) + + line_features[i].update(output) + + # MetaMap semantic type + + + return line_features + + + + # IOB_nonprose_features_for_sentence() + # + # input: A sentence + # output: A hash table of features + def IOB_nonprose_features_for_sentence(self, sentence): + + # Get the GENIA features of the current sentence + # The GENIA featurs are not used for nonprose, but it keeps things aligned for the prose + #genia_feats = self.next_GENIA_line() + #if not genia_feats: genia_feats = self.next_GENIA_line() + + # List of dictionaries of features + line_features = [ {('dummy',1):1} for _ in sentence ] + + # Feature: The word, itself + for i,word in enumerate(sentence): + line_features[i].update( { ('word',word.lower()) : 1} ) + + # Feature: QANN uncased word + + # Feature: Uncased previous word + line_features[0].update( { ( 'prev_word','') : 1} ) + line_features[0].update( { ('uncased_prev_word','') : 1} ) + for i in range(1,len(sentence)): + line_features[i].update( { ('uncased_prev_word',sentence[i-1].lower()) : 1} ) + + # 3-token part-of-speech context + + # MetaMap semantic type + + # MetaMap CUI + + # Feature: Previous POS + pos_tagged = nltk.pos_tag(sentence) + line_features[0].update( { ('prev_POS','') : 1} ) + for i in range(1,len(sentence)): + line_features[i].update( { ('prev_POS',pos_tagged[i-1]) : 1} ) + + return line_features + + + + # prose_sentence() + # + # input: A sentence + # output: Boolean yes/no + def prose_sentence(self, sentence): + + if sentence[-1] == '.' 
or sentence[-1] == '?':
+            return True
+        elif sentence[-1] == ':':
+            return False
+        elif len(sentence) <= 5:
+            return False
+        elif self.at_least_half_nonprose(sentence):
+            return True
+        else:
+            return False
+
+
+    # at_least_half_nonprose()
+    #
+    # input:  A sentence
+    # output: A Boolean yes/no
+    def at_least_half_nonprose(self, sentence):
+
+        count = 0
+
+        for word in sentence:
+            if self.prose_word(word):
+                count += 1
+
+        if count >= len(sentence):
+            return True
+        else:
+            return False
+
+
+    # prose_word()
+    #
+    # input:  A word
+    # output: Boolean yes/no
+    def prose_word(self, word):
+
+        # Punctuation
+        for punc in ".?,!:\"'":
+            if re.search(punc, word):
+                return False
+
+        # Digit
+        if re.match('\d', word):
+            return False
+
+        # All uppercase
+        if word == word.upper():
+            return False
+
+        # Else
+        return True
+
+
+
+    # generate_chunks()
+    #
+    # input:  Three arguments:
+    #            1) A list of list of words (the data of the file)
+    #            2) A list of list of IOB tags (one-to-one with list from arg 1)
+    #            3) A list of list of concepts (one-to-one with list from arg 1)
+    #
+    #
+    # output: A 3-tuple of:
+    #            1) A list of list of chunks (word token phrases)
+    #            2) A list of list of concepts (one-to-one with list from ret 1)
+    #            3) A list of all indices into 1 that have been deemed nontrivial
+    def generate_chunks(self, data, IOB_tags, labels=None):
+
+        # List of list of tokens (similar to 'text', but concepts are grouped)
+        text_chunks = []
+
+        # one-to-one concept classification with text_chunks
+        if labels:
+            concept_chunks = []
+        else:
+            concept_chunks = None
+
+        # List of (line,token) pairs for classifications that are not 'none'
+        hits = []
+
+        # Create tokens of full concept boundaries for second classifier
+        for i, concept_line in enumerate(IOB_tags):
+
+            # One line of 'chunked'
+            line_of_text_chunks = []
+            if labels: line_of_concept_chunks = []
+
+            # stores the current streak
+            queue = []
+
+            # Necessary when multiple concepts are on one line
+            # The second concept's j index is relative to a word-split array
+            # The j of the new token should be relative to how many chunks there are
+            chunk_offset = 0
+
+            # C-style array indexing. Probably could be done a better way.
+ # Used because I needed the ability of lookahead + for j in range(len(concept_line)): + + # Outside + # concet_line in "012" instead of "IOB" + if concept_line[j] == 'O': + line_of_text_chunks.append(data[i][j]) + if labels: line_of_concept_chunks.append('none') + + # Beginning of a concept boundary + else: + + # Increase size of current streak + queue.append(data[i][j]) + + # lookahead (check if streak will continue) + if (j+1 == len(concept_line))or \ + (concept_line[j+1] != 'I'): # end of classifiation + + # Add full concept token + line_of_text_chunks.append(' '.join(queue)) + if labels: line_of_concept_chunks.append(labels[i][j]) + + # Store indices of detected concept + hits.append( ( i, j + 1-len(queue) - chunk_offset ) ) + + # Reminder: used in the case that a concept follows a + # multi-word concept on the same line + chunk_offset += len(queue) - 1 + + # Reset streak + queue = [] + + text_chunks.append(line_of_text_chunks) + if labels: concept_chunks.append(line_of_concept_chunks) + + + return (text_chunks, concept_chunks, hits) + + + + + # concept_features() + # + # input: A sentence/line from a medical text file (list of chunks) + # An index into the sentence to indentify the given chunk + # output: A list of hash tables (one hash table per word) + def concept_features(self, sentence, ind): + + retVal = {} + + retVal.update( { ('chunk',sentence[ind]) : 1 } ) + + # Feature: Uncased unigrams + for i,word in enumerate( sentence[ind].split() ): + featname = 'unigram-%d' % i + retVal.update( { (featname, word.lower()) : 1 } ) + + # Feature: First four letters of each word + prefix_list = [ word[0:4] for word in sentence[ind].split() ] + for i,word in enumerate(prefix_list): + featname = 'first-four-letters-%d' % i + retVal.update( { (featname, word) : 1 } ) + + # Feature: Stemmed previous word + + + # Feature: Uncased previous bigram + + + # Feature: Argument type + nearest predicate + + + # Feature: UMLS concept type + + + # Feature Wikipedia concept type + + return retVal + + From 552964098dd68860c744a3f1a05a254d028ade50 Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Tue, 18 Feb 2014 14:36:05 -0500 Subject: [PATCH 016/393] Interface to GENIA tagger for features --- code/clicon_genia_interface.py | 102 +++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 code/clicon_genia_interface.py diff --git a/code/clicon_genia_interface.py b/code/clicon_genia_interface.py new file mode 100644 index 0000000..8bc0633 --- /dev/null +++ b/code/clicon_genia_interface.py @@ -0,0 +1,102 @@ +###################################################################### +# CliCon - clicon_genia_interface.py # +# # +# Willie Boag wboag@cs.uml.edu # +# # +# Purpose: Provide a way for Python to utilize the output of the # +# GENIA Tagger # +# # +# Genia Tagger: http://www.nactem.ac.uk/tsujii/GENIA/tagger/ # +###################################################################### + + + +__author__ = 'Willie Boag' +__date__ = 'Jan. 27, 2014' + +import os +import sys + + +def main(): + + f = open('/home/wboag/ConceptExtraction-master/data/concept_assertion_relation_training_data/beth/txt/record-20.txt', 'r') + + txt = f.readlines() + + data = [] + for line in txt: + data.append(line.split()) + + for (line,linedict) in zip(txt,genia(data)): + print line.split(), '\n' + for d in linedict: + print d + print '---\n\n' + + + +# +# genia() +# +# Call the genia tagger and return its output in python format +# +def genia( data ): + + ''' + @param data. 
A list of list of strings (lines of words from a file) + @return A list of dcitionaries of the genia tagger's output. + ''' + + + # FIXME - write list to file and then feed it to GENIA + # FIXME - hard coded directory!! + ftmp = open('/home/wboag/geniatagger-3.0.1/DELETE-THIS.txt', 'w') + for line in data: ftmp.write(' '.join(line) + '\n') + ftmp.close() + + + # FIXME - hard coded directory!! + genia_dir = '/home/wboag/geniatagger-3.0.1' + stream = os.popen('cd %s ; ./geniatagger -nt DELETE-THIS.txt' % genia_dir) + + # Process each line of output + retlist = [] + i = 0 + j = 0 + fline = [] + old = [] + for line in stream.readlines(): + + line = line.split() + + # Empty line + if line == []: continue + + # One dictionary per word in the file + output = {'GENIA-word' :line[0], + 'GENIA-stem' :line[1], + 'GENIA-POS' :line[2], + 'GENIA-chunktag':line[3], + 'GENIA-NEtag' :line[4]} + + + # Add token to list + fline.append(output) + j += 1 + if j == len(data[i]): + #print data[i] + #print fline + #print '---' + i += 1 + j = 0 + retlist.append(fline) + fline = [] + + return retlist + + + + +if __name__ == '__main__': + main() From 2c4ab10f6fe019b1f2b7298c230ba612c242fb7b Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Tue, 18 Feb 2014 14:39:13 -0500 Subject: [PATCH 017/393] Added data member for handling chunking --- code/note.py | 71 +++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 67 insertions(+), 4 deletions(-) diff --git a/code/note.py b/code/note.py index 98915d7..24ba1ec 100644 --- a/code/note.py +++ b/code/note.py @@ -10,9 +10,11 @@ def __init__(self): # data - A list of lines directly from the file # concepts - A one-to-one correspondence of each word's concept # classifications - A list of tuples that convey concept labels - self.data = [] - self.concepts = [] + # boundaries - A one-to-one correspondence of each word's BIO status + self.data = [] + self.concepts = [] self.classifications = [] + self.boundaries = [] @@ -28,6 +30,9 @@ def read_i2b2(self, txt, con=None): # Add sentence to the data list self.data.append(line) + # Make put a temporary 'O' in each spot + self.boundaries.append( ['O' for _ in line.split()] ) + # If an accompanying concept file was specified, read it if con: @@ -50,11 +55,24 @@ def read_i2b2(self, txt, con=None): # starttok # endtok start = int(start[1]) - end = int(end[1]) + end = int( end[1]) # Add the classification to the Note object self.classifications.append( (con,l,start,end) ) + #print "txt: ", txt + #print "l: ", l + #print "start: ", start + #print "end: ", end + #print "line: ", self.data[l-1] + + # Beginning of a concept + self.boundaries[l-1][start] = 'B' + + # Inside of a concept + for i in range(start,end): + self.boundaries[l-1][i+1] = 'I' + #print "\n" + "-" * 80 @@ -73,6 +91,7 @@ def write_i2b2(self, con, labels): #for i, elem in enumerate(self.data): # print i, ": ", elem + with open(con, 'w') as f: for classification in labels: @@ -108,10 +127,47 @@ def write_i2b2(self, con, labels): # Print format print >>f, "c=\"%s\" %s %s||t=\"%s\"" % (datum, idx1, idx2, label) - print "c=\"%s\" %s %s||t=\"%s\"" % (datum, idx1, idx2, label) + #print "c=\"%s\" %s %s||t=\"%s\"" % (datum, idx1, idx2, label) + # Note::write_BIOs_labels() + # + # @param _. Filename. Ignore it. + # @param labels. 
A list of list of BIOs labels + # + # Print the prediction of BIOs concept boundary classification + def write_BIOs_labels(self, _, labels): + + # List of list of words (line-by-line) + text = self.txtlist() + + for i, concept_line in enumerate(labels): + + # stores the current streak + queue = [] + + # C-style array indexing. Probably could be done a better way. + # Used because I needed the ability of lookahead + for j in range(len(concept_line)): + + # Beginning of a concept boundary + if concept_line[j] != 'O': + + # Increase size of current streak + queue.append(text[i][j]) + + # lookahead (check if streak will continue) + if (j+1 == len(concept_line))or \ + (concept_line[j+1] != 'I'): + print '%d:%d %d:%d' % (i+1,j-len(queue)+1,i+1,j) + print ' '.join(queue) + print '' + # Reset streak + queue = [] + + + # Note::read_plain() # # @param txt. A file path for the plain tokenized medical record @@ -313,6 +369,13 @@ def conlist( self ): + # boundaries() + # + # @return a list of lists of the BIO vals associated with each word from data + def boundlist( self ): + return self.boundaries + + # For iterating def __iter__(self): return iter(self.data) From e9f62739e5fefd9e36f7e0576b609fa8e0d78c05 Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Tue, 18 Feb 2014 14:40:47 -0500 Subject: [PATCH 018/393] Major overhaul. Isolated features away. Added framework for two passes --- code/model.py | 735 +++++++++++++++++++++++--------------------------- 1 file changed, 337 insertions(+), 398 deletions(-) diff --git a/code/model.py b/code/model.py index 4913cb7..092e6f9 100644 --- a/code/model.py +++ b/code/model.py @@ -1,41 +1,15 @@ from __future__ import with_statement -import time import os import pickle -import re -import subprocess -import sys -import nltk -import nltk.corpus.reader -import nltk.stem import helper - -from sets import Set -from sets import ImmutableSet - -from wordshape import * - import libml +import clicon_features -#count = 0 -#count2 = 0 -#def my_sort(pair1, pair2): -# a = pair1[0][0] -# b = pair2[0][0] -# if a == b: return 0 -# if a < b: return -1 -# return 1 class Model: - sentence_features = ImmutableSet(["pos", "stem_wordnet", "test_result", "prev", "next"]) - word_features = ImmutableSet(["word", "length", "mitre", "stem_porter", "stem_lancaster", "word_shape"]) - # THESE ARE FEATURES I TRIED THAT DON'T LOOK THAT PROMISING - # I have some faith in "metric_unit" and "has_problem_form" - # "radial_loc" may be too rare and "def_class" could be over fitting - # "metric_unit", "radial_loc", "has_problem_form", "def_class" labels = { "none":0, @@ -45,6 +19,17 @@ class Model: } reverse_labels = {v:k for k, v in labels.items()} + + + # IOBs labels + IOBs_labels = { + 'O':0, + 'B':1, + 'I':2 + } + reverse_IOBs_labels = {v:k for k,v in IOBs_labels.items()} + + @staticmethod def load(filename='awesome.model'): with open(filename) as model: @@ -52,6 +37,7 @@ def load(filename='awesome.model'): model.filename = filename return model + # Constructor def __init__(self, filename='awesome.model', type=libml.ALL): model_directory = os.path.dirname(filename) @@ -61,96 +47,178 @@ def __init__(self, filename='awesome.model', type=libml.ALL): self.filename = os.path.realpath(filename) self.type = type - self.vocab = {} + self.IOB_vocab = {} + self.concept_vocab = {} - self.enabled_features = Model.sentence_features | Model.word_features + # Model::train() # - # @param note. A Note object that has data for training the model - def train(self, note): + # @param notes. 
A Note object that has data for training the model + def train(self, notes): # Get the data and annotations from the Note object # data - A list of list of the medical text's words # labels - A list of list of concepts (1:1 with data) - data = note.txtlist() - labels = note.conlist() - - - # rows is a list of a list of hash tables - rows = [] - for sentence in data: - rows.append(self.features_for_sentence(sentence)) - - #print "\n" + "-" * 80 + "\n" - #print "len(rows)" - #print len(rows) - - #print "\n" + "-" * 80 + "\n" - #print "rows[0]" - #for elem in rows[0]: - # for it in sorted(elem.items(),my_sort) : print it - # print "\n" - #print "\n" + "-" * 80 + "\n" - - #print "\n" + "-" * 80 + "\n" - #print "len(rows[0])" - #print len(rows[0]) - #print "\n" + "-" * 80 + "\n" - - #print "rows[0][0]" - #print rows[0][0] - #print "\n" + "-" * 80 + "\n" - - #print "self.vocab" - #print self.vocab - #print "\n" + "-" * 80 + "\n" - #print "\n" * 4 + data = [] + labels = [] + chunks = [] + for note in notes: + data += note.txtlist() + labels += note.conlist() + chunks += note.boundlist() + + + # Create object that is a wrapper for the features + feat_obj = clicon_features.FeatureWrapper(data) + + + # IOB tagging + prose = [] + nonprose = [] + prose_line_numbers = [] + nonprose_line_numbers = [] + for i,line in enumerate(data): + isProse,feats = feat_obj.IOB_features_for_sentence(line) + if isProse: + prose.append( feats ) + prose_line_numbers.append(i) + else: + nonprose.append( feats ) + nonprose_line_numbers.append(i) # each list of hash tables (one list per line in file) - for row in rows: + #for row in rows: + for row in prose + nonprose: # each hash table (one hash table per word in the line) for features in row: # each key (tuple) pair in hash table (one key per feature) for feature in features: - # assigning a unique number to each feature - # ex. Here are three key,value pairs that go into self.vocab - # (('word_shape4', 'WT-Xx'), 2) - # (('next_pos', 'NNP'), 3) - # (('next_word', 'Date'), 4) - if feature not in self.vocab: - self.vocab[feature] = len(self.vocab) + 1 - - #def tmp_sort(pair1, pair2): - # return pair1[1] - pair2[1] - - #print "self.vocab" - #print self.vocab - #print "\n" + "-" * 80 + "\n" - #for elem in sorted( self.vocab.items(), tmp_sort ) : print elem - #print "\n" + "-" * 80 + "\n" + # assigning a unique number to each (feature,value) pair + if feature not in self.IOB_vocab: + self.IOB_vocab[feature] = len(self.IOB_vocab) + 1 - # A list of a list encodings of concept labels (ex. 'none' => 0) - # [ [0, 0, 0], [0], [0, 0, 0], [0], [0, 0, 0, 0, 0, 2, 2, 0, 1] ] - label_lu = lambda l: Model.labels[l] - labels = [map(label_lu, x) for x in labels] + # IOB labels + # A list of a list encodings of concept labels (ex. 
'B' => 1) + # [ [0, 0, 0], [0], [0, 0, 0], [0], [0, 0, 0, 0, 0, 1, 2, 0, 1] ] + label_lu = lambda l: Model.IOBs_labels[l] + chunks = [map(label_lu, x) for x in chunks] - # list of a list of hash tables (all keys & values now numbers) - feat_lu = lambda f: {self.vocab[item]:f[item] for item in f} - rows = [map(feat_lu, x) for x in rows] + feat_lu = lambda f: {self.IOB_vocab[item]:f[item] for item in f} + prose = [map(feat_lu, x) for x in prose] + nonprose = [map(feat_lu, x) for x in nonprose] - libml.write_features(self.filename, rows, labels, self.type) + # Segregate chunks into 'Prose CHUNKS' and 'Nonprose CHUNKS' + prose_ind = 0 + nonprose_ind = 0 + pchunks = [] + nchunks = [] + p_end_flag = False + n_end_flag = False + for i,line in enumerate(chunks): + if (not p_end_flag) and (i == prose_line_numbers[prose_ind]): + pchunks.append(line) + prose_ind += 1 + if prose_ind == len(prose_line_numbers): p_end_flag = True + elif (not n_end_flag) and (i == nonprose_line_numbers[nonprose_ind]): + nchunks.append(line) + nonprose_ind += 1 + if nonprose_ind == len(nonprose_line_numbers): n_end_flag = True + else: + # Should never really get here + print 'Line #%d is neither prose nor nonprose!' % i + print line, '\n' + + + prose_model = self.filename + '1' + nonprose_model = self.filename + '2' + + libml.write_features( prose_model, prose, pchunks, self.type) + libml.write_features(nonprose_model, nonprose, nchunks, self.type) + + libml.train( prose_model, self.type) + libml.train(nonprose_model, self.type) + + + #################### + # Second Pass # + #################### + + + # IOB labels + # undo encodings of concept labels (ex. 1 => 'B') + label_lu = lambda l: Model.reverse_IOBs_labels[l] + chunks = [map(label_lu, x) for x in chunks] + + + # Merge 'B' words with its 'I's (and account for minor change in indices) + tmp = feat_obj.generate_chunks(data,chunks,labels) + + # text_chunks - a merged text (highly similiar to data, except merged) + # concept_chunks - one-to-one concept classification with text_chunks + # hits - one-to-one concept token indices with text_chunks + text_chunks, concept_chunks, hits = tmp + + + # rows is a list of a list of hash tables + # it is used for holding the features that will be used for training + rows = [] + text_matches = [] + concept_matches = [] + for hit in hits: + i,j = hit + rows.append(feat_obj.concept_features(text_chunks[i], j)) + + text_matches.append(text_chunks[i][j]) + concept_matches.append(concept_chunks[i][j]) + + + # each hash table (one hash table per word in the line) + for features in rows: + # each key (tuple) pair in hash table (one key per feature) + for feature in features: + # assigning a unique number to each (feature,value) pair + if feature not in self.concept_vocab: + self.concept_vocab[feature] = len(self.concept_vocab) + 1 + + + # Encode concept labels to numbers (ex. 'treatment' => 1) + # NOTE: There are no longer 'none' classifications + # ex. 
[1,2,1] + labels = [] + for con in concept_matches: + #print con + tmp = Model.labels[con] + labels.append(tmp) + + + # Purpose: Encode something like ('chunk', 'rehabilitation') as a unique + # number, as determined by the self.concept_vocab hash table + #feat_lu = lambda f: {self.concept_vocab[item]:f[item] for item in f} + #rows = [map(feat_lu, x) for x in rows] + tmp_rows = [] + for fdict in rows: + #print fdict + tmp = {self.concept_vocab[key]:fdict[key] for key in fdict} + tmp_rows.append(tmp) + rows = tmp_rows + + # Write second pass model to file + second_pass_model = self.filename + '3' + libml.write_features(second_pass_model, [rows], [labels], self.type) + libml.train(second_pass_model, self.type) + + + # Pickle dump with open(self.filename, "w") as model: pickle.dump(self, model) - # Train the model - libml.train(self.filename, self.type) @@ -162,357 +230,228 @@ def predict(self, note): # data - A list of list of the medical text's words data = note.txtlist() - # rows <- list of a list of hash tables - # each list of hash tables (one list per line in file) - # each hash table (one hash table per word in the line) - # each key (tuple) pair in hash table (one key per feature) - rows = [] - for sentence in data: - rows.append(self.features_for_sentence(sentence)) + # A wrapper for features + feat_obj = clicon_features.FeatureWrapper(data) + + # prose and nonprose - each store a list of sentence feature dicts + prose = [] + nonprose = [] + prose_line_numbers = [] + nonprose_line_numbers = [] + for i,line in enumerate(data): + # returns both the feature dict AND whether the sentence was prose + isProse,feats = feat_obj.IOB_features_for_sentence(line) + if isProse: + prose.append( feats ) + prose_line_numbers.append(i) + else: + nonprose.append( feats ) + nonprose_line_numbers.append(i) + + + # FIXME + # Not sure if this should be reset, but it makes sense to me to do it + # But why is it a data member if it shouldnt persist + self.IOB_vocab = {} + + # Create a mapping of each (feature,value) pair to a unique number + for row in prose + nonprose: + for features in row: + for feature in features: + if feature not in self.IOB_vocab: + self.IOB_vocab[feature] = len(self.IOB_vocab) + 1 - feat_lu = lambda f: {self.vocab[item]:f[item] for item in f if item in self.vocab} - rows = [map(feat_lu, x) for x in rows] - libml.write_features(self.filename, rows, None, self.type); + # For applying the (key,value) mapping + feat_lu = lambda f: {self.IOB_vocab[item]:f[item] for item in f if item in self.IOB_vocab} - # Use the trained model to make predictions - libml.predict(self.filename, self.type) + # Prose (predict, and read predictions) + prose = [map(feat_lu, x) for x in prose] + prose_model = self.filename + '1' + + libml.write_features(prose_model, prose, None, self.type); + libml.predict(prose_model, self.type) + + prose_labels_list = libml.read_labels(prose_model, self.type) - # A hash table - # the keys are 1,2,4 (LIN, CRF, and SVM) - # each value is a list of concept labels encodings - labels_list = libml.read_labels(self.filename, self.type) - - print "labels_list" - print labels_list - print "\n" + "-" * 80 + # Nonprose (predict, and read predictions) + nonprose = [map(feat_lu, x) for x in nonprose] + nonprose_model = self.filename + '2' + + libml.write_features(nonprose_model, nonprose, None, self.type); + libml.predict(nonprose_model, self.type) + + nonprose_labels_list = libml.read_labels(nonprose_model, self.type) + + + # Stitch prose and nonprose labels lists together + + labels = 
[] + prose_ind = 0 + nonprose_ind = 0 + p_end_flag = (len( prose_line_numbers) == 0) + n_end_flag = (len(nonprose_line_numbers) == 0) + labels_list = {} + + for key in [self.type]: + + # Pretty much renaming just for length/readability pruposes + plist = prose_labels_list[key] + nlist = nonprose_labels_list[key] + + for i in range( len(data) ): + if (not p_end_flag) and (i == prose_line_numbers[prose_ind]): + line = plist[0:len(data[i]) ] # Beginning + plist = plist[ len(data[i]):] # The rest + labels += line + prose_ind += 1 + if prose_ind == len(prose_line_numbers): p_end_flag = True + elif (not n_end_flag) and (i == nonprose_line_numbers[nonprose_ind]): + line = nlist[0:len(data[i]) ] # Beginning + nlist = nlist[ len(data[i]):] # The rest + labels += line + nonprose_ind += 1 + if nonprose_ind == len(nonprose_line_numbers): n_end_flag = True + else: + # Shouldn't really get here ever + print 'Line #%d is neither prose nor nonprose!' % i + + labels_list[key] = labels + + + # IOB labels # translate labels_list into a readable format - # ex. change all occurences of 0 -> 'none' + # ex. change all occurences of 1 -> 'B' for t, labels in labels_list.items(): tmp = [] for sentence in data: tmp.append([labels.pop(0) for i in range(len(sentence))]) - tmp[-1] = map(lambda l: l.strip(), tmp[-1]) - tmp[-1] = map(lambda l: Model.reverse_labels[int(l)],tmp[-1]) + tmp[-1]= map(lambda l: l.strip(), tmp[-1]) + tmp[-1]= map(lambda l: Model.reverse_IOBs_labels[int(l)],tmp[-1]) labels_list[t] = tmp - print "labels_list" - print labels_list - print "\n" + "-" * 80 + #print '-'*80 + #print "\nlabels_list" + #print labels_list + #print "\n" + "-" * 80 - # Group classified tokens based on adjacency - nontrivial_concepts = ImmutableSet( ['treatment', 'problem', 'test'] ) - tmp_hash = {} - for t,labels in labels_list.items(): + # Reminder: list of list of words (line-by-line) + text = data - tmp = [] # Stores a list of classifications - # A classification is a 4-tuple: - # (concept, lineno, starttok, endtok) - start_ind = 0 - end_ind = 0 - streak = 0 # To keep count of the streak of classified tokens - for i, concept_line in enumerate(labels): + # List of list of tokens (similar to 'text', but concepts are grouped) + chunked = {1:[], 2:[], 4:[]} - # C-style array indexing. Probably could be done a better way - # used because I needed the ability of lookahead - for j in range(len(concept_line)): - # Non-trivial classification - if concept_line[j] in nontrivial_concepts: - - # Increase size of current streak - streak += 1 - - # lookahead (check if streak will continue) - if (j+1 == len(concept_line))or \ - (concept_line[j] != concept_line[j+1]): - # Add streak - tmp.append((concept_line[j], i+1, j-streak+1, j)) - # Reset count - streak = 0 + # Create tokens of full concept boundaries for second classifier + for t,chunks in labels_list.items(): - tmp_hash[t] = tmp + # Merge 'B' words with its 'I's to form phrased chunks + tmp = feat_obj.generate_chunks(text,chunks) - print tmp_hash - print "\n" + "-" * 80 + # text_chunks - a merged text + # place_holder - ignore. 
It has a value of [] + # hits - one-to-one concept token indices with text_chunks + text_chunks, place_holder, hits = tmp - # The new labels_list is a translated version - #return labels_list - return tmp_hash + # Store chunked text + chunked[t] = text_chunks + ############################# + # Second Pass # + ############################# - # Model::feature_for_sentence - # - # input: A sentence/line from a medical text file (list of words) - # output: A list of hash tables (one hash table per word) - def features_for_sentence(self, sentence): - # Question! - What do the values of each key,value pair represent? + # Predict classification for chunks + # FIXME - possible error - only predicts on 4 + text_chunks = chunked[4] - #global count - features_list = [] - for word in sentence: - features_list.append(self.features_for_word(word)) + # rows - the format for representing feats for machine learning + # text_matches - the phrase chunks corresponding to classifications + rows = [] + text_matches = [] + for hit in hits: + i,j = hit + rows.append(feat_obj.concept_features(text_chunks[i], j)) + text_matches.append(text_chunks[i][j]) - tags = None - for feature in Model.sentence_features: - if feature not in self.enabled_features: - continue + # FIXME + # Not sure if this should be reset, but it makes sense to me to do it + # But why is it a data member if it shouldnt persist + self.concept_vocab = {} - if feature == "pos": - - tags = tags or nltk.pos_tag(sentence) - - for i, features in enumerate(features_list): - tag = tags[i][1] - features[(feature, tag)] = 1 - - #if count == 0: - # print "tags" - # print tags - # print "\n" + "-" * 80 + "\n" - # print "feature_list\n" - # for it in features_list: - # for elem in sorted(it.items(),my_sort) : print elem - # print "" - # print "\n" + "-" * 80 + "\n" - - if feature == "stem_wordnet": - tags = tags or nltk.pos_tag(sentence) - morphy_tags = { - 'NN':nltk.corpus.reader.wordnet.NOUN, - 'JJ':nltk.corpus.reader.wordnet.ADJ, - 'VB':nltk.corpus.reader.wordnet.VERB, - 'RB':nltk.corpus.reader.wordnet.ADV} - morphy_tags = [(w, morphy_tags.setdefault(t[:2], nltk.corpus.reader.wordnet.NOUN)) for w,t in tags] - st = nltk.stem.WordNetLemmatizer() - for i, features in enumerate(features_list): - tag = morphy_tags[i] - features[(feature, st.lemmatize(*tag))] = 1 - - if feature == "test_result": - for index, features in enumerate(features_list): - right = " ".join([w for w in sentence[index:]]) - if self.is_test_result(right): - features[(feature, None)] = 1 - - - ngram_features = [{} for i in range(len(features_list))] - if "prev" in self.enabled_features: - prev = lambda f: {("prev_"+k[0], k[1]): v for k,v in f.items()} - prev_list = map(prev, features_list) - for i in range(len(features_list)): - if i == 0: - ngram_features[i][("prev", "*")] = 1 - else: - ngram_features[i].update(prev_list[i-1]) - - if "next" in self.enabled_features: - next = lambda f: {("next_"+k[0], k[1]): v for k,v in f.items()} - next_list = map(next, features_list) - for i in range(len(features_list)): - if i == len(features_list) - 1: - ngram_features[i][("next", "*")] = 1 - else: - ngram_features[i].update(next_list[i+1]) - - merged = lambda d1, d2: dict(d1.items() + d2.items()) - features_list = [merged(features_list[i], ngram_features[i]) - for i in range(len(features_list))] - - #if count == 0: - # #print "\n" + "-" * 80 - # #print "features_list" - # #for elem in features_list : print elem - # #print "\n" + "-" * 80 - # count = 1 - - return features_list + for features in rows: + 
for feature in features: + if feature not in self.concept_vocab: + self.concept_vocab[feature] = len(self.concept_vocab) + 1 + # Purpose: Encode something like ('chunk', 'rehabilitation') as a unique + # number, as determined by the self.concept_vocab hash table + tmp_rows = [] + for fdict in rows: + #print fdict + tmp = {self.concept_vocab[key]:fdict[key] for key in fdict} + tmp_rows.append(tmp) + rows = tmp_rows - # input: a single word, like - # Admission - # output: A hash table of features - # features include: word, length, mitre, stem_porter - def features_for_word(self, word): + # Predict using model + second_pass_model = self.filename + '3' + libml.write_features(second_pass_model, [rows], None, self.type); + libml.predict(second_pass_model, self.type) + second_pass_labels_list = libml.read_labels(second_pass_model, self.type) - #global count2 - features = {'dummy':1} # always have >0 dimensions + # FIXME - I probably shouldn't have to do this + # I don't know why it doesn't use all ML libs + for t in [1,2,4]: + if t not in second_pass_labels_list: + second_pass_labels_list[t] = [] - # word_shape, word, length, mitre, stem_porter, stem_lancaster - for feature in Model.word_features: - # word_shape, test_result, word, pos, next, length, stem_wordnet, mitre, stem_porter, prev, stem_lancaster - if feature not in self.enabled_features: + # translate labels_list into a readable format + # ex. change all occurences of 0 -> 'none' + for t, labels in second_pass_labels_list.items(): + + if labels == []: + # FIXME - this means that there are ML libs not being used + #print '\nNot predicting on: ', t, '\n' continue - if feature == "word": - features[(feature, word)] = 1 - - if feature == "length": - features[(feature, None)] = len(word) - - if feature == "mitre": - for f in Model.mitre_features: - if re.search(Model.mitre_features[f], word): - features[(feature, f)] = 1 - - if feature == "stem_porter": - st = nltk.stem.PorterStemmer() - features[(feature, st.stem(word))] = 1 - - if feature == "stem_lancaster": - st = nltk.stem.LancasterStemmer() - features[(feature, st.stem(word))] = 1 - - if feature == "stem_snowball": - st = nltk.stem.SnowballStemmer("english") - #features[(feature, st.stem(word))] = 1 - - if feature == "word_shape": - wordShapes = getWordShapes(word) - - #if count2 < 3: - # #print "word: ", word - # #print "wordShapes: ", wordShapes - # #print "\n" + "-" * 80 - # print '' - - for i, shape in enumerate(wordShapes): - features[(feature + str(i), shape)] = 1 - - if feature == "metric_unit": - unit = 0 - if self.is_weight(word): - unit = 1 - elif self.is_size(word): - unit = 2 - features[(feature, None)] = unit - - # look for prognosis locaiton - #if feature == "radial_loc": - # THIS MIGHT BE BUGGED - # if self.is_prognosis_location(word): - # features[(feature, None)] = 1 - - if feature == "has_problem_form": - if self.has_problem_form(word): - features[(feature, None)] = 1 - - if feature == "def_class": - features[(feature, None)] = self.get_def_class(word) - - #if count2 < 3: - # count2 += 1 - - return features - - - - mitre_features = { - "INITCAP" : r"^[A-Z].*$", - "ALLCAPS" : r"^[A-Z]+$", - "CAPSMIX" : r"^[A-Za-z]+$", - "HASDIGIT" : r"^.*[0-9].*$", - "SINGLEDIGIT" : r"^[0-9]$", - "DOUBLEDIGIT" : r"^[0-9][0-9]$", - "FOURDIGITS" : r"^[0-9][0-9][0-9][0-9]$", - "NATURALNUM" : r"^[0-9]+$", - "REALNUM" : r"^[0-9]+.[0-9]+$", - "ALPHANUM" : r"^[0-9A-Za-z]+$", - "HASDASH" : r"^.*-.*$", - "PUNCTUATION" : r"^[^A-Za-z0-9]+$", - "PHONE1" : r"^[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]$", 
- "PHONE2" : r"^[0-9][0-9][0-9]-[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]$", - "FIVEDIGIT" : r"^[0-9][0-9][0-9][0-9][0-9]", - "NOVOWELS" : r"^[^AaEeIiOoUu]+$", - "HASDASHNUMALPHA" : r"^.*[A-z].*-.*[0-9].*$ | *.[0-9].*-.*[0-9].*$", - "DATESEPERATOR" : r"^[-/]$", - } - + tmp = [] + for sentence in [text_matches]: + tmp.append([labels.pop(0) for i in range(len(sentence))]) + tmp[-1] = map(lambda l: l.strip(), tmp[-1]) + tmp[-1] = map(lambda l: Model.reverse_labels[int(l)],tmp[-1]) + second_pass_labels_list[t] = tmp - def is_test_result (self, context): - # note: make spaces optional? - regex = r"^[A-Za-z]+( )*(-|--|:|was|of|\*|>|<|more than|less than)( )*[0-9]+(%)*" - if not re.search(regex, context): - return re.search(r"^[A-Za-z]+ was (positive|negative)", context) - return True + # Put predictions into format for Note class to read + retVal = {} + for t in [1,2,4]: - def is_weight (self, word): - regex = r"^[0-9]*(mg|g|milligrams|grams)$" - return re.search(regex, word) - - def is_size (self, word): - regex = r"^[0-9]*(mm|cm|millimeters|centimeters)$" - return re.search(regex, word) + # Skip non-predictions + if second_pass_labels_list[t] == []: continue + + classifications = [] + for hit,concept in zip(hits, second_pass_labels_list[t][0]): + i,j = hit + length = len(text_chunks[i][j].split()) + #print (concept, i, j, j+length-1 ) + classifications.append( (concept, i+1, j, j+length-1 ) ) + + retVal[t] = classifications + + + # Return values + return retVal - def is_prognosis_location (self, word): - regex = r"^(c|C)[0-9]+(-(c|C)[0-9]+)*$" - return re.search(regex, word) - - def has_problem_form (self, word): - regex = r".*(ic|is)$" - return re.search(regex, word) - - # checks for a definitive classification at the word level - def get_def_class (self, word): - test_terms = { - "eval", "evaluation", "evaluations", - "sat", "sats", "saturation", - "exam", "exams", - "rate", "rates", - "test", "tests", - "xray", "xrays", - "screen", "screens", - "level", "levels", - "tox" - } - problem_terms = { - "swelling", - "wound", "wounds", - "symptom", "symptoms", - "shifts", "failure", - "insufficiency", "insufficiencies", - "mass", "masses", - "aneurysm", "aneurysms", - "ulcer", "ulcers", - "trama", "cancer", - "disease", "diseased", - "bacterial", "viral", - "syndrome", "syndromes", - "pain", "pains" - "burns", "burned", - "broken", "fractured" - } - treatment_terms = { - "therapy", - "replacement", - "anesthesia", - "supplement", "supplemental", - "vaccine", "vaccines" - "dose", "doses", - "shot", "shots", - "medication", "medicine", - "treament", "treatments" - } - if word.lower() in test_terms: - return 1 - elif word.lower() in problem_terms: - return 2 - elif word.lower() in treatment_terms: - return 3 - return 0 - From d74051ae5349f35bd3576cad94445a668f128bf9 Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Tue, 18 Feb 2014 16:08:25 -0500 Subject: [PATCH 019/393] Added case for an empty sentence. 
--- code/clicon_features.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/code/clicon_features.py b/code/clicon_features.py index 4d624a3..691ca00 100644 --- a/code/clicon_features.py +++ b/code/clicon_features.py @@ -156,6 +156,9 @@ def IOB_nonprose_features_for_sentence(self, sentence): #genia_feats = self.next_GENIA_line() #if not genia_feats: genia_feats = self.next_GENIA_line() + # If sentence is empty + if not sentence: return {} + # List of dictionaries of features line_features = [ {('dummy',1):1} for _ in sentence ] @@ -166,7 +169,6 @@ def IOB_nonprose_features_for_sentence(self, sentence): # Feature: QANN uncased word # Feature: Uncased previous word - line_features[0].update( { ( 'prev_word','') : 1} ) line_features[0].update( { ('uncased_prev_word','') : 1} ) for i in range(1,len(sentence)): line_features[i].update( { ('uncased_prev_word',sentence[i-1].lower()) : 1} ) @@ -193,6 +195,10 @@ def IOB_nonprose_features_for_sentence(self, sentence): # output: Boolean yes/no def prose_sentence(self, sentence): + # Empty sentence is not prose + if not sentence: + return False + if sentence[-1] == '.' or sentence[-1] == '?': return True elif sentence[-1] == ':': From 57833d2419944cd920740fbf46bc164eb846acdd Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Tue, 18 Feb 2014 16:11:38 -0500 Subject: [PATCH 020/393] Changed some default arguments around --- code/predict.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/code/predict.py b/code/predict.py index 66767ec..a684209 100644 --- a/code/predict.py +++ b/code/predict.py @@ -15,12 +15,14 @@ def main(): dest = "input", help = "The input files to predict", default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_data/*') + #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '/home/wboag/ConceptExtraction-master/data/test_data/*') ) parser.add_argument("-o", dest = "output", help = "The directory to write the output", default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_predictions') + #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '/home/wboag/ConceptExtraction-master/data/test_predictions') ) parser.add_argument("-m", @@ -72,9 +74,10 @@ def main(): note = Note() note.read_i2b2(txt) + # Use the model to predict the concept labels # Returns a hash table with: - # keys as 1,2,4 (SVM, LIN, CRF) + # keys as 1,2,4 # values as list of list of concept tokens (one-to-one with dat_list) labels = model.predict(note) @@ -92,11 +95,14 @@ def main(): if t == libml.CRF: helper.mkpath(os.path.join(args.output, "crf")) con_path = os.path.join(path, "crf", con) - + # Output the concept predictions note.write_i2b2(con_path, labels[t]) #note.write_plain(con_path, labels[t]) # in case of plain format + #note.write_BIOs_labels(con_path, labels[t]) + + if __name__ == '__main__': main() From 43919019f4351af8087033161ed69f00ec760222 Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Tue, 18 Feb 2014 16:14:45 -0500 Subject: [PATCH 021/393] default argument configuration --- code/train.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/code/train.py b/code/train.py index 42697b7..6d67dc6 100644 --- a/code/train.py +++ b/code/train.py @@ -16,22 +16,20 @@ def main(): parser.add_argument("-t", dest = "txt", help = "The files that contain the training examples", - #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), 
'../data/concept_assertion_relation_training_data/merged/txt/*') - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/beth/txt/record-33.txt') + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/merged/txt/*') ) parser.add_argument("-c", dest = "con", help = "The files that contain the labels for the training examples", - #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/merged/concept/*') - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/beth/concept/record-33.txt') + default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/concept_assertion_relation_training_data/merged/concept/*') ) parser.add_argument("-m", dest = "model", help = "Path to the model that should be generated", #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/awesome.model') - default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/run_models/run.model') + default = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../models/run_models/run.model') ) parser.add_argument("-d", @@ -93,7 +91,6 @@ def main(): #training_list.append(txt_files_map[k]) - # What kind of model should be used? (ex. SVM vs. CRF) type = 0 if not args.no_svm: @@ -129,7 +126,7 @@ def main(): # Train the model using the Note's data - model.train(notes[0]) + model.train(notes) if __name__ == '__main__': From 4920886e6c8efbbe16f8f51d19547f2147f536ad Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Tue, 18 Feb 2014 16:38:11 -0500 Subject: [PATCH 022/393] trying to put on github --- code/model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/code/model.py b/code/model.py index 10c7d3b..f13a1d4 100644 --- a/code/model.py +++ b/code/model.py @@ -52,9 +52,9 @@ def __init__(self, filename='awesome.model', type=libml.ALL): - # Model::train() - # - # @param note. A Note object that has data for training the model + # Model::train() + # + # @param note. A Note object that has data for training the model def train(self, note): # Get the data and annotations from the Note object From 1a1ce86dafd7df36bb79fe970c75ef08f1aa94d9 Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Sat, 1 Mar 2014 14:14:26 -0500 Subject: [PATCH 023/393] Adding UMLS features --- code/SQLookup.py | 38 +++++++++++ code/create_sqliteDB.py | 74 ++++++++++++++++++++ code/umls.py | 146 ++++++++++++++++++++++++++++++++++++++++ code/umls_cache.py | 20 ++++++ 4 files changed, 278 insertions(+) create mode 100644 code/SQLookup.py create mode 100644 code/create_sqliteDB.py create mode 100644 code/umls.py create mode 100644 code/umls_cache.py diff --git a/code/SQLookup.py b/code/SQLookup.py new file mode 100644 index 0000000..fefa13e --- /dev/null +++ b/code/SQLookup.py @@ -0,0 +1,38 @@ +import copy +import sqlite3 +import create_sqliteDB +import os + +WINDOW_SIZE = 7 + +#connect to UMLS database +def SQLConnect(): + + #try to connect to the sqlite database. 
Make one otherwise + if( os.path.isfile( "../umls_tables/umls.db" ) ): + print "\ndb exists" + db = sqlite3.connect( "../umls_tables/umls.db" ) + else: + print "\ndb doesn't exist" + create_sqliteDB.create_db() + db = sqlite3.connect( "../umls_tables/umls.db" ) + db.text_factory = str + return db.cursor() + +#used in SQlookup, I made this global so SQLConnect is only called once. +c = SQLConnect() + +#searchs umls database for the semantic type of a string +def SQlookup( c , string ): + + #queries database and finds first semantic type match, returns a 1 when a match is found. + c.execute( "SELECT sty FROM MRCON a, MRSTY b WHERE a.cui = b.cui AND str = ? LIMIT 1;" , (string,) ) + + #returns a tuple with the match or None if there was no match. + return c.fetchone() + +#returns the semantic type of a word +def string_lookup( string ): + + return SQlookup( c , string ) + diff --git a/code/create_sqliteDB.py b/code/create_sqliteDB.py new file mode 100644 index 0000000..71d3ef9 --- /dev/null +++ b/code/create_sqliteDB.py @@ -0,0 +1,74 @@ +#database.py creates a .db file for performing umls searches. +import sqlite3 +import sys + +def create_db(): + + print "\ncreating umls.db" + #connect to the .db file we are creating. + conn = sqlite3.connect( "../umls_tables/umls.db" ) + + conn.text_factory = str + + #load data in files. + try: + MRSTY_TABLE = open( ( "../umls_tables/MRSTY"), "r" ) + except IOError: + print "\nNo file to use for creating MRSTY table\n" + conn.close() + sys.exit() + + try: + MRCON_TABLE = open( ("../umls_tables/MRCON") , "r" ) + except IOError: + print "\nNo file to use for creating MRCON table\n" + conn.close() + sys.exit() + + MRSTY_TABLE = MRSTY_TABLE.read() ; + MRSTY_TABLE = MRSTY_TABLE.split('\n') + + MRCON_TABLE = MRCON_TABLE.read() ; + MRCON_TABLE = MRCON_TABLE.split( '\n' ) + + #data that will be inserted into tables. + MRTSY_DATA = [] + MRCON_DATA = [] + + c = conn.cursor() + + #parse and store the data from the files. + for line in MRSTY_TABLE: + MRTSY_DATA.append( tuple(line.split('|')) ) + for line in MRCON_TABLE: + MRCON_DATA.append( tuple(line.split('|')) ) + + #create tables. + c.execute( "CREATE TABLE MRCON( CUI, LAT, TS, LUI, STT, SUI, STR, LRL, EMPTY ) ;" ) + c.execute( "CREATE TABLE MRSTY( CUI, TUI, STY, EMPTY ) ;" ) + + #insert data onto database + for line in MRCON_DATA: + try: + c.execute( "INSERT INTO MRCON( CUI, LAT, TS, LUI, STT, SUI, STR, LRL, EMPTY ) values ( ?, ?, ? ,?, ?,?,?,?,?);", line ) + except sqlite3.ProgrammingError: + continue + for line in MRTSY_DATA: + try: + c.execute( "INSERT INTO MRSTY( CUI, TUI, STY, EMPTY) values( ?, ?, ?, ?)" , line ) + except sqlite3.ProgrammingError: + continue + + #create indices for faster queries + c.execute( "CREATE INDEX mrsty_cui_map ON MRSTY(CUI)") + c.execute( "CREATE INDEX mrcon_str_map ON MRCON(STR)") + c.execute( "CREATE INDEX mrcon_cui_map ON MRCON(CUI)") + + #save changes to .db + conn.commit() + + print "\nsqlite database created" + + #close connection + conn.close() + diff --git a/code/umls.py b/code/umls.py new file mode 100644 index 0000000..0aa842c --- /dev/null +++ b/code/umls.py @@ -0,0 +1,146 @@ +import cPickle as pickle +import SQLookup + +def umls_semantic_type_word( umls_string_cache , sentence ): + + #If the umls semantic type is in the cache use that semantic type otherwise lookup the semantic type and add to the cache. 
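+    # For example, a lookup for the string 'blood pressure' first checks the
+    # cache; only on a cache miss is the UMLS table queried through SQLookup,
+    # and the result (a semantic type string, or None) is cached for later calls.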
+ if umls_string_cache.has_key( sentence ): + mapping = umls_string_cache.get_map( sentence ) + else: + concept = SQLookup.string_lookup( sentence ) + if concept != None: + umls_string_cache.add_map( sentence , concept[0] ) + else: + umls_string_cache.add_map( sentence , None ) + mapping = umls_string_cache.get_map(sentence) + + return mapping + +def umls_semantic_context_of_words( umls_string_cache, sentence ): + + #Defines the largest string span for the sentence. + WINDOW_SIZE = 7 + + #Holds the span of the umls concept of the largest substring that each word in sentence is found in. + umls_context_list = [] + + #Holds the span of the concept for each substring. + #A tuple containing tne beginning and end index of a substring functions as the key and the key is assigned to a umls definition. + concept_span_dict = {} + + #Initialize the umls_context_list with empty lists. Each sublist functions as the mappings for each word. + for i in sentence: + umls_context_list.append( [] ) + + #Creates and finds the span of a umls concept for each possible substring of length 1 to currentWindowSize. + for currentWindowSize in range( 1 , WINDOW_SIZE ): + for ti in range( 0 , ( len(sentence) - currentWindowSize ) + 1 ): + rawstring = "" + for tj in range( ti , ti + currentWindowSize): + rawstring += ( sentence[tj] + " " ) + + #Each string is of length 1 to currentWindowSize. + rawstring = rawstring[0:-1] + + #If the string is not in cache, look the umls concept up and add to the cache. + if not( umls_string_cache.has_key( rawstring ) ): + #SQLookup returns a tuple if there is a result or None is there is not. + concept = SQLookup.string_lookup( rawstring ) + + if concept != None: + umls_string_cache.add_map( rawstring , concept[0] ) + else: + umls_string_cache.add_map( rawstring , None ) + + #Store the concept into concept_span_dict with its span as a key. + concept_span_dict[(ti,ti+currentWindowSize-1)] = umls_string_cache.get_map( rawstring ) + + #For each substring of the sentence if there is a definition obtained from + #SQLookup assign the concept to every word that is within in the substring. + #If the currrent span is a substring update otherwise if it is not a substring add the new found context. + if umls_string_cache.get_map(rawstring) != None: + + for i in range( ti , ti + currentWindowSize ): + + if len( umls_context_list[i] ) == 0: + umls_context_list[i].append( [ ti , ti + currentWindowSize - 1 ] ) + + else: + updated = 0 + for j in umls_context_list[i]: + if j[0] >= ti and j[1] <= ( ti + currentWindowSize - 1 ): + j[0] = ti + j[1] = ( ti + currentWindowSize - 1 ) + updated += 1 + if not(updated): + if umls_context_list[i].count( [ti,ti+currentWindowSize -1] ) == 0 : + umls_context_list[i].append( [ ti , ti +currentWindowSize - 1 ] ) + + #create a list of sublists each sublist represents the contexts for which the word appears in the sentence + mappings = [] + for i in umls_context_list: + spans = i + if len(spans) == 0: + mappings.append( None ) + else: + sub_mappings = [] + for j in spans: + sub_mappings.append( concept_span_dict[tuple(j)]) + mappings.append( list(set(sub_mappings)) ) + + return mappings + +def umls_semantic_type_sentence( cache , sentence ): + #Defines the largest string span for the sentence. + WINDOW_SIZE = 7 + + #holds the mappings for every substring of size 1 to WINDOW_SIZE + mappings = {} + + #Creates and finds the span of a umls concept for each possible substring of length 1 to currentWindowSize. 
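# An illustration of the windowing performed by the nested loops below: every
# contiguous run of 1 to WINDOW_SIZE-1 tokens is joined into a candidate
# phrase, together with its (start, end) token span. A standalone sketch with
# a hypothetical two-word sentence:
def candidate_spans(tokens, window=7):
    """Yield ((start, end), phrase) for every token span of width 1..window-1."""
    for width in range(1, window):
        for start in range(len(tokens) - width + 1):
            yield (start, start + width - 1), " ".join(tokens[start:start + width])

# list(candidate_spans(["chest", "pain"], window=3)) ==
#     [((0, 0), 'chest'), ((1, 1), 'pain'), ((0, 1), 'chest pain')]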
+ for currentWindowSize in range( 1 , WINDOW_SIZE ): + for ti in range( 0 , ( len(sentence) - currentWindowSize ) + 1 ): + + rawstring = "" + + for tj in range( ti , ti + currentWindowSize): + rawstring += ( sentence[tj] + " " ) + + #Each string is of length 1 to currentWindowSize. + rawstring = rawstring[0:-1] + + #If the umls semantic type is already in the cache then us the one stored otherwise lookup and add to cache + if cache.has_key( rawstring ): + mappings[rawstring] = cache.get_map( rawstring ) + + else: + + concept = string_lookup( rawstring ) + + if concept != None: + cache.add_map( rawstring , concept[0] ) + else: + cache.add_map( rawstring , None ) + + mappings[rawstring] = cache.get_map( rawstring ) + + size_s = 0 + + phrase = [] + + #get longest sub string with a mapping + for mapping in mappings.iteritems(): + + if( mapping[1] != None ): + if( len( mapping[0] ) > size_s ): + + phrase = [] + phrase.append( mapping[1] ) + size_s = len( mapping[0] ) + continue + if( len(mapping[0]) == size_s ): + phrase.append( mapping[1] ) + + return phrase + + diff --git a/code/umls_cache.py b/code/umls_cache.py new file mode 100644 index 0000000..62fe6f6 --- /dev/null +++ b/code/umls_cache.py @@ -0,0 +1,20 @@ +import cPickle as pickle + +class UmlsCache: + def __init__(self): + try: + self.cache = pickle.load( open( "umls_cache" , "rb" ) ) ; + except IOError: + self.cache = {} + + def has_key( self , string ): + return self.cache.has_key( string ) + + def add_map( self , string, mapping ): + self.cache[string] = mapping + + def get_map( self , string ): + return self.cache[string] + + def __del__(self): + pickle.dump( self.cache, open("umls_cache", "wb" ) ) From 0b10551dca878ef97053488a24808e0fd91f9c5b Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Sat, 1 Mar 2014 14:19:03 -0500 Subject: [PATCH 024/393] Incorporate all features for both 1st and second pass training --- code/clicon_features.py | 538 +++++++++++++++++++++++++++++++--------- 1 file changed, 427 insertions(+), 111 deletions(-) diff --git a/code/clicon_features.py b/code/clicon_features.py index 691ca00..0ba12ed 100644 --- a/code/clicon_features.py +++ b/code/clicon_features.py @@ -14,18 +14,41 @@ import nltk import re +from sets import ImmutableSet +from wordshape import getWordShapes import clicon_genia_interface +from umls_cache import UmlsCache +import umls + + + class FeatureWrapper: + # Feature Enabling + enabled_IOB_prose_sentence_features = ImmutableSet( ['prev_POS', 'pos', 'GENIA'] ) + enabled_IOB_prose_word_features = ImmutableSet( ['Generic#', 'last_two_letters', 'prev_word', 'uncased_prev_word' ] ) + + enabled_IOB_nonprose_sentence_features = ImmutableSet( ['prev_POS'] ) + enabled_IOB_nonprose_word_features = ImmutableSet( ['word', 'uncased_prev_word'] ) + + enabled_concept_features = ImmutableSet( ['chunk', 'unigram', 'first-four-letters', 'stem_wordnet', 'test_result', 'umls_semantic_type_word'] ) + enabled_concept_word_features = ImmutableSet(['word', 'length', 'mitre', 'stem_porter', 'stem_lancaster', 'word_shape']) + + # Run the GENIA tagger on the given data def __init__(self, data): - #self.GENIA_features = clicon_genia_interface.genia(data) - self.GENIA_counter = 0 + + # Only run GENIA tagger if feature is enabled + if 'GENIA' in self.enabled_IOB_prose_sentence_features: + self.GENIA_features = clicon_genia_interface.genia(data) + self.GENIA_counter = 0 + # cache for the mappings of all umls lookups made + self.umls_lookup_cache = UmlsCache() # Iterate through GENIA Tagger features @@ -54,94 +77,160 @@ def 
IOB_features_for_sentence(self, sentence): # Different features depending on whether sentence is 'prose' if isProse: - line_features = self.IOB_prose_features_for_sentence(sentence) + features_list = self.IOB_prose_features_for_sentence(sentence) else: - line_features = self.IOB_nonprose_features_for_sentence(sentence) + features_list = self.IOB_nonprose_features_for_sentence(sentence) # Return features as well as indication of whether it is prose or not - return (isProse, line_features) + return (isProse, features_list) # IOB_prose_features_for_sentence() # # input: A sentence - # output: A hash table of features + # output: A list of hash tables of features def IOB_prose_features_for_sentence(self, sentence): + features_list = [] - # List of dictionaries of features - line_features = [ {('dummy',1):1} for _ in sentence ] - - # Feature: Generic# stemmed word + # Get a feature set for each word in the sentence for i,word in enumerate(sentence): - generic = re.sub('[0-9]','0',word) - line_features[i].update( { ('Generic#',generic) : 1}) + features_list.append( self.IOB_prose_features_for_word(sentence,i) ) - # Feature: Previous word - line_features[0].update( { ( 'prev_word', '' ) : 1} ) - for i in range(1,len(sentence)): - line_features[i].update( { ('uncased_prev_word',sentence[i-1].lower()) : 1} ) - # Feature Uncased previous word - line_features[0].update( { ('uncased_prev_word', '' ) : 1} ) - for i in range(1,len(sentence)): - line_features[i].update( { ( 'prev_word',sentence[i-1] ) : 1} ) + # Allow for particular features to be enabled + for feature in self.enabled_IOB_prose_sentence_features: - # Feature: Last two leters of word - for word in sentence: - line_features[i].update( { ('last_two_letters',word[-2:]) : 1} ) + # Feature: Previous POS + if feature == 'prev_POS': + pos_tagged = nltk.pos_tag(sentence) + features_list[0].update( {('prev_POS','') : 1} ) + for i in range(1,len(sentence)): + features_list[i].update( {('prev_POS',pos_tagged[i-1]) : 1} ) - # Feature: Previous POS - pos_tagged = nltk.pos_tag(sentence) - line_features[0].update( { ('prev_POS','') : 1} ) - for i in range(1,len(sentence)): - line_features[i].update( { ('prev_POS',pos_tagged[i-1]) : 1} ) - # Feature: 1-token part-of-speech context - for (i,(_,pos)) in enumerate(pos_tagged): - line_features[i].update( { ('pos',pos) : 1} ) + # Feature: 1-token part-of-speech context + if feature == 'pos': + for (i,(_,pos)) in enumerate(pos_tagged): + features_list[i].update( { ('pos',pos) : 1} ) - # Feature: UMLS concept hypernyms + # Feature: UMLS semantic type for the sentence + if feature == 'umls_semantic_type_sentence': - # GENIA features - for i in range(len(sentence)): + # a list of the uml semantic of the largest substring(s). + sentence_mapping = umls.umls_semantic_type_sentence( self.umls_lookup_cache, sentence ) - # FIXME - Do not call GENIA features right now - # (to speed up runtime during development) - continue + # If there are no mappings. + if( len(sentence_mapping) == 0 ): + for i , features in enumerate( features_list): + features[(feature , None ) ] = 1 + # assign the umls definitions to the vector for each word in the sentence. 
+ else: + for i , features in enumerate( features_list): + for concept in sentence_mapping: + features[(feature , concept ) ] = 1 - # Get the GENIA features of the current sentence - genia_feats = self.next_GENIA_line() - if not genia_feats: genia_feats = self.next_GENIA_line() + # Feature: UMLS semantic concext + if feature == 'umls_semantic_context': + + # a list of lists, each sub list contains the umls definition of the largest string the word is in + umls_semantic_context_mappings = umls.umls_semantic_context_of_words( self.umls_lookup_cache , sentence ) + + # assign the umls definitions in the sublists to the vector of the corresponding word + for i , features in enumerate( features_list ): + + #if there are no mappings + if umls_semantic_context_mappings[i] == None: + features[(feature,None)] = 1 + #if there is a mapping, there could be multiple contexts, iterate through the sublist + else: + for mapping in umls_semantic_context_mappings[i]: + features[(feature,mapping)] = 1 + + # GENIA features + if feature == 'GENIA': + # Get the GENIA features of the current sentence + genia_feats = self.next_GENIA_line() + if not genia_feats: genia_feats = self.next_GENIA_line() - # Feature: Current word's GENIA features - keys = ['GENIA-stem','GENIA-POS','GENIA-chunktag'] - curr = genia_feats[i] - output = dict( (('curr-'+k, curr[k]), 1) for k in keys if k in curr) + for i in range(len(sentence)): - # Feature: Previous word's GENIA features - if i: - prev = genia_feats[i] - output = dict( (('prev-'+k, prev[k]), 1) for k in keys if k in prev) - else: - output = dict( (('prev-'+k, ''), 1) for k in keys if k in curr) + # Feature: Current word's GENIA features + keys = ['GENIA-stem','GENIA-POS','GENIA-chunktag'] + curr = genia_feats[i] + output = dict( (('curr-'+k, curr[k]), 1) for k in keys if k in curr) - # Feature: Next word's GENIA stem - # Note: This is done retroactively, updating the previous token - if i > 0: - line_features[i-1].update( {('next-GENIA-stem',curr['GENIA-stem']) : 1} ) - # Do not accidentally skip the final token - if i == (len(sentence) - 1): - line_features[i].update( { ('next-GENIA-stem','') : 1} ) + # Feature: Previous word's GENIA features + if i == 0: + output = dict( (('prev-'+k, ''), 1) for k in keys if k in curr) + else: + prev = genia_feats[i] + output = dict( (('prev-'+k, prev[k]), 1) for k in keys if k in prev) - line_features[i].update(output) + # Feature: Next word's GENIA stem + # Note: This is done by updating the previous token's dict + if i != (len(sentence) - 1): + features_list[i-1].update( {('next-GENIA-stem',curr['GENIA-stem']) : 1} ) + else: + features_list[i].update( { ('next-GENIA-stem','') : 1} ) - # MetaMap semantic type + features_list[i].update(output) - return line_features + return features_list + + + + # IOB_prose_features_for_word() + # + # input: A single word + # output: A dictionary of features + def IOB_prose_features_for_word(self, sentence, i): + + # Abbreviation for most features, + # although some will require index for context + word = sentence[i] + + + # Feature: + features = {'dummy': 1} # always have >0 dimensions + + + # Allow for particular features to be enabled + for feature in self.enabled_IOB_prose_word_features: + + + # Feature: Generic# stemmed word + if feature == 'Generic#': + generic = re.sub('[0-9]','0',word) + features.update( { ('Generic#',generic) : 1 } ) + + + # Feature: Last two leters of word + if feature == 'last_two_letters': + features.update( { ('last_two_letters',word[-2:]) : 1 } ) + + + # Feature: Previous 
word + if feature == 'prev_word': + if i == 0: + features.update( {('prev_word', '' ) : 1} ) + else: + features.update( {('prev_word', sentence[i-1]) : 1} ) + + + # Feature Uncased previous word + if feature == 'uncased_prev_word': + if i == 0: + features.update( {('uncased_prev_word', '' ) : 1} ) + else: + features.update( {('uncased_prev_word',sentence[i-1].lower()) : 1} ) + + + return features + @@ -152,40 +241,68 @@ def IOB_prose_features_for_sentence(self, sentence): def IOB_nonprose_features_for_sentence(self, sentence): # Get the GENIA features of the current sentence - # The GENIA featurs are not used for nonprose, but it keeps things aligned for the prose - #genia_feats = self.next_GENIA_line() - #if not genia_feats: genia_feats = self.next_GENIA_line() + # (not used for nonprose, but it keeps things aligned for the prose) + if 'GENIA' in self.enabled_IOB_prose_sentence_features: + genia_feats = self.next_GENIA_line() + if not genia_feats: genia_feats = self.next_GENIA_line() # If sentence is empty if not sentence: return {} - # List of dictionaries of features - line_features = [ {('dummy',1):1} for _ in sentence ] + features_list = [] - # Feature: The word, itself + # Get a feature set for each word in the sentence for i,word in enumerate(sentence): - line_features[i].update( { ('word',word.lower()) : 1} ) + features_list.append( self.IOB_prose_features_for_word(sentence,i) ) + + + # Allow for particular features to be enabled + for feature in self.enabled_IOB_nonprose_sentence_features: - # Feature: QANN uncased word + # Feature: Previous POS + if feature == 'prev_POS': + pos_tagged = nltk.pos_tag(sentence) + features_list[0].update( {('prev_POS','') : 1} ) + for i in range(1,len(sentence)): + features_list[i].update( {('prev_POS',pos_tagged[i-1]) : 1} ) - # Feature: Uncased previous word - line_features[0].update( { ('uncased_prev_word','') : 1} ) - for i in range(1,len(sentence)): - line_features[i].update( { ('uncased_prev_word',sentence[i-1].lower()) : 1} ) - # 3-token part-of-speech context + return features_list - # MetaMap semantic type - # MetaMap CUI - # Feature: Previous POS - pos_tagged = nltk.pos_tag(sentence) - line_features[0].update( { ('prev_POS','') : 1} ) - for i in range(1,len(sentence)): - line_features[i].update( { ('prev_POS',pos_tagged[i-1]) : 1} ) + # IOB_nonprose_features_for_word() + # + # input: A single word + # output: A dictionary of features + def IOB_nonprose_features_for_word(self, sentence, i): + + # Abbreviation for most features, + # although some will require index for context + word = sentence[i] + + + # Feature: + features = {'dummy': 1} # always have >0 dimensions + + + # Allow for particular features to be enabled + for feature in self.enabled_IOB_nonprose_word_features: + + # Feature: The word, itself + if feature == 'word': + features.update( { ('word',word.lower()) : 1} ) + + # Feature: Uncased previous word + if feature == 'uncased_prev_word': + if i == 0: + features.update( {('uncased_prev_word','' ) : 1} ) + else: + features.update( {('uncased_prev_word',sentence[i-1].lower()) : 1} ) + + + return features - return line_features @@ -229,7 +346,7 @@ def at_least_half_nonprose(self, sentence): return False - # prose::word() + # prose_word() # # input: A word # output: Boolean yes/no @@ -344,35 +461,234 @@ def generate_chunks(self, data, IOB_tags, labels=None): # output: A list of hash tables (one hash table per word) def concept_features(self, sentence, ind): - retVal = {} - - retVal.update( { ('chunk',sentence[ind]) : 1 } ) - - # 
Feature: Uncased unigrams - for i,word in enumerate( sentence[ind].split() ): - featname = 'unigram-%d' % i - retVal.update( { (featname, word.lower()) : 1 } ) - - # Feature: First four letters of each word - prefix_list = [ word[0:4] for word in sentence[ind].split() ] - for i,word in enumerate(prefix_list): - featname = 'first-four-letters-%d' % i - retVal.update( { (featname, word) : 1 } ) - - # Feature: Stemmed previous word - - - # Feature: Uncased previous bigram - - - # Feature: Argument type + nearest predicate - - - # Feature: UMLS concept type - - - # Feature Wikipedia concept type + features = {} + + # Get features for each unigram + for i in range(len(sentence[ind])): + features.update( self.concept_features_for_word( sentence[ind], i ) ) + + + # Allow for particular features to be enabled + for feature in self.enabled_IOB_prose_sentence_features: + + # Feature: the chunk itself + if feature == 'chunk': + features.update( { ('chunk',sentence[ind]) : 1 } ) + + # Feature: Uncased unigrams + if feature == 'unigram': + for i,word in enumerate( sentence[ind].split() ): + featname = 'unigram-%d' % i + features.update( { (featname, word.lower()) : 1 } ) + + # Feature: First four letters of each word + if feature == 'first-four-letters': + prefix_list = [ word[:4] for word in sentence[ind].split() ] + for i,word in enumerate(prefix_list): + featname = 'first-four-letters-%d' % i + features.update( { (featname, word) : 1 } ) + + # Feature: UMLS Semantic Type (ignores context) + if feature == 'umls_semantic_type_word': + + # Get a semantic type for each unigram + for i,word in enmuerate(sentence[ind]): + mapping = umls.umls_semantic_type_word(self.umls_lookup_cache , word ) + #If there is no umls semantic type. + featname = 'umls_semantic_type_word"%d' % i + if( mapping == None ): + features[(feature,None)] = 1 + else: + features[(feature , mapping )] = 1 + + # Feature: Stemmed Word + if feature == "stem_wordnet": + tags = tags or nltk.pos_tag(sentence) + morphy_tags = { + 'NN':nltk.corpus.reader.wordnet.NOUN, + 'JJ':nltk.corpus.reader.wordnet.ADJ, + 'VB':nltk.corpus.reader.wordnet.VERB, + 'RB':nltk.corpus.reader.wordnet.ADV} + morphy_tags = [(w, morphy_tags.setdefault(t[:2], nltk.corpus.reader.wordnet.NOUN)) for w,t in tags] + st = nltk.stem.WordNetLemmatizer() + for i, features in enumerate(features_list): + tag = morphy_tags[i] + features[(feature, st.lemmatize(*tag))] = 1 + + # Feature: Test Result + if feature == "test_result": + for index, features in enumerate(features_list): + right = " ".join([w for w in sentence[index:]]) + if self.is_test_result(right): + features[(feature, None)] = 1 + + + return features + + + + # concept_features_for_word() + # + # input: A single word + # output: A dictionary of features + def concept_features_for_word(self, chunk, i): + + # Abbreviation for most features, + # although some will require index for context + word = chunk[i] + + + # Feature: + features = {'dummy': 1} # always have >0 dimensions + + + # word_shape, word, length, mitre, stem_porter, stem_lancaster + for feature in self.enabled_concept_word_features: + + if feature == "word": + features[(feature, word)] = 1 + + if feature == "length": + features[(feature, None)] = len(word) + + if feature == "mitre": + for f in self.mitre_features: + if re.search(self.mitre_features[f], word): + features[(feature, f)] = 1 + + if feature == "stem_porter": + st = nltk.stem.PorterStemmer() + features[(feature, st.stem(word))] = 1 + + if feature == "stem_lancaster": + st = 
nltk.stem.LancasterStemmer() + features[(feature, st.stem(word))] = 1 + + if feature == "stem_snowball": + st = nltk.stem.SnowballStemmer("english") + features[(feature, st.stem(word))] = 1 + + if feature == "word_shape": + wordShapes = getWordShapes(word) + for i, shape in enumerate(wordShapes): + features[(feature + str(i), shape)] = 1 + + if feature == "metric_unit": + unit = 0 + if self.is_weight(word): + unit = 1 + elif self.is_size(word): + unit = 2 + features[(feature, None)] = unit + + # look for prognosis locaiton + #if feature == "radial_loc": + # THIS MIGHT BE BUGGED + # if is_prognosis_location(word): + # features[(feature, None)] = 1 + + if feature == "has_problem_form": + if self.has_problem_form(word): + features[(feature, None)] = 1 + + if feature == "def_class": + features[(feature, None)] = self.get_def_class(word) + + return features + + + + mitre_features = { + "INITCAP": r"^[A-Z].*$", + "ALLCAPS": r"^[A-Z]+$", + "CAPSMIX": r"^[A-Za-z]+$", + "HASDIGIT": r"^.*[0-9].*$", + "SINGLEDIGIT": r"^[0-9]$", + "DOUBLEDIGIT": r"^[0-9][0-9]$", + "FOURDIGITS": r"^[0-9][0-9][0-9][0-9]$", + "NATURALNUM": r"^[0-9]+$", + "REALNUM": r"^[0-9]+.[0-9]+$", + "ALPHANUM": r"^[0-9A-Za-z]+$", + "HASDASH": r"^.*-.*$", + "PUNCTUATION": r"^[^A-Za-z0-9]+$", + "PHONE1": r"^[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]$", + "PHONE2": r"^[0-9][0-9][0-9]-[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]$", + "FIVEDIGIT": r"^[0-9][0-9][0-9][0-9][0-9]", + "NOVOWELS": r"^[^AaEeIiOoUu]+$", + "HASDASHNUMALPHA": r"^.*[A-z].*-.*[0-9].*$ | *.[0-9].*-.*[0-9].*$", + "DATESEPERATOR": r"^[-/]$", + } + + def is_test_result( context): + # note: make spaces optional? + regex = r"^[A-Za-z]+( )*(-|--|:|was|of|\*|>|<|more than|less than)( )*[0-9]+(%)*" + if not re.search(regex, context): + return re.search(r"^[A-Za-z]+ was (positive|negative)", context) + return True - return retVal + def is_weight( word): + regex = r"^[0-9]*(mg|g|milligrams|grams)$" + return re.search(regex, word) + + def is_size( word): + regex = r"^[0-9]*(mm|cm|millimeters|centimeters)$" + return re.search(regex, word) + + def is_prognosis_location( word): + regex = r"^(c|C)[0-9]+(-(c|C)[0-9]+)*$" + return re.search(regex, word) + + def has_problem_form( word): + regex = r".*(ic|is)$" + return re.search(regex, word) + + # checks for a definitive classification at the word level + def get_def_class( word): + test_terms = { + "eval", "evaluation", "evaluations", + "sat", "sats", "saturation", + "exam", "exams", + "rate", "rates", + "test", "tests", + "xray", "xrays", + "screen", "screens", + "level", "levels", + "tox" + } + problem_terms = { + "swelling", + "wound", "wounds", + "symptom", "symptoms", + "shifts", "failure", + "insufficiency", "insufficiencies", + "mass", "masses", + "aneurysm", "aneurysms", + "ulcer", "ulcers", + "trama", "cancer", + "disease", "diseased", + "bacterial", "viral", + "syndrome", "syndromes", + "pain", "pains" + "burns", "burned", + "broken", "fractured" + } + treatment_terms = { + "therapy", + "replacement", + "anesthesia", + "supplement", "supplemental", + "vaccine", "vaccines" + "dose", "doses", + "shot", "shots", + "medication", "medicine", + "treament", "treatments" + } + if word.lower() in test_terms: + return 1 + elif word.lower() in problem_terms: + return 2 + elif word.lower() in treatment_terms: + return 3 + return 0 From 9c54703e3bf8b4d6310388a758e2098a63e27e50 Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Sat, 1 Mar 2014 16:23:28 -0500 Subject: [PATCH 025/393] Adding all files to github so Kevin can run it properly --- 
code/clicon_features.py | 2 +- code/model.py | 27 ++++++++++++++++++--------- code/predict.py | 7 +++++++ code/train.py | 1 - 4 files changed, 26 insertions(+), 11 deletions(-) diff --git a/code/clicon_features.py b/code/clicon_features.py index 0ba12ed..a7c5493 100644 --- a/code/clicon_features.py +++ b/code/clicon_features.py @@ -29,7 +29,7 @@ class FeatureWrapper: # Feature Enabling - enabled_IOB_prose_sentence_features = ImmutableSet( ['prev_POS', 'pos', 'GENIA'] ) + enabled_IOB_prose_sentence_features = ImmutableSet( ['prev_POS', 'pos', 'GENIA', 'umls_semantic_type_sentence', 'umls_semantic_context'] ) enabled_IOB_prose_word_features = ImmutableSet( ['Generic#', 'last_two_letters', 'prev_word', 'uncased_prev_word' ] ) enabled_IOB_nonprose_sentence_features = ImmutableSet( ['prev_POS'] ) diff --git a/code/model.py b/code/model.py index 092e6f9..aafb3d2 100644 --- a/code/model.py +++ b/code/model.py @@ -1,14 +1,13 @@ from __future__ import with_statement import os -import pickle +import cPickle as pickle import helper import libml import clicon_features - class Model: labels = { @@ -287,15 +286,22 @@ def predict(self, note): # Stitch prose and nonprose labels lists together - - labels = [] - prose_ind = 0 - nonprose_ind = 0 - p_end_flag = (len( prose_line_numbers) == 0) - n_end_flag = (len(nonprose_line_numbers) == 0) labels_list = {} - for key in [self.type]: + + # FIXME - incorrect + for key in libml.bits(self.type): + + # FIXME - workaround for key + #if not prose_labels_list[key]: + # labels_list[2] = {} + # continue + + labels = [] + prose_ind = 0 + nonprose_ind = 0 + p_end_flag = (len( prose_line_numbers) == 0) + n_end_flag = (len(nonprose_line_numbers) == 0) # Pretty much renaming just for length/readability pruposes plist = prose_labels_list[key] @@ -327,6 +333,7 @@ def predict(self, note): # translate labels_list into a readable format # ex. change all occurences of 1 -> 'B' for t, labels in labels_list.items(): + if not labels_list[t]: continue tmp = [] for sentence in data: tmp.append([labels.pop(0) for i in range(len(sentence))]) @@ -350,6 +357,8 @@ def predict(self, note): # Create tokens of full concept boundaries for second classifier for t,chunks in labels_list.items(): + # FIXME - workaround + if not labels_list[t]: continue # Merge 'B' words with its 'I's to form phrased chunks tmp = feat_obj.generate_chunks(text,chunks) diff --git a/code/predict.py b/code/predict.py index a4ddc3a..cc2567f 100644 --- a/code/predict.py +++ b/code/predict.py @@ -87,6 +87,12 @@ def main(): con = con[:-3] + 'con' for t in libml.bits(model.type): + + # FIXME - workaround. 
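# For reference: model.type packs the enabled learners into a bitmask (the
# flag constants appear elsewhere in this patch as the power-of-two values
# 1, 2 and 4), and libml.bits() presumably unpacks it into the individual
# flags iterated over above. A hypothetical equivalent, for illustration only;
# the real implementation lives in libml.py and may differ:
def bits(flags):
    """Yield each power-of-two flag that is set in the bitmask."""
    bit = 1
    while bit <= flags:
        if flags & bit:
            yield bit
        bit <<= 1

# list(bits(5)) == [1, 4]   (two of the three single-bit flags set)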
I'm not sure why it doesnt make some + if t not in labels: + note.write_i2b2(con_path,[]) + continue + if t == libml.SVM: helper.mkpath(os.path.join(args.output, "svm")) con_path = os.path.join(path, "svm", con) @@ -97,6 +103,7 @@ def main(): helper.mkpath(os.path.join(args.output, "crf")) con_path = os.path.join(path, "crf", con) + # Output the concept predictions note.write_i2b2(con_path, labels[t]) #note.write_plain(con_path, labels[t]) # in case of plain format diff --git a/code/train.py b/code/train.py index 4a73b4b..e0e7390 100644 --- a/code/train.py +++ b/code/train.py @@ -13,7 +13,6 @@ def main(): parser = argparse.ArgumentParser() -<<<<<<< HEAD parser.add_argument("-t", dest = "txt", help = "The files that contain the training examples", From b9281614b3d1c1bdefd8e0ad215fb33af639c81a Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Thu, 6 Mar 2014 00:36:41 -0500 Subject: [PATCH 026/393] enabling many features (performance still bad) --- code/model.py | 37 +++++++++++++++++++++++++++++++------ 1 file changed, 31 insertions(+), 6 deletions(-) diff --git a/code/model.py b/code/model.py index aafb3d2..b83518b 100644 --- a/code/model.py +++ b/code/model.py @@ -226,8 +226,9 @@ def train(self, notes): # @param note. A Note object that contains the data def predict(self, note): - # data - A list of list of the medical text's words - data = note.txtlist() + # data - A list of list of the medical text's words + data = note.txtlist() + # A wrapper for features feat_obj = clicon_features.FeatureWrapper(data) @@ -329,6 +330,7 @@ def predict(self, note): labels_list[key] = labels + # IOB labels # translate labels_list into a readable format # ex. change all occurences of 1 -> 'B' @@ -341,6 +343,8 @@ def predict(self, note): tmp[-1]= map(lambda l: Model.reverse_IOBs_labels[int(l)],tmp[-1]) labels_list[t] = tmp + + #print '-'*80 #print "\nlabels_list" #print labels_list @@ -351,7 +355,8 @@ def predict(self, note): text = data # List of list of tokens (similar to 'text', but concepts are grouped) - chunked = {1:[], 2:[], 4:[]} + chunked = {1:[], 2:[], 4:[]} + hits_list = {1:[], 2:[], 4:[]} # Create tokens of full concept boundaries for second classifier @@ -365,11 +370,18 @@ def predict(self, note): # text_chunks - a merged text # place_holder - ignore. 
It has a value of [] - # hits - one-to-one concept token indices with text_chunks + # hit_tmp - one-to-one concept token indices with text_chunks text_chunks, place_holder, hits = tmp + print '\n'*5 + '-'*80 + '\n'*5 + print hits + for foo,bar in enumerate(text_chunks): + print foo, ': ', bar + print hits + # Store chunked text - chunked[t] = text_chunks + chunked[t] = text_chunks + hits_list[t] = hits ############################# @@ -379,8 +391,10 @@ def predict(self, note): # Predict classification for chunks # FIXME - possible error - only predicts on 4 - text_chunks = chunked[4] + text_chunks = chunked[1] + hits = hits_list[1] + #print labels_list # rows - the format for representing feats for machine learning @@ -393,6 +407,9 @@ def predict(self, note): text_matches.append(text_chunks[i][j]) + #print text_matches + + # FIXME # Not sure if this should be reset, but it makes sense to me to do it # But why is it a data member if it shouldnt persist @@ -413,6 +430,9 @@ def predict(self, note): rows = tmp_rows + #print rows + + # Predict using model second_pass_model = self.filename + '3' libml.write_features(second_pass_model, [rows], None, self.type); @@ -427,6 +447,8 @@ def predict(self, note): second_pass_labels_list[t] = [] + #print second_pass_labels_list + # translate labels_list into a readable format # ex. change all occurences of 0 -> 'none' for t, labels in second_pass_labels_list.items(): @@ -444,6 +466,9 @@ def predict(self, note): second_pass_labels_list[t] = tmp + #print second_pass_labels_list + + # Put predictions into format for Note class to read retVal = {} for t in [1,2,4]: From 9dc0ccff0c23dfd8c3de4dd23ba7ad0a5216403c Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Thu, 6 Mar 2014 00:37:09 -0500 Subject: [PATCH 027/393] enabling many features (performance still bad) --- code/clicon_features.py | 107 +++++++++++++++++++++++++++++----------- 1 file changed, 77 insertions(+), 30 deletions(-) diff --git a/code/clicon_features.py b/code/clicon_features.py index a7c5493..00c6a60 100644 --- a/code/clicon_features.py +++ b/code/clicon_features.py @@ -29,19 +29,27 @@ class FeatureWrapper: # Feature Enabling - enabled_IOB_prose_sentence_features = ImmutableSet( ['prev_POS', 'pos', 'GENIA', 'umls_semantic_type_sentence', 'umls_semantic_context'] ) - enabled_IOB_prose_word_features = ImmutableSet( ['Generic#', 'last_two_letters', 'prev_word', 'uncased_prev_word' ] ) + enabled_IOB_prose_sentence_features = ImmutableSet( ['prev_POS', 'pos', 'stem_wordnet', 'GENIA', 'umls_semantic_type_sentence', 'umls_semantic_context'] ) + enabled_IOB_prose_word_features = ImmutableSet( ['Generic#', 'last_two_letters', 'prev_word', 'uncased_prev_word', 'word','length', 'mitre', 'stem_porter', 'stem_lancaster', 'word_shape' ] ) enabled_IOB_nonprose_sentence_features = ImmutableSet( ['prev_POS'] ) enabled_IOB_nonprose_word_features = ImmutableSet( ['word', 'uncased_prev_word'] ) - enabled_concept_features = ImmutableSet( ['chunk', 'unigram', 'first-four-letters', 'stem_wordnet', 'test_result', 'umls_semantic_type_word'] ) + enabled_concept_sentence_features = ImmutableSet( ['chunk', 'unigram', 'first-four-letters', 'umls_semantic_type_word'] ) + + #stem_wordnet + #test_result + #all features of previous token + #all features of next token + #metric_unit + #has_problem_form + #def_class enabled_concept_word_features = ImmutableSet(['word', 'length', 'mitre', 'stem_porter', 'stem_lancaster', 'word_shape']) - # Run the GENIA tagger on the given data + # Instantiate an FeatureWrapper object def 
__init__(self, data): - + # Only run GENIA tagger if feature is enabled if 'GENIA' in self.enabled_IOB_prose_sentence_features: self.GENIA_features = clicon_genia_interface.genia(data) @@ -51,6 +59,7 @@ def __init__(self, data): self.umls_lookup_cache = UmlsCache() + # Iterate through GENIA Tagger features def next_GENIA_line(self): @@ -94,38 +103,44 @@ def IOB_prose_features_for_sentence(self, sentence): features_list = [] + # Get a feature set for each word in the sentence for i,word in enumerate(sentence): features_list.append( self.IOB_prose_features_for_word(sentence,i) ) + # Only POS tag once + pos_tagged = [] + + # Allow for particular features to be enabled for feature in self.enabled_IOB_prose_sentence_features: # Feature: Previous POS if feature == 'prev_POS': - pos_tagged = nltk.pos_tag(sentence) + pos_tagged = pos_tagged or nltk.pos_tag(sentence) features_list[0].update( {('prev_POS','') : 1} ) for i in range(1,len(sentence)): features_list[i].update( {('prev_POS',pos_tagged[i-1]) : 1} ) - # Feature: 1-token part-of-speech context + # Feature: Part of Speech if feature == 'pos': + pos_tagged = pos_tagged or nltk.pos_tag(sentence) for (i,(_,pos)) in enumerate(pos_tagged): features_list[i].update( { ('pos',pos) : 1} ) # Feature: UMLS semantic type for the sentence if feature == 'umls_semantic_type_sentence': - # a list of the uml semantic of the largest substring(s). + # a list of the uml semantic of the largest substring(s). sentence_mapping = umls.umls_semantic_type_sentence( self.umls_lookup_cache, sentence ) - # If there are no mappings. + # If there are no mappings. if( len(sentence_mapping) == 0 ): for i , features in enumerate( features_list): - features[(feature , None ) ] = 1 - # assign the umls definitions to the vector for each word in the sentence. + features[(feature , None ) ] = 1 + # assign the umls definitions to the vector for each word in the sentence. 
else: for i , features in enumerate( features_list): for concept in sentence_mapping: @@ -134,16 +149,16 @@ def IOB_prose_features_for_sentence(self, sentence): # Feature: UMLS semantic concext if feature == 'umls_semantic_context': - # a list of lists, each sub list contains the umls definition of the largest string the word is in - umls_semantic_context_mappings = umls.umls_semantic_context_of_words( self.umls_lookup_cache , sentence ) - - # assign the umls definitions in the sublists to the vector of the corresponding word + # each sub list contains the umls definition of the largest string the word is in + umls_semantic_context_mappings = umls.umls_semantic_context_of_words( self.umls_lookup_cache , sentence ) + + # assign the umls definitions in the sublists to the vector of the corresponding word for i , features in enumerate( features_list ): #if there are no mappings if umls_semantic_context_mappings[i] == None: features[(feature,None)] = 1 - #if there is a mapping, there could be multiple contexts, iterate through the sublist + #if there is a mapping, there could be multiple contexts, iterate through the sublist else: for mapping in umls_semantic_context_mappings[i]: features[(feature,mapping)] = 1 @@ -218,15 +233,47 @@ def IOB_prose_features_for_word(self, sentence, i): if i == 0: features.update( {('prev_word', '' ) : 1} ) else: + #print len(sentence) + #print i features.update( {('prev_word', sentence[i-1]) : 1} ) - # Feature Uncased previous word - if feature == 'uncased_prev_word': - if i == 0: - features.update( {('uncased_prev_word', '' ) : 1} ) - else: - features.update( {('uncased_prev_word',sentence[i-1].lower()) : 1} ) + # FIXME - adding pass two features to pass 1 (good? bad?) + if feature == "word": + features[(feature, word)] = 1 + + if feature == "length": + features[(feature, None)] = len(word) + + if feature == "mitre": + for f in self.mitre_features: + if re.search(self.mitre_features[f], word): + features[(feature, f)] = 1 + + if feature == "stem_porter": + st = nltk.stem.PorterStemmer() + features[(feature, st.stem(word))] = 1 + + if feature == "stem_lancaster": + st = nltk.stem.LancasterStemmer() + features[(feature, st.stem(word))] = 1 + + if feature == "stem_snowball": + st = nltk.stem.SnowballStemmer("english") + features[(feature, st.stem(word))] = 1 + + if feature == "word_shape": + wordShapes = getWordShapes(word) + for j, shape in enumerate(wordShapes): + features[(feature + str(j), shape)] = 1 + + if feature == "metric_unit": + unit = 0 + if self.is_weight(word): + unit = 1 + elif self.is_size(word): + unit = 2 + features[(feature, None)] = unit return features @@ -253,7 +300,7 @@ def IOB_nonprose_features_for_sentence(self, sentence): # Get a feature set for each word in the sentence for i,word in enumerate(sentence): - features_list.append( self.IOB_prose_features_for_word(sentence,i) ) + features_list.append( self.IOB_nonprose_features_for_word(sentence,i) ) # Allow for particular features to be enabled @@ -266,7 +313,6 @@ def IOB_nonprose_features_for_sentence(self, sentence): for i in range(1,len(sentence)): features_list[i].update( {('prev_POS',pos_tagged[i-1]) : 1} ) - return features_list @@ -285,7 +331,7 @@ def IOB_nonprose_features_for_word(self, sentence, i): # Feature: features = {'dummy': 1} # always have >0 dimensions - + # Allow for particular features to be enabled for feature in self.enabled_IOB_nonprose_word_features: @@ -467,9 +513,10 @@ def concept_features(self, sentence, ind): for i in range(len(sentence[ind])): features.update( 
self.concept_features_for_word( sentence[ind], i ) ) + tags = None # Allow for particular features to be enabled - for feature in self.enabled_IOB_prose_sentence_features: + for feature in self.enabled_concept_sentence_features: # Feature: the chunk itself if feature == 'chunk': @@ -492,9 +539,9 @@ def concept_features(self, sentence, ind): if feature == 'umls_semantic_type_word': # Get a semantic type for each unigram - for i,word in enmuerate(sentence[ind]): - mapping = umls.umls_semantic_type_word(self.umls_lookup_cache , word ) - #If there is no umls semantic type. + for i,word in enumerate(sentence[ind]): + mapping = umls.umls_semantic_type_word(self.umls_lookup_cache , word ) + #If there is no umls semantic type. featname = 'umls_semantic_type_word"%d' % i if( mapping == None ): features[(feature,None)] = 1 @@ -523,6 +570,7 @@ def concept_features(self, sentence, ind): features[(feature, None)] = 1 + return features @@ -691,4 +739,3 @@ def get_def_class( word): return 3 return 0 - From 2230b6b7e3baf2e29fb9049f56585b53ab33fa18 Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Thu, 6 Mar 2014 00:37:43 -0500 Subject: [PATCH 028/393] blank-line issue not fixed yet --- code/clicon_genia_interface.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/code/clicon_genia_interface.py b/code/clicon_genia_interface.py index 8bc0633..03ad3bf 100644 --- a/code/clicon_genia_interface.py +++ b/code/clicon_genia_interface.py @@ -48,6 +48,8 @@ def genia( data ): @return A list of dcitionaries of the genia tagger's output. ''' + # FIXME - crashes when the is an empty line in the file + # FIXME - write list to file and then feed it to GENIA # FIXME - hard coded directory!! From 39f05914c2bef7228d3d33afb48245b9d5abff9c Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Tue, 1 Apr 2014 18:47:31 -0400 Subject: [PATCH 029/393] added support for cui and hypernym lookups --- code/SQLookup.py | 38 +++++++++++++++++++++++++++++++++++--- 1 file changed, 35 insertions(+), 3 deletions(-) diff --git a/code/SQLookup.py b/code/SQLookup.py index fefa13e..0e8bec3 100644 --- a/code/SQLookup.py +++ b/code/SQLookup.py @@ -26,13 +26,45 @@ def SQLConnect(): def SQlookup( c , string ): #queries database and finds first semantic type match, returns a 1 when a match is found. - c.execute( "SELECT sty FROM MRCON a, MRSTY b WHERE a.cui = b.cui AND str = ? LIMIT 1;" , (string,) ) + c.execute( "SELECT sty FROM MRCON a, MRSTY b WHERE a.cui = b.cui AND str = ?; " , (string,) ) #returns a tuple with the match or None if there was no match. - return c.fetchone() + #return c.fetchone() + return c.fetchall() #returns the semantic type of a word def string_lookup( string ): - return SQlookup( c , string ) + #return SQlookup( c , string ) + r = SQlookup( c , string ) + + return r + + +# get all semantic types for a given concept +def cui_lookup( string ): + + # queries database and finds semantic type match + c.execute( "SELECT cui FROM MRCON WHERE str = ?;" , (string,) ) + + #returns a tuple with the match or None if there was no match. + return c.fetchall() + + +def hypernyms_lookup( string ): + + c.execute( "SELECT CUI FROM MRCON WHERE STR = ? LIMIT 1 ;" , (string,) ) + + cui = c.fetchone() + + if cui == None: + return None + else: + c.execute( "SELECT CUI2 FROM MRREL WHERE CUI1 = ? 
AND REL = 'PAR' LIMIT 5 ;" , (cui[0],) ) + result = c.fetchone() + if not result: + return None + else: + return result + From 96af5519547541e167519f3c8c4ef4161919782b Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Tue, 1 Apr 2014 18:49:02 -0400 Subject: [PATCH 030/393] added lot of features for each of the three models --- code/clicon_features.py | 805 ++++++++++++++++++++++++++++------------ 1 file changed, 564 insertions(+), 241 deletions(-) diff --git a/code/clicon_features.py b/code/clicon_features.py index 00c6a60..2ad33a9 100644 --- a/code/clicon_features.py +++ b/code/clicon_features.py @@ -24,34 +24,24 @@ - class FeatureWrapper: # Feature Enabling - enabled_IOB_prose_sentence_features = ImmutableSet( ['prev_POS', 'pos', 'stem_wordnet', 'GENIA', 'umls_semantic_type_sentence', 'umls_semantic_context'] ) - enabled_IOB_prose_word_features = ImmutableSet( ['Generic#', 'last_two_letters', 'prev_word', 'uncased_prev_word', 'word','length', 'mitre', 'stem_porter', 'stem_lancaster', 'word_shape' ] ) - - enabled_IOB_nonprose_sentence_features = ImmutableSet( ['prev_POS'] ) - enabled_IOB_nonprose_word_features = ImmutableSet( ['word', 'uncased_prev_word'] ) + enabled_IOB_prose_sentence_features = ImmutableSet( [ 'umls_semantic_type_sentence', 'pos', 'stem_wordnet', 'umls_semantic_context', 'GENIA', 'prev', 'next', 'prev_3_pos'] ) + enabled_IOB_prose_word_features = ImmutableSet( ['Generic#', 'last_two_letters', 'word', 'mitre', 'stem_porter', 'word_shape', 'metric_unit','umls_cui' 'umls_hypernyms' ] ) - enabled_concept_sentence_features = ImmutableSet( ['chunk', 'unigram', 'first-four-letters', 'umls_semantic_type_word'] ) + enabled_IOB_nonprose_sentence_features = ImmutableSet( ['prev_pos', 'pos', 'next_pos', 'test_result', 'umls_semantic_context', 'prev', 'next','prev_3_pos']) + enabled_IOB_nonprose_word_features = ImmutableSet( ['word', 'uncased_prev_word','umls_semantic_type_word', 'word_shape', 'metric_unit', 'mitre', 'directive', 'umls_cui', 'umls_hypernyms'] ) - #stem_wordnet - #test_result - #all features of previous token - #all features of next token - #metric_unit - #has_problem_form - #def_class - enabled_concept_word_features = ImmutableSet(['word', 'length', 'mitre', 'stem_porter', 'stem_lancaster', 'word_shape']) + enabled_concept_features = ImmutableSet( [ 'umls_semantic_type_sentence' , 'umls_semantic_type_word','pos','stem_wordnet', 'test_result', 'word', 'length', 'mitre', 'stem_porter', 'stem_lancaster', 'word_shape','prev','next', 'umls_semantic_context', 'prev_3_pos', 'umls_cui']) # Instantiate an FeatureWrapper object - def __init__(self, data): + def __init__(self, data=None): # Only run GENIA tagger if feature is enabled - if 'GENIA' in self.enabled_IOB_prose_sentence_features: + if (data) and ('GENIA' in self.enabled_IOB_prose_sentence_features): self.GENIA_features = clicon_genia_interface.genia(data) self.GENIA_counter = 0 @@ -82,6 +72,8 @@ def next_GENIA_line(self): # output: A hash table of features def IOB_features_for_sentence(self, sentence): + #return (True,self.IOB_prose_features_for_sentence(sentence)) + isProse = self.prose_sentence(sentence) # Different features depending on whether sentence is 'prose' @@ -102,7 +94,7 @@ def IOB_features_for_sentence(self, sentence): def IOB_prose_features_for_sentence(self, sentence): features_list = [] - + flag = 0 # Get a feature set for each word in the sentence for i,word in enumerate(sentence): @@ -112,18 +104,13 @@ def IOB_prose_features_for_sentence(self, sentence): # Only POS tag once pos_tagged = [] + # Used 
for 'prev' and 'next' features + ngram_features = [{} for i in range(len(features_list))] + # Allow for particular features to be enabled for feature in self.enabled_IOB_prose_sentence_features: - # Feature: Previous POS - if feature == 'prev_POS': - pos_tagged = pos_tagged or nltk.pos_tag(sentence) - features_list[0].update( {('prev_POS','') : 1} ) - for i in range(1,len(sentence)): - features_list[i].update( {('prev_POS',pos_tagged[i-1]) : 1} ) - - # Feature: Part of Speech if feature == 'pos': pos_tagged = pos_tagged or nltk.pos_tag(sentence) @@ -140,28 +127,53 @@ def IOB_prose_features_for_sentence(self, sentence): if( len(sentence_mapping) == 0 ): for i , features in enumerate( features_list): features[(feature , None ) ] = 1 - # assign the umls definitions to the vector for each word in the sentence. + # assign the umls definitions to the vector for each word else: for i , features in enumerate( features_list): - for concept in sentence_mapping: - features[(feature , concept ) ] = 1 + for concepts in sentence_mapping: + if concepts: + for concept in concepts: + features[(feature,concept[0])] = 1 + else: + features[(feature , None ) ] = 1 # Feature: UMLS semantic concext if feature == 'umls_semantic_context': - # each sub list contains the umls definition of the largest string the word is in + # umls definition of the largest string the word is in umls_semantic_context_mappings = umls.umls_semantic_context_of_words( self.umls_lookup_cache , sentence ) - # assign the umls definitions in the sublists to the vector of the corresponding word + #print umls_semantic_context_mappings + + # assign umls definitions to the each chunk for i , features in enumerate( features_list ): #if there are no mappings if umls_semantic_context_mappings[i] == None: features[(feature,None)] = 1 - #if there is a mapping, there could be multiple contexts, iterate through the sublist + # there could be multiple contexts else: for mapping in umls_semantic_context_mappings[i]: - features[(feature,mapping)] = 1 + for concept in mapping: + features[(feature,concept[0])] = 1 + + # Feature: Previous 3 POSs + if feature == 'prev_3_pos': + pos_tagged = pos_tagged or nltk.pos_tag(sentence) + for i in range(len(sentence)): + if i == 0: + prev_pos = ('*','*','*') + features_list[0].update({(feature,prev_pos) :1}) + elif i == 1: + prev_pos = ('*','*',pos_tagged[0][1]) + features_list[1].update({(feature,prev_pos) :1}) + elif i == 2: + prev_pos = ('*',pos_tagged[0][1],pos_tagged[1][1]) + features_list[2].update({(feature,prev_pos) :1}) + else: + prev_pos = (pos_tagged[i-3][1],pos_tagged[i-2][1],pos_tagged[i-1][1]) + features_list[i].update({(feature,pos_tagged[i-1]):1}) + # GENIA features if feature == 'GENIA': @@ -170,8 +182,12 @@ def IOB_prose_features_for_sentence(self, sentence): genia_feats = self.next_GENIA_line() if not genia_feats: genia_feats = self.next_GENIA_line() + #print genia_feats + for i in range(len(sentence)): + #print i + # Feature: Current word's GENIA features keys = ['GENIA-stem','GENIA-POS','GENIA-chunktag'] curr = genia_feats[i] @@ -192,7 +208,29 @@ def IOB_prose_features_for_sentence(self, sentence): features_list[i].update( { ('next-GENIA-stem','') : 1} ) features_list[i].update(output) + + ngram_features = [{} for i in range(len(features_list))] + if "prev" in self.enabled_IOB_prose_sentence_features: + prev = lambda f: {("prev_"+k[0], k[1]): v for k,v in f.items()} + prev_list = map(prev, features_list) + for i in range(len(features_list)): + if i == 0: + ngram_features[i][("prev", "*")] = 1 + 
else: + ngram_features[i].update(prev_list[i-1]) + + if "next" in self.enabled_IOB_prose_sentence_features: + next = lambda f: {("next_"+k[0], k[1]): v for k,v in f.items()} + next_list = map(next, features_list) + for i in range(len(features_list)): + if i == len(features_list) - 1: + ngram_features[i][("next", "*")] = 1 + else: + ngram_features[i].update(next_list[i+1]) + merged = lambda d1, d2: dict(d1.items() + d2.items()) + features_list = [merged(features_list[i], ngram_features[i]) + for i in range(len(features_list))] return features_list @@ -233,10 +271,34 @@ def IOB_prose_features_for_word(self, sentence, i): if i == 0: features.update( {('prev_word', '' ) : 1} ) else: - #print len(sentence) - #print i features.update( {('prev_word', sentence[i-1]) : 1} ) + # Feature: UMLS Semantic Types + if feature == 'umls_cui': + + # Get UMLS CUIs (could have multiple) + cuis = umls.get_cui(self.umls_lookup_cache , word) + + # Add each CUI + if cuis: + for cui in cuis: + features[(feature,cui)] = 1 + else: + features[(feature,None)] = 1 + + # Feature: UMLS Hypernyms + if feature == 'umls_hypernyms': + + # Get UMLS hypernyms + hyps = umls.umls_hypernyms(self.umls_lookup_cache,word) + + # Add all hypernyms + if hyps: + #for hyp in hyps: + features[(feature,hyps[0])] = 1 + else: + features[(feature,None)] = 1 + # FIXME - adding pass two features to pass 1 (good? bad?) if feature == "word": @@ -258,23 +320,11 @@ def IOB_prose_features_for_word(self, sentence, i): st = nltk.stem.LancasterStemmer() features[(feature, st.stem(word))] = 1 - if feature == "stem_snowball": - st = nltk.stem.SnowballStemmer("english") - features[(feature, st.stem(word))] = 1 - if feature == "word_shape": wordShapes = getWordShapes(word) for j, shape in enumerate(wordShapes): features[(feature + str(j), shape)] = 1 - if feature == "metric_unit": - unit = 0 - if self.is_weight(word): - unit = 1 - elif self.is_size(word): - unit = 2 - features[(feature, None)] = unit - return features @@ -294,7 +344,7 @@ def IOB_nonprose_features_for_sentence(self, sentence): if not genia_feats: genia_feats = self.next_GENIA_line() # If sentence is empty - if not sentence: return {} + #if not sentence: return {} features_list = [] @@ -302,16 +352,101 @@ def IOB_nonprose_features_for_sentence(self, sentence): for i,word in enumerate(sentence): features_list.append( self.IOB_nonprose_features_for_word(sentence,i) ) + # Only POS tag once + pos_tagged = [] # Allow for particular features to be enabled for feature in self.enabled_IOB_nonprose_sentence_features: # Feature: Previous POS - if feature == 'prev_POS': - pos_tagged = nltk.pos_tag(sentence) - features_list[0].update( {('prev_POS','') : 1} ) - for i in range(1,len(sentence)): - features_list[i].update( {('prev_POS',pos_tagged[i-1]) : 1} ) + if feature == 'prev_pos': + pos_tagged = pos_tagged or nltk.pos_tag(sentence) + for i in range(len(sentence)): + if i == 0: + features_list[0].update({('prev_POS','') :1}) + else: + features_list[i].update({('prev_POS',pos_tagged[i-1]):1}) + + # Feature: Part of Speech + if feature == 'pos': + pos_tagged = pos_tagged or nltk.pos_tag(sentence) + for (i,(_,pos)) in enumerate(pos_tagged): + features_list[i].update( { ('pos',pos) : 1} ) + + # Feature: Previous POS + if feature == 'next_pos': + pos_tagged = pos_tagged or nltk.pos_tag(sentence) + for i in range(len(sentence)): + if i == len(sentence)-1: + features_list[-1].update({('prev_POS','') :1}) + else: + features_list[i].update({('prev_POS',pos_tagged[i-1]):1}) + + # Feature: Test Result (for each 
chunk) + if feature == "test_result": + for index, features in enumerate(features_list): + right = " ".join([w for w in sentence[index:]]) + if self.is_test_result(right): + features[(feature, None)] = 1 + + # Feature: UMLS semantic context + if feature == 'umls_semantic_context': + # the umls definition of the largest string the word is in + umls_semantic_context_mappings = umls.umls_semantic_context_of_words( self.umls_lookup_cache , sentence ) + + # Semantic contxt of each word + for i in range(len(sentence)): + + #if there are no mappings + if umls_semantic_context_mappings[i] == None: + features_list[i][(feature,None)] = 1 + # there could be multiple contexts + else: + for mapping in umls_semantic_context_mappings[i]: + for concept in mapping: + features_list[i][(feature,concept[0])] = 1 + + # Feature: Previous 3 POSs + if feature == 'prev_3_pos': + pos_tagged = pos_tagged or nltk.pos_tag(sentence) + for i in range(len(sentence)): + if i == 0: + prev_pos = ('*','*','*') + features_list[0].update({(feature,prev_pos) :1}) + elif i == 1: + prev_pos = ('*','*',pos_tagged[0][1]) + features_list[1].update({(feature,prev_pos) :1}) + elif i == 2: + prev_pos = ('*',pos_tagged[0][1],pos_tagged[1][1]) + features_list[2].update({(feature,prev_pos) :1}) + else: + prev_pos = (pos_tagged[i-3][1],pos_tagged[i-2][1],pos_tagged[i-1][1]) + features_list[i].update({(feature,pos_tagged[i-1]):1}) + + + + ngram_features = [{} for i in range(len(features_list))] + if "prev" in self.enabled_IOB_nonprose_sentence_features: + prev = lambda f: {("prev_"+k[0], k[1]): v for k,v in f.items()} + prev_list = map(prev, features_list) + for i in range(len(features_list)): + if i == 0: + ngram_features[i][("prev", "*")] = 1 + else: + ngram_features[i].update(prev_list[i-1]) + + if "next" in self.enabled_IOB_nonprose_sentence_features: + next = lambda f: {("next_"+k[0], k[1]): v for k,v in f.items()} + next_list = map(next, features_list) + for i in range(len(features_list)): + if i == len(features_list) - 1: + ngram_features[i][("next", "*")] = 1 + else: + ngram_features[i].update(next_list[i+1]) + + merged = lambda d1, d2: dict(d1.items() + d2.items()) + features_list = [merged(features_list[i], ngram_features[i]) + for i in range(len(features_list))] return features_list @@ -346,6 +481,81 @@ def IOB_nonprose_features_for_word(self, sentence, i): else: features.update( {('uncased_prev_word',sentence[i-1].lower()) : 1} ) + # Feature: UMLS Semantic Type + if feature == 'umls_semantic_type_word': + # Get a semantic type for each unigram + mapping = umls.umls_semantic_type_word(self.umls_lookup_cache,word ) + + #If there is at least one umls semantic type. 
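# Sketch of the sparse encoding used throughout these feature extractors:
# every (feature_name, value) pair becomes its own binary key in the word's
# feature dictionary, so a term carrying several UMLS semantic types simply
# contributes several keys. 'types' below is a hypothetical lookup result.
types = ['Sign or Symptom', 'Finding']
feats = {}
for semtype in (types or [None]):
    feats[('umls_semantic_type_word', semtype)] = 1
# feats == {('umls_semantic_type_word', 'Sign or Symptom'): 1,
#           ('umls_semantic_type_word', 'Finding'): 1}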
+ if mapping: + for concept in mapping: + features[('umls_semantic_type_word' , concept )] = 1 + else: + features[('umls_semantic_type_word' , None )] = 1 + + # Feature: UMLS Semantic Types + if feature == 'umls_cui': + # Get UMLS CUIs (could have multiple) + cuis = umls.get_cui(self.umls_lookup_cache , word) + + # Add each CUI + if cuis: + for cui in cuis: + features[(feature,cui)] = 1 + else: + features[(feature,None)] = 1 + + # Feature: UMLS Hypernyms + if feature == 'umls_hypernyms': + + # Get UMLS hypernyms + hyps = umls.umls_hypernyms(self.umls_lookup_cache,word) + + # Add all hypernyms + if hyps: + #for hyp in hyps: + features[(feature,hyps[0])] = 1 + else: + features[(feature,None)] = 1 + + + # Feature: Metric Unit + if feature == "metric_unit": + tests = 3 + unit = 0 + if self.is_weight(word): + unit = 1 / tests + elif self.is_size(word): + unit = 2 / tests + elif self.is_volume(word): + unit = 3 / tests + features[(feature, None)] = unit + + # Feature: Date + if feature == 'date': + if self.is_date(word): + features[(feature,None)] = 1 + else: + features[(feature,None)] = 0 + + # Feature: Directive + if feature == 'directive': + if self.is_directive(word): + features[(feature,None)] = 1 + else: + features[(feature,None)] = 0 + + # Feature: Mitre + if feature == "mitre": + for f in self.mitre_features: + if re.search(self.mitre_features[f], word): + features[(feature, f)] = 1 + + # Feature: Word Shape + if feature == "word_shape": + wordShapes = getWordShapes(word) + for j, shape in enumerate(wordShapes): + features[('word_shape', shape)] = 1 return features @@ -416,233 +626,247 @@ def prose_word(self, word): - # generate_chunks() - # - # input: Three arguments: - # 1) A list of list of word (the data of the file) - # 2) A list of list of IOB tags (one-to-one with list from arg 1) - # 3) A list of list of concepts (one-to-one with list from arg 1) - # + # concept_features() # - # output: A 3-tuple of: - # 1) A list of list of chunks (word token phrases) - # 2) A list of list of concepts (one-to-one with list from ret 1) - # 3) A list of all indices into 1 that have been deemed nontrivial - def generate_chunks(self, data, IOB_tags, labels=None): - - # List of list of tokens (similar to 'text', but concepts are grouped) - text_chunks = [] - - # one-to-one concept classification with text_chunks - if labels: - concept_chunks = [] - else: - concept_chunks = None - - # List of (line,token) pairs for classifications that are nont 'none' - hits = [] - - # Create tokens of full concept boundaries for second classifier - for i, concept_line in enumerate(IOB_tags): - - # One line of 'chunked' - line_of_text_chunks = [] - if labels: line_of_concept_chunks = [] - - # stores the current streak - queue = [] - - # Necessary when multiple concepts are on one line - # The second concept's j index is relative to a word-split array - # The j of the new token should be relative to how chunks there are - chunk_offset = 0 - - # C-style array indexing. Probably could be done a better way. 
- # Used because I needed the ability of lookahead - for j in range(len(concept_line)): - - # Outside - # concet_line in "012" instead of "IOB" - if concept_line[j] == 'O': - line_of_text_chunks.append(data[i][j]) - if labels: line_of_concept_chunks.append('none') - - # Beginning of a concept boundary - else: - - # Increase size of current streak - queue.append(data[i][j]) - - # lookahead (check if streak will continue) - if (j+1 == len(concept_line))or \ - (concept_line[j+1] != 'I'): # end of classifiation - - # Add full concept token - line_of_text_chunks.append(' '.join(queue)) - if labels: line_of_concept_chunks.append(labels[i][j]) - - # Store indices of detected concept - hits.append( ( i, j + 1-len(queue) - chunk_offset ) ) - - # Reminder: used in the case that a concept follows a - # multi-word concept on the same line - chunk_offset += len(queue) - 1 - - # Reset streak - queue = [] - - text_chunks.append(line_of_text_chunks) - if labels: concept_chunks.append(line_of_concept_chunks) - + # input: A sentence/line from a medical text file (list of chunks) + # An index into the sentence to indentify the given chunk + # output: A hash table of features + def concept_features(self, sentence, ind): - return (text_chunks, concept_chunks, hits) + # Create a list of feature sets (one per chunk) + features = self.concept_features_for_chunk(sentence,ind) + + tags = [] + + # Feature: Previous 3 POSs + if 'prev_3_pos' in self.enabled_concept_features: + tags = tags = nltk.pos_tag(sentence) + if ind == 0: + prev_pos = ('*','*','*') + features[('prev_3_pos',prev_pos)] = 1 + elif ind == 1: + prev_pos = ('*','*',tags[0][1]) + features[('prev_3_pos',prev_pos)] = 1 + elif ind == 2: + prev_pos = ('*',tags[0][1],tags[1][1]) + features[('prev_3_pos',prev_pos)] = 1 + else: + prev_pos = (tags[ind-3][1],tags[ind-2][1],tags[ind-1][1]) + features[('prev_3_pos',prev_pos)] = 1 + + # Feature: UMLS semantic type for the sentence + if 'umls_semantic_type_sentence' in self.enabled_concept_features: + # a list of the uml semantic of the largest substring(s). 
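        # Shape consumed by the loop below (values are hypothetical): one
        # entry per matched phrase, each entry a list of rows whose first
        # column is the semantic type; falsy entries fall through to the
        # (feature, None) case.
        #
        #   sentence_mapping = [[('Sign or Symptom',)], None]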
+ sentence_mapping = umls.umls_semantic_type_sentence( self.umls_lookup_cache, sentence ) + + #print sentence_mapping + + # if there are no mappings + if not sentence_mapping: + features[('umls_semantic_type_sentence', None ) ] = 1 + # assign the umls definitions to the vector for each word + else: + + for concept in sentence_mapping: + if concept: + for mapping in concept: + features[('umls_semantic_type_sentence' , mapping[0] ) ] = 1 + else: + features[('umls_semantic_type_sentence' , None ) ] = 1 +# print features + # Feature: Previous Chunks's Features + if "prev" in self.enabled_concept_features: + if ind == 0: + features[("prev", "*")] = 1 + else: + # Get features of previous chunks + prev_features = self.concept_features_for_chunk(sentence,ind-1) + prepend = lambda f: {("prev_"+k[0], k[1]): v for k, v in f.items()} + features.update( prepend(prev_features) ) + + + # Feature: Next Chunk's Features + if "next" in self.enabled_concept_features: + if ind == len(sentence) - 1: + features[("next", "*")] = 1 + else: + # Get features of previous chunks + next_features = self.concept_features_for_chunk(sentence,ind+1) + prepend = lambda f: {("next_"+k[0], k[1]): v for k, v in f.items()} + features.update( prepend(next_features) ) + return features - # concept_features() + # concept_features_for_chunk() # # input: A sentence/line from a medical text file (list of chunks) # An index into the sentence to indentify the given chunk - # output: A list of hash tables (one hash table per word) - def concept_features(self, sentence, ind): + # output: A hash table of features + def concept_features_for_chunk(self, sentence, ind): features = {} - # Get features for each unigram - for i in range(len(sentence[ind])): - features.update( self.concept_features_for_word( sentence[ind], i ) ) + # Feature: + features = {'dummy': 1} # always have >0 dimensions + + # Split the chunked sentence into a list of words for (POS tagger) + split_sentence = [] + split_ind = 0 + for chun in sentence: + for word in chun.split(): + split_sentence.append(word) + split_ind += len(chun.split()) tags = None + # Allow for particular features to be enabled - for feature in self.enabled_concept_sentence_features: + for feature in self.enabled_concept_features: - # Feature: the chunk itself - if feature == 'chunk': - features.update( { ('chunk',sentence[ind]) : 1 } ) + # Feature: Word (each word) + if feature == "word": + for i,word in enumerate(sentence[ind].split()): + featname = 'word-%d' % i + features.update( { (featname,word) : 1} ) - # Feature: Uncased unigrams - if feature == 'unigram': - for i,word in enumerate( sentence[ind].split() ): - featname = 'unigram-%d' % i - features.update( { (featname, word.lower()) : 1 } ) + # Feature: Length (of each word) + if feature == "length": + for i,word in enumerate(sentence[ind].split()): + featname = 'length-%d' % i + features.update( { (featname,None) : len(word)} ) - # Feature: First four letters of each word - if feature == 'first-four-letters': - prefix_list = [ word[:4] for word in sentence[ind].split() ] - for i,word in enumerate(prefix_list): - featname = 'first-four-letters-%d' % i - features.update( { (featname, word) : 1 } ) + # Feature: Mitre (of each word) + if feature == "mitre": + for i,word in enumerate(sentence[ind].split()): + for f in self.mitre_features: + if re.search(self.mitre_features[f], word): + featname = 'mitre' + features.update( { (featname,f) : 1} ) - # Feature: UMLS Semantic Type (ignores context) + # Feature: Porter Stem (of each word) + if feature 
== "stem_porter": + for i,word in enumerate(sentence[ind].split()): + featname = 'stem_porter-%d' % i + st = nltk.stem.PorterStemmer() + features[(featname, st.stem(word))] = 1 + + # Feature: Lancaster Stem (of each word) + if feature == "stem_lancaster": + for i,word in enumerate(sentence[ind].split()): + featname = 'stem_lancaster-%d' % i + st = nltk.stem.LancasterStemmer() + features[(featname, st.stem(word))] = 1 + + # Feature: Word Shape (of each word) + if feature == "word_shape": + for i,word in enumerate(sentence[ind].split()): + featname = 'word_shape-%d' % i + wordShapes = getWordShapes(word) + for j, shape in enumerate(wordShapes): + features[(featname + str(j), shape)] = 1 + + # Feature: UMLS Semantic Type (for each word) if feature == 'umls_semantic_type_word': # Get a semantic type for each unigram for i,word in enumerate(sentence[ind]): mapping = umls.umls_semantic_type_word(self.umls_lookup_cache , word ) - #If there is no umls semantic type. - featname = 'umls_semantic_type_word"%d' % i - if( mapping == None ): - features[(feature,None)] = 1 + # If is at least one semantic type + featname = 'umls_semantic_type_word-%d' % i + if mapping: + for concept in mapping: + features[(featname, concept )] = 1 else: - features[(feature , mapping )] = 1 + features[(featname , None )] = 1 - # Feature: Stemmed Word - if feature == "stem_wordnet": - tags = tags or nltk.pos_tag(sentence) - morphy_tags = { - 'NN':nltk.corpus.reader.wordnet.NOUN, - 'JJ':nltk.corpus.reader.wordnet.ADJ, - 'VB':nltk.corpus.reader.wordnet.VERB, - 'RB':nltk.corpus.reader.wordnet.ADV} - morphy_tags = [(w, morphy_tags.setdefault(t[:2], nltk.corpus.reader.wordnet.NOUN)) for w,t in tags] - st = nltk.stem.WordNetLemmatizer() - for i, features in enumerate(features_list): - tag = morphy_tags[i] - features[(feature, st.lemmatize(*tag))] = 1 - # Feature: Test Result - if feature == "test_result": - for index, features in enumerate(features_list): - right = " ".join([w for w in sentence[index:]]) - if self.is_test_result(right): - features[(feature, None)] = 1 + # Feature: UMLS Semantic Types + if feature == 'umls_cui': + # Get UMLS CUIs (could have multiple) + cuis = umls.get_cui(self.umls_lookup_cache , word) + # Add each CUI + if cuis: + for cui in cuis: + features[(feature,cui)] = 1 + else: + features[(feature,None)] = 1 + - return features - - - - # concept_features_for_word() - # - # input: A single word - # output: A dictionary of features - def concept_features_for_word(self, chunk, i): - - # Abbreviation for most features, - # although some will require index for context - word = chunk[i] - + # Feature: Part of Speech (of each word) + if feature == "pos": + tags = tags or nltk.pos_tag(split_sentence) + for j,pos_tag in enumerate(tags[ind:ind+len(sentence[ind].split())]): + featname = 'pos-%d' % j + features[(featname, pos_tag[1])] = 1 - # Feature: - features = {'dummy': 1} # always have >0 dimensions + # Feature: Wordnet Stem (for each chunk) + if feature == "stem_wordnet": + tags = tags or nltk.pos_tag(sentence) + morphy_tags = { + 'NN': nltk.corpus.reader.wordnet.NOUN, + 'JJ': nltk.corpus.reader.wordnet.ADJ, + 'VB': nltk.corpus.reader.wordnet.VERB, + 'RB': nltk.corpus.reader.wordnet.ADV} + morphy_tags = [(w, morphy_tags.setdefault(t[:2], nltk.corpus.reader.wordnet.NOUN)) for w, t in tags] + st = nltk.stem.WordNetLemmatizer() + tag = morphy_tags[ind] + features[(feature, st.lemmatize(*tag))] = 1 + # Feature: Test Result (for each chunk) + if feature == "test_result": + right = " ".join([w for w in 
sentence[ind:]]) + if self.is_test_result(right): + features[(feature, None)] = 1 - # word_shape, word, length, mitre, stem_porter, stem_lancaster - for feature in self.enabled_concept_word_features: + # Feature: UMLS semantic concext + if feature == 'umls_semantic_context': - if feature == "word": - features[(feature, word)] = 1 + # the umls definition of the largest string the word is in + umls_semantic_context_mappings = umls.umls_semantic_context_of_words( self.umls_lookup_cache , sentence ) - if feature == "length": - features[(feature, None)] = len(word) + #if there are no mappings + if umls_semantic_context_mappings[ind] == None: + features[(feature,None)] = 1 + # there could be multiple contexts, iterate through the sublist + else: + for mapping in umls_semantic_context_mappings[ind]: + for concept in mapping: + features[(feature,concept)] = 1 - if feature == "mitre": - for f in self.mitre_features: - if re.search(self.mitre_features[f], word): - features[(feature, f)] = 1 + return features - if feature == "stem_porter": - st = nltk.stem.PorterStemmer() - features[(feature, st.stem(word))] = 1 - if feature == "stem_lancaster": - st = nltk.stem.LancasterStemmer() - features[(feature, st.stem(word))] = 1 - if feature == "stem_snowball": - st = nltk.stem.SnowballStemmer("english") - features[(feature, st.stem(word))] = 1 - if feature == "word_shape": - wordShapes = getWordShapes(word) - for i, shape in enumerate(wordShapes): - features[(feature + str(i), shape)] = 1 + # Features that will be added back in + # + # Currently not used for reversion back to original one-pass + # + # Removed from concept_features() for visual clarity + def concept_features_currently_not_used_features(self): - if feature == "metric_unit": - unit = 0 - if self.is_weight(word): - unit = 1 - elif self.is_size(word): - unit = 2 - features[(feature, None)] = unit + # Feature: the chunk itself + if feature == 'chunk': + features.update( { ('chunk',sentence[ind]) : 1 } ) - # look for prognosis locaiton - #if feature == "radial_loc": - # THIS MIGHT BE BUGGED - # if is_prognosis_location(word): - # features[(feature, None)] = 1 + # Feature: Uncased unigrams + if feature == 'unigram': + for i,word in enumerate( sentence[ind].split() ): + featname = 'unigram-%d' % i + features.update( { (featname, word.lower()) : 1 } ) - if feature == "has_problem_form": - if self.has_problem_form(word): - features[(feature, None)] = 1 + # Feature: First four letters of each word + if feature == 'first-four-letters': + prefix_list = [ word[:4] for word in sentence[ind].split() ] + for i,word in enumerate(prefix_list): + featname = 'first-four-letters-%d' % i + features.update( { (featname, word) : 1 } ) - if feature == "def_class": - features[(feature, None)] = self.get_def_class(word) - return features @@ -667,31 +891,48 @@ def concept_features_for_word(self, chunk, i): "DATESEPERATOR": r"^[-/]$", } - def is_test_result( context): + def is_test_result(self, context): # note: make spaces optional? 
regex = r"^[A-Za-z]+( )*(-|--|:|was|of|\*|>|<|more than|less than)( )*[0-9]+(%)*" if not re.search(regex, context): return re.search(r"^[A-Za-z]+ was (positive|negative)", context) return True - def is_weight( word): - regex = r"^[0-9]*(mg|g|milligrams|grams)$" + # Try to get QANN features + def is_meaurement(self, word): + regex = r"^[0-9]*(unit(s)|cc|L|mL|dL)$" + return re.search(regex, word) + + def is_directive(self, word): + regex = r"^(q\..*|q..|PRM|bid|prm|p\..*)$" return re.search(regex, word) + + def is_date(self, word): + regex = r'^(\d\d\d\d-\d\d-\d|\d\d?-\d\d?-\d\d\d\d?|\d\d\d\d-\d\d?-\d\d?)$' + return re.search(regex,word) - def is_size( word): + def is_volume(self, word): + regex = r"^[0-9]*(ml|mL|dL)$" + return re.search(regex, word) + + def is_weight(self, word): + regex = r"^[0-9]*(mg|g|mcg|milligrams|grams)$" + return re.search(regex, word) + + def is_size(self, word): regex = r"^[0-9]*(mm|cm|millimeters|centimeters)$" return re.search(regex, word) - def is_prognosis_location( word): + def is_prognosis_location(self, word): regex = r"^(c|C)[0-9]+(-(c|C)[0-9]+)*$" return re.search(regex, word) - def has_problem_form( word): + def has_problem_form(self, word): regex = r".*(ic|is)$" return re.search(regex, word) # checks for a definitive classification at the word level - def get_def_class( word): + def get_def_class(self, word): test_terms = { "eval", "evaluation", "evaluations", "sat", "sats", "saturation", @@ -739,3 +980,85 @@ def get_def_class( word): return 3 return 0 + +# generate_chunks() +# +# input: Three arguments: +# 1) A list of list of word (the data of the file) +# 2) A list of list of IOB tags (one-to-one with list from arg 1) +# 3) A list of list of concepts (one-to-one with list from arg 1) +# +# +# output: A 3-tuple of: +# 1) A list of list of chunks (word token phrases) +# 2) A list of list of concepts (one-to-one with list from ret 1) +# 3) A list of all indices into 1 that have been deemed nontrivial +def generate_chunks(data, IOB_tags, labels=None): + + # List of list of tokens (similar to 'text', but concepts are grouped) + text_chunks = [] + + # one-to-one concept classification with text_chunks + if labels: + concept_chunks = [] + else: + concept_chunks = None + + # List of (line,token) pairs for classifications that are nont 'none' + hits = [] + + # Create tokens of full concept boundaries for second classifier + for i, concept_line in enumerate(IOB_tags): + + # One line of 'chunked' + line_of_text_chunks = [] + if labels: line_of_concept_chunks = [] + + # stores the current streak + queue = [] + + # Necessary when multiple concepts are on one line + # The second concept's j index is relative to a word-split array + # The j of the new token should be relative to how chunks there are + chunk_offset = 0 + + # C-style array indexing. Probably could be done a better way. 
+ # Used because I needed the ability of lookahead + for j in range(len(concept_line)): + + # Outside + # concept_line in "012" instead of "IOB" + if concept_line[j] == 'O': + line_of_text_chunks.append(data[i][j]) + if labels: line_of_concept_chunks.append('none') + + # Beginning of a concept boundary + else: + + # Increase size of current streak + queue.append(data[i][j]) + + # lookahead (check if streak will continue) + if (j+1 == len(concept_line))or \ + (concept_line[j+1] != 'I'): # end of classifiation + + # Add full concept token + line_of_text_chunks.append(' '.join(queue)) + if labels: line_of_concept_chunks.append(labels[i][j]) + + # Store indices of detected concept + hits.append( ( i, j + 1-len(queue) - chunk_offset ) ) + + # Reminder: used in the case that a concept follows a + # multi-word concept on the same line + chunk_offset += len(queue) - 1 + + # Reset streak + queue = [] + + text_chunks.append(line_of_text_chunks) + if labels: concept_chunks.append(line_of_concept_chunks) + + + return (text_chunks, concept_chunks, hits) + From 097b4d3594da8040e3f98b53a298a53029395007 Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Tue, 1 Apr 2014 18:49:25 -0400 Subject: [PATCH 031/393] added MRREL table for hypernyms --- code/create_sqliteDB.py | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/code/create_sqliteDB.py b/code/create_sqliteDB.py index 71d3ef9..33b8d5f 100644 --- a/code/create_sqliteDB.py +++ b/code/create_sqliteDB.py @@ -10,6 +10,7 @@ def create_db(): conn.text_factory = str + print "opening files" #load data in files. try: MRSTY_TABLE = open( ( "../umls_tables/MRSTY"), "r" ) @@ -25,27 +26,49 @@ def create_db(): conn.close() sys.exit() - MRSTY_TABLE = MRSTY_TABLE.read() ; + try: + MRREL_TABLE = open( ( "../umls_tables/MRREL") , "r" ) + except IOError: + print "\nNo file to use for creating MRREL table\n" + conn.close() + sys.exit() + + print "reading files" + + MRSTY_TABLE = MRSTY_TABLE.read() MRSTY_TABLE = MRSTY_TABLE.split('\n') - MRCON_TABLE = MRCON_TABLE.read() ; + MRCON_TABLE = MRCON_TABLE.read() MRCON_TABLE = MRCON_TABLE.split( '\n' ) + MRREL_TABLE = MRREL_TABLE.read() + MRREL_TABLE = MRREL_TABLE.split( '\n' ) + #data that will be inserted into tables. MRTSY_DATA = [] MRCON_DATA = [] + MRREL_DATA = [] c = conn.cursor() + print "parsing files" + #parse and store the data from the files. for line in MRSTY_TABLE: MRTSY_DATA.append( tuple(line.split('|')) ) for line in MRCON_TABLE: MRCON_DATA.append( tuple(line.split('|')) ) + for line in MRREL_TABLE: + MRREL_DATA.append( tuple(line.split('|')) ) + + print "creating tables" #create tables. c.execute( "CREATE TABLE MRCON( CUI, LAT, TS, LUI, STT, SUI, STR, LRL, EMPTY ) ;" ) c.execute( "CREATE TABLE MRSTY( CUI, TUI, STY, EMPTY ) ;" ) + c.execute( "CREATE TABLE MRREL( CUI1, REL, CUI2, RELA, SAB, SL, MG, EMPTY ) ;" ) + + print "inserting data" #insert data onto database for line in MRCON_DATA: @@ -58,11 +81,21 @@ def create_db(): c.execute( "INSERT INTO MRSTY( CUI, TUI, STY, EMPTY) values( ?, ?, ?, ?)" , line ) except sqlite3.ProgrammingError: continue + for line in MRREL_DATA: + try: + c.execute( "INSERT INTO MRREL( CUI1, REL, CUI2, RELA, SAB, SL, MG, EMPTY ) values( ?, ?, ?, ?,?, ? ,? ,? 
)" , line ) + except sqlite3.ProgrammingError: + continue + + print "creating indices" #create indices for faster queries c.execute( "CREATE INDEX mrsty_cui_map ON MRSTY(CUI)") c.execute( "CREATE INDEX mrcon_str_map ON MRCON(STR)") c.execute( "CREATE INDEX mrcon_cui_map ON MRCON(CUI)") + c.execute( "CREATE INDEX mrrel_cui2_map ON MRREL( CUI2 )" ) + c.execute( "CREATE INDEX mrrel_cui1_map on MRREL( CUI1 ) " ) + c.execute( "CREATE INDEX mrrel_rel_map on MRREL( REL )" ) #save changes to .db conn.commit() From e647d0ec60db45804961c213b8d3cfd7128adc4c Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Tue, 1 Apr 2014 18:51:47 -0400 Subject: [PATCH 032/393] parameter from grid search --- code/libml.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/code/libml.py b/code/libml.py index bc5d2d1..d2e8f09 100644 --- a/code/libml.py +++ b/code/libml.py @@ -174,7 +174,7 @@ def run(self): # print('worker {0} stop.'.format(self.name)) break try: - rate = self.run_one(2.0**cexp,2.0**gexp) + rate = self.run_one(1.0,1e-5) if rate is None: raise RuntimeError("get no rate") except: # we failed, let others do that and we just quit @@ -344,4 +344,4 @@ def read_labels(model_filename, type=ALL): lines = f.readlines() labels[t] = [line.strip() for line in lines] - return labels \ No newline at end of file + return labels From cd96e7a8b9f013aebfb3d6f925fb76453addb13c Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Tue, 1 Apr 2014 18:54:45 -0400 Subject: [PATCH 033/393] isolate first and second passes somewhat --- code/model.py | 431 ++++++++++++++++++++++---------------------------- 1 file changed, 185 insertions(+), 246 deletions(-) diff --git a/code/model.py b/code/model.py index b83518b..f688525 100644 --- a/code/model.py +++ b/code/model.py @@ -49,12 +49,12 @@ def __init__(self, filename='awesome.model', type=libml.ALL): self.IOB_vocab = {} self.concept_vocab = {} - - + + # Model::train() # - # @param notes. A Note object that has data for training the model + # @param notes. A list of Note objects that has data for training the model def train(self, notes): # Get the data and annotations from the Note object @@ -63,18 +63,40 @@ def train(self, notes): # labels - A list of list of concepts (1:1 with data) data = [] labels = [] - chunks = [] + bounds = [] for note in notes: data += note.txtlist() labels += note.conlist() - chunks += note.boundlist() + bounds += note.boundlist() # Create object that is a wrapper for the features feat_obj = clicon_features.FeatureWrapper(data) + # First pass + self.first_train(data, labels, bounds, feat_obj) + + # Second pass + self.second_train(data, labels, bounds, feat_obj) + + # Pickle dump + with open(self.filename, "w") as model: + pickle.dump(self, model) + + + + + # Model::train_first() + # + # @param notes. 
A list of Note objects that has data for training the model + def first_train(self, data, labels, chunks, feat_obj=None): + + # If not given + if not feat_obj: + # Create object that is a wrapper for the features + feat_obj = clicon_features.FeatureWrapper(data) - # IOB tagging + # IOB tagging prose = [] nonprose = [] prose_line_numbers = [] @@ -89,14 +111,10 @@ def train(self, notes): nonprose_line_numbers.append(i) - # each list of hash tables (one list per line in file) - #for row in rows: + # Encode each feature as a unique number for row in prose + nonprose: - # each hash table (one hash table per word in the line) for features in row: - # each key (tuple) pair in hash table (one key per feature) for feature in features: - # assigning a unique number to each (feature,value) pair if feature not in self.IOB_vocab: self.IOB_vocab[feature] = len(self.IOB_vocab) + 1 @@ -111,7 +129,6 @@ def train(self, notes): feat_lu = lambda f: {self.IOB_vocab[item]:f[item] for item in f} prose = [map(feat_lu, x) for x in prose] nonprose = [map(feat_lu, x) for x in nonprose] - # Segregate chunks into 'Prose CHUNKS' and 'Nonprose CHUNKS' prose_ind = 0 @@ -134,30 +151,40 @@ def train(self, notes): print 'Line #%d is neither prose nor nonprose!' % i print line, '\n' - prose_model = self.filename + '1' nonprose_model = self.filename + '2' - libml.write_features( prose_model, prose, pchunks, self.type) - libml.write_features(nonprose_model, nonprose, nchunks, self.type) + # Use CRF + libml.write_features( prose_model, prose, pchunks, libml.CRF) + libml.write_features(nonprose_model, nonprose, nchunks, libml.CRF) - libml.train( prose_model, self.type) - libml.train(nonprose_model, self.type) + libml.train( prose_model, libml.CRF) + libml.train(nonprose_model, libml.CRF) + #libml.write_features( prose_model, prose, pchunks, self.type) + #libml.write_features(nonprose_model, nonprose, nchunks, self.type) - #################### - # Second Pass # - #################### + #libml.train( prose_model, self.type) + #libml.train(nonprose_model, self.type) + # Pickle dump - Done during train(), not first_train() + #with open(self.filename, "w") as model: + # pickle.dump(self, model) - # IOB labels - # undo encodings of concept labels (ex. 
1 => 'B') - label_lu = lambda l: Model.reverse_IOBs_labels[l] - chunks = [map(label_lu, x) for x in chunks] + # Model::second_train() + # + # + def second_train(self, data, labels, bounds, feat_obj=None): + + # If not given + if not feat_obj: + # Create object that is a wrapper for the features + feat_obj = clicon_features.FeatureWrapper() + # Merge 'B' words with its 'I's (and account for minor change in indices) - tmp = feat_obj.generate_chunks(data,chunks,labels) + tmp = clicon_features.generate_chunks(data,bounds,labels) # text_chunks - a merged text (highly similiar to data, except merged) # concept_chunks - one-to-one concept classification with text_chunks @@ -168,56 +195,52 @@ def train(self, notes): # rows is a list of a list of hash tables # it is used for holding the features that will be used for training rows = [] - text_matches = [] concept_matches = [] - for hit in hits: - i,j = hit - rows.append(feat_obj.concept_features(text_chunks[i], j)) - - text_matches.append(text_chunks[i][j]) - concept_matches.append(concept_chunks[i][j]) - - - # each hash table (one hash table per word in the line) - for features in rows: - # each key (tuple) pair in hash table (one key per feature) - for feature in features: - # assigning a unique number to each (feature,value) pair - if feature not in self.concept_vocab: - self.concept_vocab[feature] = len(self.concept_vocab) + 1 - - - # Encode concept labels to numbers (ex. 'treatment' => 1) - # NOTE: There are no longer 'none' classifications - # ex. [1,2,1] - labels = [] - for con in concept_matches: - #print con - tmp = Model.labels[con] - labels.append(tmp) + row_line = [] + con_line = [] + for ind in range(len((hits))): + i,j = hits[ind] + + # Get features + + row_line.append(feat_obj.concept_features(text_chunks[i], j)) + + # Corresponding labels (libml encodings of 'treatment','problem',etc) + con_tmp = concept_chunks[i][j] + con_tmp = Model.labels[con_tmp] + con_line.append( con_tmp ) + + if (ind == len(hits)-1) or (i != hits[ind+1][0]): + rows.append(row_line) + row_line = [] + concept_matches.append(con_line) + con_line = [] + + + # Encode each feature as a unique number + for row in rows: + for features in row: + for feature in features: + if feature not in self.concept_vocab: + self.concept_vocab[feature] = len(self.concept_vocab) + 1 # Purpose: Encode something like ('chunk', 'rehabilitation') as a unique # number, as determined by the self.concept_vocab hash table - #feat_lu = lambda f: {self.concept_vocab[item]:f[item] for item in f} - #rows = [map(feat_lu, x) for x in rows] - tmp_rows = [] - for fdict in rows: - #print fdict - tmp = {self.concept_vocab[key]:fdict[key] for key in fdict} - tmp_rows.append(tmp) - rows = tmp_rows + feat_lu = lambda f: {self.concept_vocab[item]: f[item] for item in f} + rows = [map(feat_lu, x) for x in rows] + # Write second pass model to file second_pass_model = self.filename + '3' - libml.write_features(second_pass_model, [rows], [labels], self.type) - libml.train(second_pass_model, self.type) + mtype = libml.LIN + libml.write_features(second_pass_model, rows, concept_matches, mtype) + #libml.write_features(second_pass_model, rows, concept_matches, self.type) - # Pickle dump - with open(self.filename, "w") as model: - pickle.dump(self, model) - + # Train the model + libml.train(second_pass_model, mtype) # Use LIN + #libml.train(second_pass_model, self.type) @@ -229,10 +252,34 @@ def predict(self, note): # data - A list of list of the medical text's words data = note.txtlist() - # A wrapper for 
features feat_obj = clicon_features.FeatureWrapper(data) + # First Pass + bounds = self.first_predict(data, feat_obj) + + # Merge 'B' words with its 'I's to form phrased chunks + text_chunks, _, hits = clicon_features.generate_chunks(data,bounds) + + # Second Pass + retVal = self.second_predict(text_chunks,hits,feat_obj) + + + return retVal + + + + # Model::first_predit() + # + # @param data. A list of list of words + # @return A list of list of IOB tags + def first_predict(self, data, feat_obj=None): + + # If not given + if not feat_obj: + # Create object that is a wrapper for the features + feat_obj = clicon_features.FeatureWrapper(data) + # prose and nonprose - each store a list of sentence feature dicts prose = [] nonprose = [] @@ -249,19 +296,6 @@ def predict(self, note): nonprose_line_numbers.append(i) - # FIXME - # Not sure if this should be reset, but it makes sense to me to do it - # But why is it a data member if it shouldnt persist - self.IOB_vocab = {} - - # Create a mapping of each (feature,value) pair to a unique number - for row in prose + nonprose: - for features in row: - for feature in features: - if feature not in self.IOB_vocab: - self.IOB_vocab[feature] = len(self.IOB_vocab) + 1 - - # For applying the (key,value) mapping feat_lu = lambda f: {self.IOB_vocab[item]:f[item] for item in f if item in self.IOB_vocab} @@ -270,219 +304,124 @@ def predict(self, note): prose = [map(feat_lu, x) for x in prose] prose_model = self.filename + '1' - libml.write_features(prose_model, prose, None, self.type); - libml.predict(prose_model, self.type) + libml.write_features(prose_model, prose, None, libml.CRF); + libml.predict(prose_model, libml.CRF) - prose_labels_list = libml.read_labels(prose_model, self.type) + prose_labels_list = libml.read_labels(prose_model, libml.CRF)[libml.CRF] # Nonprose (predict, and read predictions) nonprose = [map(feat_lu, x) for x in nonprose] nonprose_model = self.filename + '2' - libml.write_features(nonprose_model, nonprose, None, self.type); - libml.predict(nonprose_model, self.type) - - nonprose_labels_list = libml.read_labels(nonprose_model, self.type) + libml.write_features(nonprose_model, nonprose, None, libml.CRF); + libml.predict(nonprose_model, libml.CRF) + nonprose_labels_list = libml.read_labels(nonprose_model, libml.CRF)[libml.CRF] # Stitch prose and nonprose labels lists together - labels_list = {} - - - # FIXME - incorrect - for key in libml.bits(self.type): - - # FIXME - workaround for key - #if not prose_labels_list[key]: - # labels_list[2] = {} - # continue - - labels = [] - prose_ind = 0 - nonprose_ind = 0 - p_end_flag = (len( prose_line_numbers) == 0) - n_end_flag = (len(nonprose_line_numbers) == 0) + labels = [] + prose_ind = 0 + nonprose_ind = 0 + p_end_flag = (len( prose_line_numbers) == 0) + n_end_flag = (len(nonprose_line_numbers) == 0) - # Pretty much renaming just for length/readability pruposes - plist = prose_labels_list[key] - nlist = nonprose_labels_list[key] + # Pretty much renaming just for length/readability pruposes + plist = prose_labels_list + nlist = nonprose_labels_list - for i in range( len(data) ): - if (not p_end_flag) and (i == prose_line_numbers[prose_ind]): - line = plist[0:len(data[i]) ] # Beginning - plist = plist[ len(data[i]):] # The rest - labels += line - prose_ind += 1 - if prose_ind == len(prose_line_numbers): p_end_flag = True + for i in range( len(data) ): + if (not p_end_flag) and (i == prose_line_numbers[prose_ind]): + line = plist[0:len(data[i]) ] # Beginning + plist = plist[ len(data[i]):] # The 
rest + labels += line + prose_ind += 1 + if prose_ind == len(prose_line_numbers): p_end_flag = True - elif (not n_end_flag) and (i == nonprose_line_numbers[nonprose_ind]): - line = nlist[0:len(data[i]) ] # Beginning - nlist = nlist[ len(data[i]):] # The rest - labels += line - nonprose_ind += 1 - if nonprose_ind == len(nonprose_line_numbers): n_end_flag = True + elif (not n_end_flag) and (i == nonprose_line_numbers[nonprose_ind]): + line = nlist[0:len(data[i]) ] # Beginning + nlist = nlist[ len(data[i]):] # The rest + labels += line + nonprose_ind += 1 + if nonprose_ind == len(nonprose_line_numbers): n_end_flag = True - else: - # Shouldn't really get here ever - print 'Line #%d is neither prose nor nonprose!' % i - - labels_list[key] = labels + else: + # Shouldn't really get here ever + print 'Line #%d is neither prose nor nonprose!' % i # IOB labels # translate labels_list into a readable format # ex. change all occurences of 1 -> 'B' - for t, labels in labels_list.items(): - if not labels_list[t]: continue - tmp = [] - for sentence in data: - tmp.append([labels.pop(0) for i in range(len(sentence))]) - tmp[-1]= map(lambda l: l.strip(), tmp[-1]) - tmp[-1]= map(lambda l: Model.reverse_IOBs_labels[int(l)],tmp[-1]) - labels_list[t] = tmp - - - - #print '-'*80 - #print "\nlabels_list" - #print labels_list - #print "\n" + "-" * 80 - - - # Reminder: list of list of words (line-by-line) - text = data - - # List of list of tokens (similar to 'text', but concepts are grouped) - chunked = {1:[], 2:[], 4:[]} - hits_list = {1:[], 2:[], 4:[]} - - - # Create tokens of full concept boundaries for second classifier - for t,chunks in labels_list.items(): - - # FIXME - workaround - if not labels_list[t]: continue - - # Merge 'B' words with its 'I's to form phrased chunks - tmp = feat_obj.generate_chunks(text,chunks) - - # text_chunks - a merged text - # place_holder - ignore. It has a value of [] - # hit_tmp - one-to-one concept token indices with text_chunks - text_chunks, place_holder, hits = tmp + tmp = [] + for sentence in data: + tmp.append([labels.pop(0) for i in range(len(sentence))]) + tmp[-1]= map(lambda l: l.strip(), tmp[-1]) + tmp[-1]= map(lambda l: Model.reverse_IOBs_labels[int(l)],tmp[-1]) - print '\n'*5 + '-'*80 + '\n'*5 - print hits - for foo,bar in enumerate(text_chunks): - print foo, ': ', bar - print hits + # list of list of IOB labels + return tmp - # Store chunked text - chunked[t] = text_chunks - hits_list[t] = hits - ############################# - # Second Pass # - ############################# + def second_predict(self ,text_chunks, hits, feat_obj=None): - # Predict classification for chunks - # FIXME - possible error - only predicts on 4 - text_chunks = chunked[1] - hits = hits_list[1] + # If not given + if not feat_obj: + # Create object that is a wrapper for the features + feat_obj = clicon_features.FeatureWrapper() - #print labels_list - - - # rows - the format for representing feats for machine learning - # text_matches - the phrase chunks corresponding to classifications + # rows - the format for representing feats for libml + # concept_matches - labels (ex. 
'treatment') encoded for libml rows = [] - text_matches = [] - for hit in hits: - i,j = hit - rows.append(feat_obj.concept_features(text_chunks[i], j)) - text_matches.append(text_chunks[i][j]) + row_line = [] + for ind in range(len((hits))): + i,j = hits[ind] + + # Get features + row_line.append(feat_obj.concept_features(text_chunks[i], j)) + if (ind == len(hits)-1) or (i != hits[ind+1][0]): + rows.append(row_line) + row_line = [] - #print text_matches - - - # FIXME - # Not sure if this should be reset, but it makes sense to me to do it - # But why is it a data member if it shouldnt persist - self.concept_vocab = {} - - for features in rows: - for feature in features: - if feature not in self.concept_vocab: - self.concept_vocab[feature] = len(self.concept_vocab) + 1 # Purpose: Encode something like ('chunk', 'rehabilitation') as a unique # number, as determined by the self.concept_vocab hash table - tmp_rows = [] - for fdict in rows: - #print fdict - tmp = {self.concept_vocab[key]:fdict[key] for key in fdict} - tmp_rows.append(tmp) - rows = tmp_rows - - - #print rows - + feat_lu = lambda f: {self.concept_vocab[item]: f[item] for item in f if item in self.concept_vocab} + rows = [map(feat_lu, x) for x in rows] # Predict using model second_pass_model = self.filename + '3' - libml.write_features(second_pass_model, [rows], None, self.type); + mtype = libml.LIN + libml.write_features(second_pass_model, rows, None, mtype); libml.predict(second_pass_model, self.type) - second_pass_labels_list = libml.read_labels(second_pass_model, self.type) - - - # FIXME - I probably shouldn't have to do this - # I don't know why it doesn't use all ML libs - for t in [1,2,4]: - if t not in second_pass_labels_list: - second_pass_labels_list[t] = [] - - - #print second_pass_labels_list - - # translate labels_list into a readable format - # ex. change all occurences of 0 -> 'none' - for t, labels in second_pass_labels_list.items(): - - if labels == []: - # FIXME - this means that there are ML libs not being used - #print '\nNot predicting on: ', t, '\n' - continue - - tmp = [] - for sentence in [text_matches]: - tmp.append([labels.pop(0) for i in range(len(sentence))]) - tmp[-1] = map(lambda l: l.strip(), tmp[-1]) - tmp[-1] = map(lambda l: Model.reverse_labels[int(l)],tmp[-1]) - second_pass_labels_list[t] = tmp - - - #print second_pass_labels_list + second_pass_labels_list = libml.read_labels(second_pass_model, mtype) # Put predictions into format for Note class to read retVal = {} for t in [1,2,4]: - + # Skip non-predictions - if second_pass_labels_list[t] == []: continue - + if t not in second_pass_labels_list: continue + classifications = [] - for hit,concept in zip(hits, second_pass_labels_list[t][0]): + for hit,concept in zip(hits, second_pass_labels_list[t]): + concept = Model.reverse_labels[int(concept)] i,j = hit - length = len(text_chunks[i][j].split()) - #print (concept, i, j, j+length-1 ) - classifications.append( (concept, i+1, j, j+length-1 ) ) + # Get start position (ex. 
7th word of line) + start = 0 + for k in range(len(text_chunks[i])): + if k == j: break; + start += len( text_chunks[i][k].split() ) + + length = len(text_chunks[i][j].split()) + classifications.append( (concept, i, start, start+length-1 ) ) + retVal[t] = classifications From abd0f01288696a76db3f2f92f6449915aebb0cba Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Tue, 1 Apr 2014 18:55:24 -0400 Subject: [PATCH 034/393] Change in prediction output representation --- code/note.py | 34 +++++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 11 deletions(-) diff --git a/code/note.py b/code/note.py index 24ba1ec..e85cae4 100644 --- a/code/note.py +++ b/code/note.py @@ -41,11 +41,11 @@ def read_i2b2(self, txt, con=None): # concept prefix, suffix = line.split('||') - txt = prefix.split() - con = suffix[3:-2] + text = prefix.split() + conc = suffix[3:-2] - start = txt[-2].split(':') - end = txt[-1].split(':') + start = text[-2].split(':') + end = text[-1].split(':') assert "concept spans one line", start[0] == end[0] @@ -58,7 +58,7 @@ def read_i2b2(self, txt, con=None): end = int( end[1]) # Add the classification to the Note object - self.classifications.append( (con,l,start,end) ) + self.classifications.append( (conc,l,start,end) ) #print "txt: ", txt #print "l: ", l @@ -67,7 +67,11 @@ def read_i2b2(self, txt, con=None): #print "line: ", self.data[l-1] # Beginning of a concept - self.boundaries[l-1][start] = 'B' + try: + self.boundaries[l-1][start] = 'B' + except: + print 'txt: ', txt + print 'con: ', con # Inside of a concept for i in range(start,end): @@ -101,7 +105,7 @@ def write_i2b2(self, con, labels): continue concept = classification[0] - lineno = classification[1] + lineno = classification[1] + 1 start = classification[2] end = classification[3] @@ -110,8 +114,10 @@ def write_i2b2(self, con, labels): #print "\n" + "-" * 80 #print "start: ", start + #print "end ", end #print "text: ", text #print "text[start]: ", text[start] + #print "concept: ", concept # The text string of words that has been classified datum = text[start] @@ -125,6 +131,10 @@ def write_i2b2(self, con, labels): # Classification label = concept + # Fixing issue involving i2b2 format (remove capitalization) + lowercased = [w.lower() for w in datum.split()] + datum = ' '.join(lowercased) + # Print format print >>f, "c=\"%s\" %s %s||t=\"%s\"" % (datum, idx1, idx2, label) #print "c=\"%s\" %s %s||t=\"%s\"" % (datum, idx1, idx2, label) @@ -137,7 +147,9 @@ def write_i2b2(self, con, labels): # @param labels. 
A list of list of BIOs labels # # Print the prediction of BIOs concept boundary classification - def write_BIOs_labels(self, _, labels): + def write_BIOs_labels(self, filename, labels): + + fid = open(filename,"w") # List of list of words (line-by-line) text = self.txtlist() @@ -160,9 +172,9 @@ def write_BIOs_labels(self, _, labels): # lookahead (check if streak will continue) if (j+1 == len(concept_line))or \ (concept_line[j+1] != 'I'): - print '%d:%d %d:%d' % (i+1,j-len(queue)+1,i+1,j) - print ' '.join(queue) - print '' + print >>fid, '%d:%d %d:%d' % (i+1,j-len(queue)+1,i+1,j) + print >>fid, ' '.join(queue) + print >>fid, '' # Reset streak queue = [] From da0a4dd5d8d3c4483324667f74fa35670e175cbd Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Tue, 1 Apr 2014 18:56:04 -0400 Subject: [PATCH 035/393] disable model selection --- code/predict.py | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/code/predict.py b/code/predict.py index cc2567f..09ea4f1 100644 --- a/code/predict.py +++ b/code/predict.py @@ -15,14 +15,12 @@ def main(): dest = "input", help = "The input files to predict", default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_data/*') - #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '/home/wboag/ConceptExtraction-master/data/test_data/*') ) parser.add_argument("-o", dest = "output", help = "The directory to write the output", default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/test_predictions') - #default = os.path.join(os.path.dirname(os.path.realpath(__file__)), '/home/wboag/ConceptExtraction-master/data/test_predictions') ) parser.add_argument("-m", @@ -58,15 +56,8 @@ def main(): path = args.output helper.mkpath(args.output) - # Determine what type of models to use (ex SVM vs. CRF) + # Load model model = Model.load(args.model) - if args.no_svm: - model.type &= ~libml.SVM - if args.no_lin: - model.type &= ~libml.LIN - if args.no_crf: - model.type &= ~libml.CRF - for txt in files: @@ -80,7 +71,10 @@ def main(): # Returns a hash table with: # keys as 1,2,4 # values as list of list of concept tokens (one-to-one with dat_list) - labels = model.predict(note) + try: + labels = model.predict(note) + except IndexError: # FIXME - Not sure what causes this (something GENIA-related) + continue con = os.path.split(txt)[-1] @@ -90,7 +84,6 @@ def main(): # FIXME - workaround. 
I'm not sure why it doesnt make some if t not in labels: - note.write_i2b2(con_path,[]) continue if t == libml.SVM: @@ -99,9 +92,6 @@ def main(): if t == libml.LIN: helper.mkpath(os.path.join(args.output, "lin")) con_path = os.path.join(path, "lin", con) - if t == libml.CRF: - helper.mkpath(os.path.join(args.output, "crf")) - con_path = os.path.join(path, "crf", con) # Output the concept predictions From b39d1368d8126b2e5cc164dd731e5abc99d2ebc9 Mon Sep 17 00:00:00 2001 From: Willie Boag Date: Tue, 1 Apr 2014 18:56:39 -0400 Subject: [PATCH 036/393] add cui and hypernym features --- code/umls.py | 120 ++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 104 insertions(+), 16 deletions(-) diff --git a/code/umls.py b/code/umls.py index 0aa842c..1217817 100644 --- a/code/umls.py +++ b/code/umls.py @@ -8,14 +8,22 @@ def umls_semantic_type_word( umls_string_cache , sentence ): mapping = umls_string_cache.get_map( sentence ) else: concept = SQLookup.string_lookup( sentence ) + #print sentence, ' - NOT FOUND' + #print '\t', concept if concept != None: - umls_string_cache.add_map( sentence , concept[0] ) + #umls_string_cache.add_map( sentence , concept[0] ) + umls_string_cache.add_map( sentence , concept ) else: umls_string_cache.add_map( sentence , None ) mapping = umls_string_cache.get_map(sentence) + #print 'umls_semantic_type_word - returning:' + #print mapping + #print '' + return mapping + def umls_semantic_context_of_words( umls_string_cache, sentence ): #Defines the largest string span for the sentence. @@ -45,12 +53,19 @@ def umls_semantic_context_of_words( umls_string_cache, sentence ): #If the string is not in cache, look the umls concept up and add to the cache. if not( umls_string_cache.has_key( rawstring ) ): #SQLookup returns a tuple if there is a result or None is there is not. - concept = SQLookup.string_lookup( rawstring ) + concept = SQLookup.string_lookup( rawstring ) - if concept != None: - umls_string_cache.add_map( rawstring , concept[0] ) +# print concept + + if not concept: + umls_string_cache.add_map( rawstring, None ) else: - umls_string_cache.add_map( rawstring , None ) + umls_string_cache.add_map( rawstring, concept ) ; + +# if concept != None: + # umls_string_cache.add_map( rawstring , concept[0] ) + # else: + # umls_string_cache.add_map( rawstring , None ) #Store the concept into concept_span_dict with its span as a key. concept_span_dict[(ti,ti+currentWindowSize-1)] = umls_string_cache.get_map( rawstring ) @@ -58,7 +73,7 @@ def umls_semantic_context_of_words( umls_string_cache, sentence ): #For each substring of the sentence if there is a definition obtained from #SQLookup assign the concept to every word that is within in the substring. #If the currrent span is a substring update otherwise if it is not a substring add the new found context. 
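        # A small hypothetical illustration of the bookkeeping below: if the
        # word at position i is covered by the spans (2,4) and (3,3), then
        # umls_context_list[i] ends up as [[2, 4], [3, 3]], and each span keys
        # concept_span_dict for the cached rows of its substring.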
- if umls_string_cache.get_map(rawstring) != None: + if umls_string_cache.get_map(rawstring): for i in range( ti , ti + currentWindowSize ): @@ -76,7 +91,13 @@ def umls_semantic_context_of_words( umls_string_cache, sentence ): if umls_context_list[i].count( [ti,ti+currentWindowSize -1] ) == 0 : umls_context_list[i].append( [ ti , ti +currentWindowSize - 1 ] ) + + #print '\t\t', umls_context_list + #create a list of sublists each sublist represents the contexts for which the word appears in the sentence + + # print umls_context_list + mappings = [] for i in umls_context_list: spans = i @@ -86,12 +107,22 @@ def umls_semantic_context_of_words( umls_string_cache, sentence ): sub_mappings = [] for j in spans: sub_mappings.append( concept_span_dict[tuple(j)]) - mappings.append( list(set(sub_mappings)) ) - + #print '\t\t\t', sub_mappings + #print '' + #mappings.append( list(set(sub_mappings)) ) + mappings.append( sub_mappings ) +# print mappings +# print sentence + # print '\t\t\t', mappings + return mappings def umls_semantic_type_sentence( cache , sentence ): + + #print sentence + #Defines the largest string span for the sentence. + WINDOW_SIZE = 7 #holds the mappings for every substring of size 1 to WINDOW_SIZE @@ -117,21 +148,28 @@ def umls_semantic_type_sentence( cache , sentence ): concept = string_lookup( rawstring ) - if concept != None: - cache.add_map( rawstring , concept[0] ) + #cache.add_map( rawstring , concept ) + + if concept: + cache.add_map( rawstring , concept ) else: - cache.add_map( rawstring , None ) + cache.add_map( rawstring , [] ) mappings[rawstring] = cache.get_map( rawstring ) - +# print "rawstring: ", rawstring + # print "mappings: " ,mappings[rawstring] size_s = 0 phrase = [] - #get longest sub string with a mapping - for mapping in mappings.iteritems(): + + #print "mappings: " , mappings + #get longest sub string with a mapping - if( mapping[1] != None ): + for mapping in mappings.iteritems(): + #print mapping + if( mapping[1] ): + # print mapping if( len( mapping[0] ) > size_s ): phrase = [] @@ -140,7 +178,57 @@ def umls_semantic_type_sentence( cache , sentence ): continue if( len(mapping[0]) == size_s ): phrase.append( mapping[1] ) - + +# print "phrase: " , phrase +# print phrase return phrase + +# Get the semantic types for a given word +def get_cui( cache , word ): + + # If hypernyms already in cache + if cache.has_key( word + '--cuis' ): + + cuis = cache.get_map( word + '--cuis' ) + + else: + + # Get cui + cuis = SQLookup.cui_lookup(word) + + # Eliminate duplicates + cuis = list(set(cuis)) + cuis = [c[0] for c in cuis] + + # Store result in cache + cache.add_map( word + '--cuis', cuis ) + + return cuis + + + +# Get the hypernyms of a string +def umls_hypernyms( cache, string ): + + # If hypernyms already in cache + if cache.has_key( string + '--hypernyms' ): + + hyps = cache.get_map( string + '--hypernyms' ) + + else: + + # Get hypernyms + hyps = SQLookup.hypernyms_lookup(string) + + # Eliminate duplicates + #hyps = list(set(hyps)) + #hyps = [cui[0] for cui in hyps] + #hyps = hyps[0] + + # Store result in cache + cache.add_map( string + '--hypernyms' , hyps ) + + return hyps + From 21791c8ebe91758dddb7c1e3c61c569ccba8055c Mon Sep 17 00:00:00 2001 From: Tristan Naumann Date: Tue, 15 Apr 2014 15:10:06 -0400 Subject: [PATCH 037/393] Moving code to package format. 
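With the sources moved under a clicon/ package directory, the modules can be
imported with a package prefix instead of relying on every script living in
the same directory. A minimal sketch of the intended layout and usage (the
__init__.py and the import line are assumptions for illustration; this patch
itself only renames files):

    clicon/
        __init__.py
        model.py
        note.py
        ...

    from clicon.model import Model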
--- {code => clicon}/SQLookup.py | 0 {code => clicon}/clicon_features.py | 0 {code => clicon}/clicon_genia_interface.py | 0 {code => clicon}/create_sqliteDB.py | 0 {code => clicon}/evaluate.py | 0 {code => clicon}/features.py | 0 {code => clicon}/helper.py | 0 {code => clicon}/libml.py | 0 {code => clicon}/model.py | 0 {code => clicon}/note.py | 0 {code => clicon}/predict.py | 0 {code => clicon}/runall.py | 0 {code => clicon}/statistics.py | 0 {code => clicon}/train.py | 0 {code => clicon}/umls.py | 0 {code => clicon}/umls_cache.py | 0 {code => clicon}/web.py | 0 {code => clicon}/web/__init__.py | 0 {code => clicon}/web/app.py | 0 .../static/bootstrap/css/bootstrap-responsive.css | 0 .../bootstrap/css/bootstrap-responsive.min.css | 0 .../web/static/bootstrap/css/bootstrap.css | 0 .../web/static/bootstrap/css/bootstrap.min.css | 0 .../bootstrap/img/glyphicons-halflings-white.png | Bin .../static/bootstrap/img/glyphicons-halflings.png | Bin .../web/static/bootstrap/js/bootstrap.js | 0 .../web/static/bootstrap/js/bootstrap.min.js | 0 .../web/static/javascripts/jquery.min.js | 0 .../web/static/stylesheets/application.css | 0 {code => clicon}/web/templates/form.html | 0 {code => clicon}/web/templates/layout.html | 0 {code => clicon}/web/templates/result.html | 0 {code => clicon}/wordshape.py | 0 33 files changed, 0 insertions(+), 0 deletions(-) rename {code => clicon}/SQLookup.py (100%) rename {code => clicon}/clicon_features.py (100%) rename {code => clicon}/clicon_genia_interface.py (100%) rename {code => clicon}/create_sqliteDB.py (100%) rename {code => clicon}/evaluate.py (100%) rename {code => clicon}/features.py (100%) rename {code => clicon}/helper.py (100%) rename {code => clicon}/libml.py (100%) rename {code => clicon}/model.py (100%) rename {code => clicon}/note.py (100%) rename {code => clicon}/predict.py (100%) rename {code => clicon}/runall.py (100%) rename {code => clicon}/statistics.py (100%) rename {code => clicon}/train.py (100%) rename {code => clicon}/umls.py (100%) rename {code => clicon}/umls_cache.py (100%) rename {code => clicon}/web.py (100%) rename {code => clicon}/web/__init__.py (100%) rename {code => clicon}/web/app.py (100%) rename {code => clicon}/web/static/bootstrap/css/bootstrap-responsive.css (100%) rename {code => clicon}/web/static/bootstrap/css/bootstrap-responsive.min.css (100%) rename {code => clicon}/web/static/bootstrap/css/bootstrap.css (100%) rename {code => clicon}/web/static/bootstrap/css/bootstrap.min.css (100%) rename {code => clicon}/web/static/bootstrap/img/glyphicons-halflings-white.png (100%) rename {code => clicon}/web/static/bootstrap/img/glyphicons-halflings.png (100%) rename {code => clicon}/web/static/bootstrap/js/bootstrap.js (100%) rename {code => clicon}/web/static/bootstrap/js/bootstrap.min.js (100%) rename {code => clicon}/web/static/javascripts/jquery.min.js (100%) rename {code => clicon}/web/static/stylesheets/application.css (100%) rename {code => clicon}/web/templates/form.html (100%) rename {code => clicon}/web/templates/layout.html (100%) rename {code => clicon}/web/templates/result.html (100%) rename {code => clicon}/wordshape.py (100%) diff --git a/code/SQLookup.py b/clicon/SQLookup.py similarity index 100% rename from code/SQLookup.py rename to clicon/SQLookup.py diff --git a/code/clicon_features.py b/clicon/clicon_features.py similarity index 100% rename from code/clicon_features.py rename to clicon/clicon_features.py diff --git a/code/clicon_genia_interface.py b/clicon/clicon_genia_interface.py similarity index 100% rename 
from code/clicon_genia_interface.py rename to clicon/clicon_genia_interface.py diff --git a/code/create_sqliteDB.py b/clicon/create_sqliteDB.py similarity index 100% rename from code/create_sqliteDB.py rename to clicon/create_sqliteDB.py diff --git a/code/evaluate.py b/clicon/evaluate.py similarity index 100% rename from code/evaluate.py rename to clicon/evaluate.py diff --git a/code/features.py b/clicon/features.py similarity index 100% rename from code/features.py rename to clicon/features.py diff --git a/code/helper.py b/clicon/helper.py similarity index 100% rename from code/helper.py rename to clicon/helper.py diff --git a/code/libml.py b/clicon/libml.py similarity index 100% rename from code/libml.py rename to clicon/libml.py diff --git a/code/model.py b/clicon/model.py similarity index 100% rename from code/model.py rename to clicon/model.py diff --git a/code/note.py b/clicon/note.py similarity index 100% rename from code/note.py rename to clicon/note.py diff --git a/code/predict.py b/clicon/predict.py similarity index 100% rename from code/predict.py rename to clicon/predict.py diff --git a/code/runall.py b/clicon/runall.py similarity index 100% rename from code/runall.py rename to clicon/runall.py diff --git a/code/statistics.py b/clicon/statistics.py similarity index 100% rename from code/statistics.py rename to clicon/statistics.py diff --git a/code/train.py b/clicon/train.py similarity index 100% rename from code/train.py rename to clicon/train.py diff --git a/code/umls.py b/clicon/umls.py similarity index 100% rename from code/umls.py rename to clicon/umls.py diff --git a/code/umls_cache.py b/clicon/umls_cache.py similarity index 100% rename from code/umls_cache.py rename to clicon/umls_cache.py diff --git a/code/web.py b/clicon/web.py similarity index 100% rename from code/web.py rename to clicon/web.py diff --git a/code/web/__init__.py b/clicon/web/__init__.py similarity index 100% rename from code/web/__init__.py rename to clicon/web/__init__.py diff --git a/code/web/app.py b/clicon/web/app.py similarity index 100% rename from code/web/app.py rename to clicon/web/app.py diff --git a/code/web/static/bootstrap/css/bootstrap-responsive.css b/clicon/web/static/bootstrap/css/bootstrap-responsive.css similarity index 100% rename from code/web/static/bootstrap/css/bootstrap-responsive.css rename to clicon/web/static/bootstrap/css/bootstrap-responsive.css diff --git a/code/web/static/bootstrap/css/bootstrap-responsive.min.css b/clicon/web/static/bootstrap/css/bootstrap-responsive.min.css similarity index 100% rename from code/web/static/bootstrap/css/bootstrap-responsive.min.css rename to clicon/web/static/bootstrap/css/bootstrap-responsive.min.css diff --git a/code/web/static/bootstrap/css/bootstrap.css b/clicon/web/static/bootstrap/css/bootstrap.css similarity index 100% rename from code/web/static/bootstrap/css/bootstrap.css rename to clicon/web/static/bootstrap/css/bootstrap.css diff --git a/code/web/static/bootstrap/css/bootstrap.min.css b/clicon/web/static/bootstrap/css/bootstrap.min.css similarity index 100% rename from code/web/static/bootstrap/css/bootstrap.min.css rename to clicon/web/static/bootstrap/css/bootstrap.min.css diff --git a/code/web/static/bootstrap/img/glyphicons-halflings-white.png b/clicon/web/static/bootstrap/img/glyphicons-halflings-white.png similarity index 100% rename from code/web/static/bootstrap/img/glyphicons-halflings-white.png rename to clicon/web/static/bootstrap/img/glyphicons-halflings-white.png diff --git 
a/code/web/static/bootstrap/img/glyphicons-halflings.png b/clicon/web/static/bootstrap/img/glyphicons-halflings.png similarity index 100% rename from code/web/static/bootstrap/img/glyphicons-halflings.png rename to clicon/web/static/bootstrap/img/glyphicons-halflings.png diff --git a/code/web/static/bootstrap/js/bootstrap.js b/clicon/web/static/bootstrap/js/bootstrap.js similarity index 100% rename from code/web/static/bootstrap/js/bootstrap.js rename to clicon/web/static/bootstrap/js/bootstrap.js diff --git a/code/web/static/bootstrap/js/bootstrap.min.js b/clicon/web/static/bootstrap/js/bootstrap.min.js similarity index 100% rename from code/web/static/bootstrap/js/bootstrap.min.js rename to clicon/web/static/bootstrap/js/bootstrap.min.js diff --git a/code/web/static/javascripts/jquery.min.js b/clicon/web/static/javascripts/jquery.min.js similarity index 100% rename from code/web/static/javascripts/jquery.min.js rename to clicon/web/static/javascripts/jquery.min.js diff --git a/code/web/static/stylesheets/application.css b/clicon/web/static/stylesheets/application.css similarity index 100% rename from code/web/static/stylesheets/application.css rename to clicon/web/static/stylesheets/application.css diff --git a/code/web/templates/form.html b/clicon/web/templates/form.html similarity index 100% rename from code/web/templates/form.html rename to clicon/web/templates/form.html diff --git a/code/web/templates/layout.html b/clicon/web/templates/layout.html similarity index 100% rename from code/web/templates/layout.html rename to clicon/web/templates/layout.html diff --git a/code/web/templates/result.html b/clicon/web/templates/result.html similarity index 100% rename from code/web/templates/result.html rename to clicon/web/templates/result.html diff --git a/code/wordshape.py b/clicon/wordshape.py similarity index 100% rename from code/wordshape.py rename to clicon/wordshape.py From 80ed6622b8b80b631b3b14a27c2437c2b275d709 Mon Sep 17 00:00:00 2001 From: Tristan Naumann Date: Tue, 15 Apr 2014 15:12:19 -0400 Subject: [PATCH 038/393] Removing web demo support. 
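The Flask demo under web/ is removed along with its bundled Bootstrap assets.
For reference, the demo boiled down to the following (condensed from the
deleted web/app.py; model discovery and the HTML templates are omitted, the
model path is a placeholder, and the raw string return stands in for the
removed result template):

    import nltk
    from flask import Flask, request
    from model import Model

    app = Flask(__name__)

    @app.route('/process', methods=['POST', 'GET'])
    def process():
        data = request.form['input']
        data = nltk.sent_tokenize(data)
        data = map(nltk.word_tokenize, data)
        model = Model.load('models/example/model')   # placeholder path
        labels = model.predict(data)
        return str(labels)

    if __name__ == '__main__':
        app.run(host='0.0.0.0')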
---
 clicon/web.py                                  |    4 -
 clicon/web/__init__.py                         |    1 -
 clicon/web/app.py                              |   75 -
 .../bootstrap/css/bootstrap-responsive.css     | 1092 ---
 .../css/bootstrap-responsive.min.css           |    9 -
 clicon/web/static/bootstrap/css/bootstrap.css  | 6039 -----------------
 .../static/bootstrap/css/bootstrap.min.css     |    9 -
 .../img/glyphicons-halflings-white.png         |  Bin 8777 -> 0 bytes
 .../bootstrap/img/glyphicons-halflings.png     |  Bin 12799 -> 0 bytes
 clicon/web/static/bootstrap/js/bootstrap.js    | 2159 ------
 .../web/static/bootstrap/js/bootstrap.min.js   |    6 -
 clicon/web/static/javascripts/jquery.min.js    |    2 -
 clicon/web/static/stylesheets/application.css  |   18 -
 clicon/web/templates/form.html                 |   24 -
 clicon/web/templates/layout.html               |   26 -
 clicon/web/templates/result.html               |   29 -
 16 files changed, 9493 deletions(-)
 delete mode 100644 clicon/web.py
 delete mode 100644 clicon/web/__init__.py
 delete mode 100644 clicon/web/app.py
 delete mode 100644 clicon/web/static/bootstrap/css/bootstrap-responsive.css
 delete mode 100644 clicon/web/static/bootstrap/css/bootstrap-responsive.min.css
 delete mode 100644 clicon/web/static/bootstrap/css/bootstrap.css
 delete mode 100644 clicon/web/static/bootstrap/css/bootstrap.min.css
 delete mode 100644 clicon/web/static/bootstrap/img/glyphicons-halflings-white.png
 delete mode 100644 clicon/web/static/bootstrap/img/glyphicons-halflings.png
 delete mode 100644 clicon/web/static/bootstrap/js/bootstrap.js
 delete mode 100644 clicon/web/static/bootstrap/js/bootstrap.min.js
 delete mode 100644 clicon/web/static/javascripts/jquery.min.js
 delete mode 100644 clicon/web/static/stylesheets/application.css
 delete mode 100644 clicon/web/templates/form.html
 delete mode 100644 clicon/web/templates/layout.html
 delete mode 100644 clicon/web/templates/result.html

diff --git a/clicon/web.py b/clicon/web.py
deleted file mode 100644
index 4f9169e..0000000
--- a/clicon/web.py
+++ /dev/null
@@ -1,4 +0,0 @@
-import web.app
-
-if __name__ == '__main__':
-    web.app.run()
diff --git a/clicon/web/__init__.py b/clicon/web/__init__.py
deleted file mode 100644
index e7c9954..0000000
--- a/clicon/web/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Intentionally blank
\ No newline at end of file
diff --git a/clicon/web/app.py b/clicon/web/app.py
deleted file mode 100644
index 8944ac2..0000000
--- a/clicon/web/app.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import os
-import os.path
-import nltk
-import glob
-import json
-import libml
-
-from sets import Set
-from model import Model
-from flask import Flask
-from flask import render_template
-from flask import request
-
-app = Flask(__name__)
-
-models_directory = os.path.join(os.path.dirname(__file__), "../../models")
-
-@app.route('/')
-def index():
-    items = []
-
-    models = glob.glob(os.path.join(models_directory, '*'))
-
-    for model in models:
-        name = os.path.basename(model)
-        available_models = Set(os.listdir(model))
-
-        if "model" not in available_models:
-            continue
-
-        for type in ["svm", "crf", "lin"]:
-            if "model."
+ str(type) in available_models: - properties = { - "name": name, - "type": type - } - items.append((json.dumps(properties), name + " - " + type.upper())) - - - items = sorted(items, key = lambda t: t[1].lower()) - return render_template("form.html", models = items) - -@app.route('/process', methods=['POST', 'GET']) -def process(): - data = request.form['input'] - data = nltk.sent_tokenize(data) - data = map(nltk.word_tokenize, data) - - properties = request.form['model'] - properties = json.loads(properties) - - model = Model.load(os.path.join(models_directory, properties["name"], "model")) - labels = model.predict(data) - output = None - - - if properties["type"] == "svm": - output = labels[libml.SVM] - elif properties["type"] == "crf": - output = labels[libml.CRF] - elif properties["type"] == "lin": - output = labels[libml.LIN] - - output = sum(output, []) - data = sum(data, []) - output = zip(data, output) - - return render_template("result.html", input = request.form["input"], model = properties["name"] + " - " + properties["type"].upper(), output = output) - -def run(): - app.debug = True - app.run(host = "0.0.0.0") - -if __name__ == '__main__': - run() diff --git a/clicon/web/static/bootstrap/css/bootstrap-responsive.css b/clicon/web/static/bootstrap/css/bootstrap-responsive.css deleted file mode 100644 index a3352d7..0000000 --- a/clicon/web/static/bootstrap/css/bootstrap-responsive.css +++ /dev/null @@ -1,1092 +0,0 @@ -/*! - * Bootstrap Responsive v2.2.2 - * - * Copyright 2012 Twitter, Inc - * Licensed under the Apache License v2.0 - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Designed and built with all the love in the world @twitter by @mdo and @fat. - */ - -@-ms-viewport { - width: device-width; -} - -.clearfix { - *zoom: 1; -} - -.clearfix:before, -.clearfix:after { - display: table; - line-height: 0; - content: ""; -} - -.clearfix:after { - clear: both; -} - -.hide-text { - font: 0/0 a; - color: transparent; - text-shadow: none; - background-color: transparent; - border: 0; -} - -.input-block-level { - display: block; - width: 100%; - min-height: 30px; - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; -} - -.hidden { - display: none; - visibility: hidden; -} - -.visible-phone { - display: none !important; -} - -.visible-tablet { - display: none !important; -} - -.hidden-desktop { - display: none !important; -} - -.visible-desktop { - display: inherit !important; -} - -@media (min-width: 768px) and (max-width: 979px) { - .hidden-desktop { - display: inherit !important; - } - .visible-desktop { - display: none !important ; - } - .visible-tablet { - display: inherit !important; - } - .hidden-tablet { - display: none !important; - } -} - -@media (max-width: 767px) { - .hidden-desktop { - display: inherit !important; - } - .visible-desktop { - display: none !important; - } - .visible-phone { - display: inherit !important; - } - .hidden-phone { - display: none !important; - } -} - -@media (min-width: 1200px) { - .row { - margin-left: -30px; - *zoom: 1; - } - .row:before, - .row:after { - display: table; - line-height: 0; - content: ""; - } - .row:after { - clear: both; - } - [class*="span"] { - float: left; - min-height: 1px; - margin-left: 30px; - } - .container, - .navbar-static-top .container, - .navbar-fixed-top .container, - .navbar-fixed-bottom .container { - width: 1170px; - } - .span12 { - width: 1170px; - } - .span11 { - width: 1070px; - } - .span10 { - width: 970px; - } - .span9 { - width: 870px; - } - .span8 { - width: 
770px; - } - .span7 { - width: 670px; - } - .span6 { - width: 570px; - } - .span5 { - width: 470px; - } - .span4 { - width: 370px; - } - .span3 { - width: 270px; - } - .span2 { - width: 170px; - } - .span1 { - width: 70px; - } - .offset12 { - margin-left: 1230px; - } - .offset11 { - margin-left: 1130px; - } - .offset10 { - margin-left: 1030px; - } - .offset9 { - margin-left: 930px; - } - .offset8 { - margin-left: 830px; - } - .offset7 { - margin-left: 730px; - } - .offset6 { - margin-left: 630px; - } - .offset5 { - margin-left: 530px; - } - .offset4 { - margin-left: 430px; - } - .offset3 { - margin-left: 330px; - } - .offset2 { - margin-left: 230px; - } - .offset1 { - margin-left: 130px; - } - .row-fluid { - width: 100%; - *zoom: 1; - } - .row-fluid:before, - .row-fluid:after { - display: table; - line-height: 0; - content: ""; - } - .row-fluid:after { - clear: both; - } - .row-fluid [class*="span"] { - display: block; - float: left; - width: 100%; - min-height: 30px; - margin-left: 2.564102564102564%; - *margin-left: 2.5109110747408616%; - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; - } - .row-fluid [class*="span"]:first-child { - margin-left: 0; - } - .row-fluid .controls-row [class*="span"] + [class*="span"] { - margin-left: 2.564102564102564%; - } - .row-fluid .span12 { - width: 100%; - *width: 99.94680851063829%; - } - .row-fluid .span11 { - width: 91.45299145299145%; - *width: 91.39979996362975%; - } - .row-fluid .span10 { - width: 82.90598290598291%; - *width: 82.8527914166212%; - } - .row-fluid .span9 { - width: 74.35897435897436%; - *width: 74.30578286961266%; - } - .row-fluid .span8 { - width: 65.81196581196582%; - *width: 65.75877432260411%; - } - .row-fluid .span7 { - width: 57.26495726495726%; - *width: 57.21176577559556%; - } - .row-fluid .span6 { - width: 48.717948717948715%; - *width: 48.664757228587014%; - } - .row-fluid .span5 { - width: 40.17094017094017%; - *width: 40.11774868157847%; - } - .row-fluid .span4 { - width: 31.623931623931625%; - *width: 31.570740134569924%; - } - .row-fluid .span3 { - width: 23.076923076923077%; - *width: 23.023731587561375%; - } - .row-fluid .span2 { - width: 14.52991452991453%; - *width: 14.476723040552828%; - } - .row-fluid .span1 { - width: 5.982905982905983%; - *width: 5.929714493544281%; - } - .row-fluid .offset12 { - margin-left: 105.12820512820512%; - *margin-left: 105.02182214948171%; - } - .row-fluid .offset12:first-child { - margin-left: 102.56410256410257%; - *margin-left: 102.45771958537915%; - } - .row-fluid .offset11 { - margin-left: 96.58119658119658%; - *margin-left: 96.47481360247316%; - } - .row-fluid .offset11:first-child { - margin-left: 94.01709401709402%; - *margin-left: 93.91071103837061%; - } - .row-fluid .offset10 { - margin-left: 88.03418803418803%; - *margin-left: 87.92780505546462%; - } - .row-fluid .offset10:first-child { - margin-left: 85.47008547008548%; - *margin-left: 85.36370249136206%; - } - .row-fluid .offset9 { - margin-left: 79.48717948717949%; - *margin-left: 79.38079650845607%; - } - .row-fluid .offset9:first-child { - margin-left: 76.92307692307693%; - *margin-left: 76.81669394435352%; - } - .row-fluid .offset8 { - margin-left: 70.94017094017094%; - *margin-left: 70.83378796144753%; - } - .row-fluid .offset8:first-child { - margin-left: 68.37606837606839%; - *margin-left: 68.26968539734497%; - } - .row-fluid .offset7 { - margin-left: 62.393162393162385%; - *margin-left: 62.28677941443899%; - } - .row-fluid .offset7:first-child { - margin-left: 
59.82905982905982%; - *margin-left: 59.72267685033642%; - } - .row-fluid .offset6 { - margin-left: 53.84615384615384%; - *margin-left: 53.739770867430444%; - } - .row-fluid .offset6:first-child { - margin-left: 51.28205128205128%; - *margin-left: 51.175668303327875%; - } - .row-fluid .offset5 { - margin-left: 45.299145299145295%; - *margin-left: 45.1927623204219%; - } - .row-fluid .offset5:first-child { - margin-left: 42.73504273504273%; - *margin-left: 42.62865975631933%; - } - .row-fluid .offset4 { - margin-left: 36.75213675213675%; - *margin-left: 36.645753773413354%; - } - .row-fluid .offset4:first-child { - margin-left: 34.18803418803419%; - *margin-left: 34.081651209310785%; - } - .row-fluid .offset3 { - margin-left: 28.205128205128204%; - *margin-left: 28.0987452264048%; - } - .row-fluid .offset3:first-child { - margin-left: 25.641025641025642%; - *margin-left: 25.53464266230224%; - } - .row-fluid .offset2 { - margin-left: 19.65811965811966%; - *margin-left: 19.551736679396257%; - } - .row-fluid .offset2:first-child { - margin-left: 17.094017094017094%; - *margin-left: 16.98763411529369%; - } - .row-fluid .offset1 { - margin-left: 11.11111111111111%; - *margin-left: 11.004728132387708%; - } - .row-fluid .offset1:first-child { - margin-left: 8.547008547008547%; - *margin-left: 8.440625568285142%; - } - input, - textarea, - .uneditable-input { - margin-left: 0; - } - .controls-row [class*="span"] + [class*="span"] { - margin-left: 30px; - } - input.span12, - textarea.span12, - .uneditable-input.span12 { - width: 1156px; - } - input.span11, - textarea.span11, - .uneditable-input.span11 { - width: 1056px; - } - input.span10, - textarea.span10, - .uneditable-input.span10 { - width: 956px; - } - input.span9, - textarea.span9, - .uneditable-input.span9 { - width: 856px; - } - input.span8, - textarea.span8, - .uneditable-input.span8 { - width: 756px; - } - input.span7, - textarea.span7, - .uneditable-input.span7 { - width: 656px; - } - input.span6, - textarea.span6, - .uneditable-input.span6 { - width: 556px; - } - input.span5, - textarea.span5, - .uneditable-input.span5 { - width: 456px; - } - input.span4, - textarea.span4, - .uneditable-input.span4 { - width: 356px; - } - input.span3, - textarea.span3, - .uneditable-input.span3 { - width: 256px; - } - input.span2, - textarea.span2, - .uneditable-input.span2 { - width: 156px; - } - input.span1, - textarea.span1, - .uneditable-input.span1 { - width: 56px; - } - .thumbnails { - margin-left: -30px; - } - .thumbnails > li { - margin-left: 30px; - } - .row-fluid .thumbnails { - margin-left: 0; - } -} - -@media (min-width: 768px) and (max-width: 979px) { - .row { - margin-left: -20px; - *zoom: 1; - } - .row:before, - .row:after { - display: table; - line-height: 0; - content: ""; - } - .row:after { - clear: both; - } - [class*="span"] { - float: left; - min-height: 1px; - margin-left: 20px; - } - .container, - .navbar-static-top .container, - .navbar-fixed-top .container, - .navbar-fixed-bottom .container { - width: 724px; - } - .span12 { - width: 724px; - } - .span11 { - width: 662px; - } - .span10 { - width: 600px; - } - .span9 { - width: 538px; - } - .span8 { - width: 476px; - } - .span7 { - width: 414px; - } - .span6 { - width: 352px; - } - .span5 { - width: 290px; - } - .span4 { - width: 228px; - } - .span3 { - width: 166px; - } - .span2 { - width: 104px; - } - .span1 { - width: 42px; - } - .offset12 { - margin-left: 764px; - } - .offset11 { - margin-left: 702px; - } - .offset10 { - margin-left: 640px; - } - .offset9 { - margin-left: 578px; 
- } - .offset8 { - margin-left: 516px; - } - .offset7 { - margin-left: 454px; - } - .offset6 { - margin-left: 392px; - } - .offset5 { - margin-left: 330px; - } - .offset4 { - margin-left: 268px; - } - .offset3 { - margin-left: 206px; - } - .offset2 { - margin-left: 144px; - } - .offset1 { - margin-left: 82px; - } - .row-fluid { - width: 100%; - *zoom: 1; - } - .row-fluid:before, - .row-fluid:after { - display: table; - line-height: 0; - content: ""; - } - .row-fluid:after { - clear: both; - } - .row-fluid [class*="span"] { - display: block; - float: left; - width: 100%; - min-height: 30px; - margin-left: 2.7624309392265194%; - *margin-left: 2.709239449864817%; - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; - } - .row-fluid [class*="span"]:first-child { - margin-left: 0; - } - .row-fluid .controls-row [class*="span"] + [class*="span"] { - margin-left: 2.7624309392265194%; - } - .row-fluid .span12 { - width: 100%; - *width: 99.94680851063829%; - } - .row-fluid .span11 { - width: 91.43646408839778%; - *width: 91.38327259903608%; - } - .row-fluid .span10 { - width: 82.87292817679558%; - *width: 82.81973668743387%; - } - .row-fluid .span9 { - width: 74.30939226519337%; - *width: 74.25620077583166%; - } - .row-fluid .span8 { - width: 65.74585635359117%; - *width: 65.69266486422946%; - } - .row-fluid .span7 { - width: 57.18232044198895%; - *width: 57.12912895262725%; - } - .row-fluid .span6 { - width: 48.61878453038674%; - *width: 48.56559304102504%; - } - .row-fluid .span5 { - width: 40.05524861878453%; - *width: 40.00205712942283%; - } - .row-fluid .span4 { - width: 31.491712707182323%; - *width: 31.43852121782062%; - } - .row-fluid .span3 { - width: 22.92817679558011%; - *width: 22.87498530621841%; - } - .row-fluid .span2 { - width: 14.3646408839779%; - *width: 14.311449394616199%; - } - .row-fluid .span1 { - width: 5.801104972375691%; - *width: 5.747913483013988%; - } - .row-fluid .offset12 { - margin-left: 105.52486187845304%; - *margin-left: 105.41847889972962%; - } - .row-fluid .offset12:first-child { - margin-left: 102.76243093922652%; - *margin-left: 102.6560479605031%; - } - .row-fluid .offset11 { - margin-left: 96.96132596685082%; - *margin-left: 96.8549429881274%; - } - .row-fluid .offset11:first-child { - margin-left: 94.1988950276243%; - *margin-left: 94.09251204890089%; - } - .row-fluid .offset10 { - margin-left: 88.39779005524862%; - *margin-left: 88.2914070765252%; - } - .row-fluid .offset10:first-child { - margin-left: 85.6353591160221%; - *margin-left: 85.52897613729868%; - } - .row-fluid .offset9 { - margin-left: 79.8342541436464%; - *margin-left: 79.72787116492299%; - } - .row-fluid .offset9:first-child { - margin-left: 77.07182320441989%; - *margin-left: 76.96544022569647%; - } - .row-fluid .offset8 { - margin-left: 71.2707182320442%; - *margin-left: 71.16433525332079%; - } - .row-fluid .offset8:first-child { - margin-left: 68.50828729281768%; - *margin-left: 68.40190431409427%; - } - .row-fluid .offset7 { - margin-left: 62.70718232044199%; - *margin-left: 62.600799341718584%; - } - .row-fluid .offset7:first-child { - margin-left: 59.94475138121547%; - *margin-left: 59.838368402492065%; - } - .row-fluid .offset6 { - margin-left: 54.14364640883978%; - *margin-left: 54.037263430116376%; - } - .row-fluid .offset6:first-child { - margin-left: 51.38121546961326%; - *margin-left: 51.27483249088986%; - } - .row-fluid .offset5 { - margin-left: 45.58011049723757%; - *margin-left: 45.47372751851417%; - } - .row-fluid .offset5:first-child { - 
margin-left: 42.81767955801105%; - *margin-left: 42.71129657928765%; - } - .row-fluid .offset4 { - margin-left: 37.01657458563536%; - *margin-left: 36.91019160691196%; - } - .row-fluid .offset4:first-child { - margin-left: 34.25414364640884%; - *margin-left: 34.14776066768544%; - } - .row-fluid .offset3 { - margin-left: 28.45303867403315%; - *margin-left: 28.346655695309746%; - } - .row-fluid .offset3:first-child { - margin-left: 25.69060773480663%; - *margin-left: 25.584224756083227%; - } - .row-fluid .offset2 { - margin-left: 19.88950276243094%; - *margin-left: 19.783119783707537%; - } - .row-fluid .offset2:first-child { - margin-left: 17.12707182320442%; - *margin-left: 17.02068884448102%; - } - .row-fluid .offset1 { - margin-left: 11.32596685082873%; - *margin-left: 11.219583872105325%; - } - .row-fluid .offset1:first-child { - margin-left: 8.56353591160221%; - *margin-left: 8.457152932878806%; - } - input, - textarea, - .uneditable-input { - margin-left: 0; - } - .controls-row [class*="span"] + [class*="span"] { - margin-left: 20px; - } - input.span12, - textarea.span12, - .uneditable-input.span12 { - width: 710px; - } - input.span11, - textarea.span11, - .uneditable-input.span11 { - width: 648px; - } - input.span10, - textarea.span10, - .uneditable-input.span10 { - width: 586px; - } - input.span9, - textarea.span9, - .uneditable-input.span9 { - width: 524px; - } - input.span8, - textarea.span8, - .uneditable-input.span8 { - width: 462px; - } - input.span7, - textarea.span7, - .uneditable-input.span7 { - width: 400px; - } - input.span6, - textarea.span6, - .uneditable-input.span6 { - width: 338px; - } - input.span5, - textarea.span5, - .uneditable-input.span5 { - width: 276px; - } - input.span4, - textarea.span4, - .uneditable-input.span4 { - width: 214px; - } - input.span3, - textarea.span3, - .uneditable-input.span3 { - width: 152px; - } - input.span2, - textarea.span2, - .uneditable-input.span2 { - width: 90px; - } - input.span1, - textarea.span1, - .uneditable-input.span1 { - width: 28px; - } -} - -@media (max-width: 767px) { - body { - padding-right: 20px; - padding-left: 20px; - } - .navbar-fixed-top, - .navbar-fixed-bottom, - .navbar-static-top { - margin-right: -20px; - margin-left: -20px; - } - .container-fluid { - padding: 0; - } - .dl-horizontal dt { - float: none; - width: auto; - clear: none; - text-align: left; - } - .dl-horizontal dd { - margin-left: 0; - } - .container { - width: auto; - } - .row-fluid { - width: 100%; - } - .row, - .thumbnails { - margin-left: 0; - } - .thumbnails > li { - float: none; - margin-left: 0; - } - [class*="span"], - .uneditable-input[class*="span"], - .row-fluid [class*="span"] { - display: block; - float: none; - width: 100%; - margin-left: 0; - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; - } - .span12, - .row-fluid .span12 { - width: 100%; - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; - } - .row-fluid [class*="offset"]:first-child { - margin-left: 0; - } - .input-large, - .input-xlarge, - .input-xxlarge, - input[class*="span"], - select[class*="span"], - textarea[class*="span"], - .uneditable-input { - display: block; - width: 100%; - min-height: 30px; - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; - } - .input-prepend input, - .input-append input, - .input-prepend input[class*="span"], - .input-append input[class*="span"] { - display: inline-block; - width: auto; - } - .controls-row [class*="span"] + 
[class*="span"] { - margin-left: 0; - } - .modal { - position: fixed; - top: 20px; - right: 20px; - left: 20px; - width: auto; - margin: 0; - } - .modal.fade { - top: -100px; - } - .modal.fade.in { - top: 20px; - } -} - -@media (max-width: 480px) { - .nav-collapse { - -webkit-transform: translate3d(0, 0, 0); - } - .page-header h1 small { - display: block; - line-height: 20px; - } - input[type="checkbox"], - input[type="radio"] { - border: 1px solid #ccc; - } - .form-horizontal .control-label { - float: none; - width: auto; - padding-top: 0; - text-align: left; - } - .form-horizontal .controls { - margin-left: 0; - } - .form-horizontal .control-list { - padding-top: 0; - } - .form-horizontal .form-actions { - padding-right: 10px; - padding-left: 10px; - } - .media .pull-left, - .media .pull-right { - display: block; - float: none; - margin-bottom: 10px; - } - .media-object { - margin-right: 0; - margin-left: 0; - } - .modal { - top: 10px; - right: 10px; - left: 10px; - } - .modal-header .close { - padding: 10px; - margin: -10px; - } - .carousel-caption { - position: static; - } -} - -@media (max-width: 979px) { - body { - padding-top: 0; - } - .navbar-fixed-top, - .navbar-fixed-bottom { - position: static; - } - .navbar-fixed-top { - margin-bottom: 20px; - } - .navbar-fixed-bottom { - margin-top: 20px; - } - .navbar-fixed-top .navbar-inner, - .navbar-fixed-bottom .navbar-inner { - padding: 5px; - } - .navbar .container { - width: auto; - padding: 0; - } - .navbar .brand { - padding-right: 10px; - padding-left: 10px; - margin: 0 0 0 -5px; - } - .nav-collapse { - clear: both; - } - .nav-collapse .nav { - float: none; - margin: 0 0 10px; - } - .nav-collapse .nav > li { - float: none; - } - .nav-collapse .nav > li > a { - margin-bottom: 2px; - } - .nav-collapse .nav > .divider-vertical { - display: none; - } - .nav-collapse .nav .nav-header { - color: #777777; - text-shadow: none; - } - .nav-collapse .nav > li > a, - .nav-collapse .dropdown-menu a { - padding: 9px 15px; - font-weight: bold; - color: #777777; - -webkit-border-radius: 3px; - -moz-border-radius: 3px; - border-radius: 3px; - } - .nav-collapse .btn { - padding: 4px 10px 4px; - font-weight: normal; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; - } - .nav-collapse .dropdown-menu li + li a { - margin-bottom: 2px; - } - .nav-collapse .nav > li > a:hover, - .nav-collapse .dropdown-menu a:hover { - background-color: #f2f2f2; - } - .navbar-inverse .nav-collapse .nav > li > a, - .navbar-inverse .nav-collapse .dropdown-menu a { - color: #999999; - } - .navbar-inverse .nav-collapse .nav > li > a:hover, - .navbar-inverse .nav-collapse .dropdown-menu a:hover { - background-color: #111111; - } - .nav-collapse.in .btn-group { - padding: 0; - margin-top: 5px; - } - .nav-collapse .dropdown-menu { - position: static; - top: auto; - left: auto; - display: none; - float: none; - max-width: none; - padding: 0; - margin: 0 15px; - background-color: transparent; - border: none; - -webkit-border-radius: 0; - -moz-border-radius: 0; - border-radius: 0; - -webkit-box-shadow: none; - -moz-box-shadow: none; - box-shadow: none; - } - .nav-collapse .open > .dropdown-menu { - display: block; - } - .nav-collapse .dropdown-menu:before, - .nav-collapse .dropdown-menu:after { - display: none; - } - .nav-collapse .dropdown-menu .divider { - display: none; - } - .nav-collapse .nav > li > .dropdown-menu:before, - .nav-collapse .nav > li > .dropdown-menu:after { - display: none; - } - .nav-collapse .navbar-form, - .nav-collapse 
.navbar-search { - float: none; - padding: 10px 15px; - margin: 10px 0; - border-top: 1px solid #f2f2f2; - border-bottom: 1px solid #f2f2f2; - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1); - -moz-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1); - } - .navbar-inverse .nav-collapse .navbar-form, - .navbar-inverse .nav-collapse .navbar-search { - border-top-color: #111111; - border-bottom-color: #111111; - } - .navbar .nav-collapse .nav.pull-right { - float: none; - margin-left: 0; - } - .nav-collapse, - .nav-collapse.collapse { - height: 0; - overflow: hidden; - } - .navbar .btn-navbar { - display: block; - } - .navbar-static .navbar-inner { - padding-right: 10px; - padding-left: 10px; - } -} - -@media (min-width: 980px) { - .nav-collapse.collapse { - height: auto !important; - overflow: visible !important; - } -} diff --git a/clicon/web/static/bootstrap/css/bootstrap-responsive.min.css b/clicon/web/static/bootstrap/css/bootstrap-responsive.min.css deleted file mode 100644 index 5cb833f..0000000 --- a/clicon/web/static/bootstrap/css/bootstrap-responsive.min.css +++ /dev/null @@ -1,9 +0,0 @@ -/*! - * Bootstrap Responsive v2.2.2 - * - * Copyright 2012 Twitter, Inc - * Licensed under the Apache License v2.0 - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Designed and built with all the love in the world @twitter by @mdo and @fat. - */@-ms-viewport{width:device-width}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;line-height:0;content:""}.clearfix:after{clear:both}.hide-text{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.input-block-level{display:block;width:100%;min-height:30px;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.hidden{display:none;visibility:hidden}.visible-phone{display:none!important}.visible-tablet{display:none!important}.hidden-desktop{display:none!important}.visible-desktop{display:inherit!important}@media(min-width:768px) and (max-width:979px){.hidden-desktop{display:inherit!important}.visible-desktop{display:none!important}.visible-tablet{display:inherit!important}.hidden-tablet{display:none!important}}@media(max-width:767px){.hidden-desktop{display:inherit!important}.visible-desktop{display:none!important}.visible-phone{display:inherit!important}.hidden-phone{display:none!important}}@media(min-width:1200px){.row{margin-left:-30px;*zoom:1}.row:before,.row:after{display:table;line-height:0;content:""}.row:after{clear:both}[class*="span"]{float:left;min-height:1px;margin-left:30px}.container,.navbar-static-top .container,.navbar-fixed-top .container,.navbar-fixed-bottom .container{width:1170px}.span12{width:1170px}.span11{width:1070px}.span10{width:970px}.span9{width:870px}.span8{width:770px}.span7{width:670px}.span6{width:570px}.span5{width:470px}.span4{width:370px}.span3{width:270px}.span2{width:170px}.span1{width:70px}.offset12{margin-left:1230px}.offset11{margin-left:1130px}.offset10{margin-left:1030px}.offset9{margin-left:930px}.offset8{margin-left:830px}.offset7{margin-left:730px}.offset6{margin-left:630px}.offset5{margin-left:530px}.offset4{margin-left:430px}.offset3{margin-left:330px}.offset2{margin-left:230px}.offset1{margin-left:130px}.row-fluid{width:100%;*zoom:1}.row-fluid:before,.row-fluid:after{display:table;line-height:0;content:""}.row-fluid:after{clear:both}.row-fluid 
[class*="span"]{display:block;float:left;width:100%;min-height:30px;margin-left:2.564102564102564%;*margin-left:2.5109110747408616%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.row-fluid [class*="span"]:first-child{margin-left:0}.row-fluid .controls-row [class*="span"]+[class*="span"]{margin-left:2.564102564102564%}.row-fluid .span12{width:100%;*width:99.94680851063829%}.row-fluid .span11{width:91.45299145299145%;*width:91.39979996362975%}.row-fluid .span10{width:82.90598290598291%;*width:82.8527914166212%}.row-fluid .span9{width:74.35897435897436%;*width:74.30578286961266%}.row-fluid .span8{width:65.81196581196582%;*width:65.75877432260411%}.row-fluid .span7{width:57.26495726495726%;*width:57.21176577559556%}.row-fluid .span6{width:48.717948717948715%;*width:48.664757228587014%}.row-fluid .span5{width:40.17094017094017%;*width:40.11774868157847%}.row-fluid .span4{width:31.623931623931625%;*width:31.570740134569924%}.row-fluid .span3{width:23.076923076923077%;*width:23.023731587561375%}.row-fluid .span2{width:14.52991452991453%;*width:14.476723040552828%}.row-fluid .span1{width:5.982905982905983%;*width:5.929714493544281%}.row-fluid .offset12{margin-left:105.12820512820512%;*margin-left:105.02182214948171%}.row-fluid .offset12:first-child{margin-left:102.56410256410257%;*margin-left:102.45771958537915%}.row-fluid .offset11{margin-left:96.58119658119658%;*margin-left:96.47481360247316%}.row-fluid .offset11:first-child{margin-left:94.01709401709402%;*margin-left:93.91071103837061%}.row-fluid .offset10{margin-left:88.03418803418803%;*margin-left:87.92780505546462%}.row-fluid .offset10:first-child{margin-left:85.47008547008548%;*margin-left:85.36370249136206%}.row-fluid .offset9{margin-left:79.48717948717949%;*margin-left:79.38079650845607%}.row-fluid .offset9:first-child{margin-left:76.92307692307693%;*margin-left:76.81669394435352%}.row-fluid .offset8{margin-left:70.94017094017094%;*margin-left:70.83378796144753%}.row-fluid .offset8:first-child{margin-left:68.37606837606839%;*margin-left:68.26968539734497%}.row-fluid .offset7{margin-left:62.393162393162385%;*margin-left:62.28677941443899%}.row-fluid .offset7:first-child{margin-left:59.82905982905982%;*margin-left:59.72267685033642%}.row-fluid .offset6{margin-left:53.84615384615384%;*margin-left:53.739770867430444%}.row-fluid .offset6:first-child{margin-left:51.28205128205128%;*margin-left:51.175668303327875%}.row-fluid .offset5{margin-left:45.299145299145295%;*margin-left:45.1927623204219%}.row-fluid .offset5:first-child{margin-left:42.73504273504273%;*margin-left:42.62865975631933%}.row-fluid .offset4{margin-left:36.75213675213675%;*margin-left:36.645753773413354%}.row-fluid .offset4:first-child{margin-left:34.18803418803419%;*margin-left:34.081651209310785%}.row-fluid .offset3{margin-left:28.205128205128204%;*margin-left:28.0987452264048%}.row-fluid .offset3:first-child{margin-left:25.641025641025642%;*margin-left:25.53464266230224%}.row-fluid .offset2{margin-left:19.65811965811966%;*margin-left:19.551736679396257%}.row-fluid .offset2:first-child{margin-left:17.094017094017094%;*margin-left:16.98763411529369%}.row-fluid .offset1{margin-left:11.11111111111111%;*margin-left:11.004728132387708%}.row-fluid .offset1:first-child{margin-left:8.547008547008547%;*margin-left:8.440625568285142%}input,textarea,.uneditable-input{margin-left:0}.controls-row 
[class*="span"]+[class*="span"]{margin-left:30px}input.span12,textarea.span12,.uneditable-input.span12{width:1156px}input.span11,textarea.span11,.uneditable-input.span11{width:1056px}input.span10,textarea.span10,.uneditable-input.span10{width:956px}input.span9,textarea.span9,.uneditable-input.span9{width:856px}input.span8,textarea.span8,.uneditable-input.span8{width:756px}input.span7,textarea.span7,.uneditable-input.span7{width:656px}input.span6,textarea.span6,.uneditable-input.span6{width:556px}input.span5,textarea.span5,.uneditable-input.span5{width:456px}input.span4,textarea.span4,.uneditable-input.span4{width:356px}input.span3,textarea.span3,.uneditable-input.span3{width:256px}input.span2,textarea.span2,.uneditable-input.span2{width:156px}input.span1,textarea.span1,.uneditable-input.span1{width:56px}.thumbnails{margin-left:-30px}.thumbnails>li{margin-left:30px}.row-fluid .thumbnails{margin-left:0}}@media(min-width:768px) and (max-width:979px){.row{margin-left:-20px;*zoom:1}.row:before,.row:after{display:table;line-height:0;content:""}.row:after{clear:both}[class*="span"]{float:left;min-height:1px;margin-left:20px}.container,.navbar-static-top .container,.navbar-fixed-top .container,.navbar-fixed-bottom .container{width:724px}.span12{width:724px}.span11{width:662px}.span10{width:600px}.span9{width:538px}.span8{width:476px}.span7{width:414px}.span6{width:352px}.span5{width:290px}.span4{width:228px}.span3{width:166px}.span2{width:104px}.span1{width:42px}.offset12{margin-left:764px}.offset11{margin-left:702px}.offset10{margin-left:640px}.offset9{margin-left:578px}.offset8{margin-left:516px}.offset7{margin-left:454px}.offset6{margin-left:392px}.offset5{margin-left:330px}.offset4{margin-left:268px}.offset3{margin-left:206px}.offset2{margin-left:144px}.offset1{margin-left:82px}.row-fluid{width:100%;*zoom:1}.row-fluid:before,.row-fluid:after{display:table;line-height:0;content:""}.row-fluid:after{clear:both}.row-fluid [class*="span"]{display:block;float:left;width:100%;min-height:30px;margin-left:2.7624309392265194%;*margin-left:2.709239449864817%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.row-fluid [class*="span"]:first-child{margin-left:0}.row-fluid .controls-row [class*="span"]+[class*="span"]{margin-left:2.7624309392265194%}.row-fluid .span12{width:100%;*width:99.94680851063829%}.row-fluid .span11{width:91.43646408839778%;*width:91.38327259903608%}.row-fluid .span10{width:82.87292817679558%;*width:82.81973668743387%}.row-fluid .span9{width:74.30939226519337%;*width:74.25620077583166%}.row-fluid .span8{width:65.74585635359117%;*width:65.69266486422946%}.row-fluid .span7{width:57.18232044198895%;*width:57.12912895262725%}.row-fluid .span6{width:48.61878453038674%;*width:48.56559304102504%}.row-fluid .span5{width:40.05524861878453%;*width:40.00205712942283%}.row-fluid .span4{width:31.491712707182323%;*width:31.43852121782062%}.row-fluid .span3{width:22.92817679558011%;*width:22.87498530621841%}.row-fluid .span2{width:14.3646408839779%;*width:14.311449394616199%}.row-fluid .span1{width:5.801104972375691%;*width:5.747913483013988%}.row-fluid .offset12{margin-left:105.52486187845304%;*margin-left:105.41847889972962%}.row-fluid .offset12:first-child{margin-left:102.76243093922652%;*margin-left:102.6560479605031%}.row-fluid .offset11{margin-left:96.96132596685082%;*margin-left:96.8549429881274%}.row-fluid .offset11:first-child{margin-left:94.1988950276243%;*margin-left:94.09251204890089%}.row-fluid 
.offset10{margin-left:88.39779005524862%;*margin-left:88.2914070765252%}.row-fluid .offset10:first-child{margin-left:85.6353591160221%;*margin-left:85.52897613729868%}.row-fluid .offset9{margin-left:79.8342541436464%;*margin-left:79.72787116492299%}.row-fluid .offset9:first-child{margin-left:77.07182320441989%;*margin-left:76.96544022569647%}.row-fluid .offset8{margin-left:71.2707182320442%;*margin-left:71.16433525332079%}.row-fluid .offset8:first-child{margin-left:68.50828729281768%;*margin-left:68.40190431409427%}.row-fluid .offset7{margin-left:62.70718232044199%;*margin-left:62.600799341718584%}.row-fluid .offset7:first-child{margin-left:59.94475138121547%;*margin-left:59.838368402492065%}.row-fluid .offset6{margin-left:54.14364640883978%;*margin-left:54.037263430116376%}.row-fluid .offset6:first-child{margin-left:51.38121546961326%;*margin-left:51.27483249088986%}.row-fluid .offset5{margin-left:45.58011049723757%;*margin-left:45.47372751851417%}.row-fluid .offset5:first-child{margin-left:42.81767955801105%;*margin-left:42.71129657928765%}.row-fluid .offset4{margin-left:37.01657458563536%;*margin-left:36.91019160691196%}.row-fluid .offset4:first-child{margin-left:34.25414364640884%;*margin-left:34.14776066768544%}.row-fluid .offset3{margin-left:28.45303867403315%;*margin-left:28.346655695309746%}.row-fluid .offset3:first-child{margin-left:25.69060773480663%;*margin-left:25.584224756083227%}.row-fluid .offset2{margin-left:19.88950276243094%;*margin-left:19.783119783707537%}.row-fluid .offset2:first-child{margin-left:17.12707182320442%;*margin-left:17.02068884448102%}.row-fluid .offset1{margin-left:11.32596685082873%;*margin-left:11.219583872105325%}.row-fluid .offset1:first-child{margin-left:8.56353591160221%;*margin-left:8.457152932878806%}input,textarea,.uneditable-input{margin-left:0}.controls-row [class*="span"]+[class*="span"]{margin-left:20px}input.span12,textarea.span12,.uneditable-input.span12{width:710px}input.span11,textarea.span11,.uneditable-input.span11{width:648px}input.span10,textarea.span10,.uneditable-input.span10{width:586px}input.span9,textarea.span9,.uneditable-input.span9{width:524px}input.span8,textarea.span8,.uneditable-input.span8{width:462px}input.span7,textarea.span7,.uneditable-input.span7{width:400px}input.span6,textarea.span6,.uneditable-input.span6{width:338px}input.span5,textarea.span5,.uneditable-input.span5{width:276px}input.span4,textarea.span4,.uneditable-input.span4{width:214px}input.span3,textarea.span3,.uneditable-input.span3{width:152px}input.span2,textarea.span2,.uneditable-input.span2{width:90px}input.span1,textarea.span1,.uneditable-input.span1{width:28px}}@media(max-width:767px){body{padding-right:20px;padding-left:20px}.navbar-fixed-top,.navbar-fixed-bottom,.navbar-static-top{margin-right:-20px;margin-left:-20px}.container-fluid{padding:0}.dl-horizontal dt{float:none;width:auto;clear:none;text-align:left}.dl-horizontal dd{margin-left:0}.container{width:auto}.row-fluid{width:100%}.row,.thumbnails{margin-left:0}.thumbnails>li{float:none;margin-left:0}[class*="span"],.uneditable-input[class*="span"],.row-fluid [class*="span"]{display:block;float:none;width:100%;margin-left:0;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.span12,.row-fluid .span12{width:100%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.row-fluid 
[class*="offset"]:first-child{margin-left:0}.input-large,.input-xlarge,.input-xxlarge,input[class*="span"],select[class*="span"],textarea[class*="span"],.uneditable-input{display:block;width:100%;min-height:30px;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.input-prepend input,.input-append input,.input-prepend input[class*="span"],.input-append input[class*="span"]{display:inline-block;width:auto}.controls-row [class*="span"]+[class*="span"]{margin-left:0}.modal{position:fixed;top:20px;right:20px;left:20px;width:auto;margin:0}.modal.fade{top:-100px}.modal.fade.in{top:20px}}@media(max-width:480px){.nav-collapse{-webkit-transform:translate3d(0,0,0)}.page-header h1 small{display:block;line-height:20px}input[type="checkbox"],input[type="radio"]{border:1px solid #ccc}.form-horizontal .control-label{float:none;width:auto;padding-top:0;text-align:left}.form-horizontal .controls{margin-left:0}.form-horizontal .control-list{padding-top:0}.form-horizontal .form-actions{padding-right:10px;padding-left:10px}.media .pull-left,.media .pull-right{display:block;float:none;margin-bottom:10px}.media-object{margin-right:0;margin-left:0}.modal{top:10px;right:10px;left:10px}.modal-header .close{padding:10px;margin:-10px}.carousel-caption{position:static}}@media(max-width:979px){body{padding-top:0}.navbar-fixed-top,.navbar-fixed-bottom{position:static}.navbar-fixed-top{margin-bottom:20px}.navbar-fixed-bottom{margin-top:20px}.navbar-fixed-top .navbar-inner,.navbar-fixed-bottom .navbar-inner{padding:5px}.navbar .container{width:auto;padding:0}.navbar .brand{padding-right:10px;padding-left:10px;margin:0 0 0 -5px}.nav-collapse{clear:both}.nav-collapse .nav{float:none;margin:0 0 10px}.nav-collapse .nav>li{float:none}.nav-collapse .nav>li>a{margin-bottom:2px}.nav-collapse .nav>.divider-vertical{display:none}.nav-collapse .nav .nav-header{color:#777;text-shadow:none}.nav-collapse .nav>li>a,.nav-collapse .dropdown-menu a{padding:9px 15px;font-weight:bold;color:#777;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}.nav-collapse .btn{padding:4px 10px 4px;font-weight:normal;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.nav-collapse .dropdown-menu li+li a{margin-bottom:2px}.nav-collapse .nav>li>a:hover,.nav-collapse .dropdown-menu a:hover{background-color:#f2f2f2}.navbar-inverse .nav-collapse .nav>li>a,.navbar-inverse .nav-collapse .dropdown-menu a{color:#999}.navbar-inverse .nav-collapse .nav>li>a:hover,.navbar-inverse .nav-collapse .dropdown-menu a:hover{background-color:#111}.nav-collapse.in .btn-group{padding:0;margin-top:5px}.nav-collapse .dropdown-menu{position:static;top:auto;left:auto;display:none;float:none;max-width:none;padding:0;margin:0 15px;background-color:transparent;border:0;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0;-webkit-box-shadow:none;-moz-box-shadow:none;box-shadow:none}.nav-collapse .open>.dropdown-menu{display:block}.nav-collapse .dropdown-menu:before,.nav-collapse .dropdown-menu:after{display:none}.nav-collapse .dropdown-menu .divider{display:none}.nav-collapse .nav>li>.dropdown-menu:before,.nav-collapse .nav>li>.dropdown-menu:after{display:none}.nav-collapse .navbar-form,.nav-collapse .navbar-search{float:none;padding:10px 15px;margin:10px 0;border-top:1px solid #f2f2f2;border-bottom:1px solid #f2f2f2;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1);-moz-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1);box-shadow:inset 0 1px 0 
rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1)}.navbar-inverse .nav-collapse .navbar-form,.navbar-inverse .nav-collapse .navbar-search{border-top-color:#111;border-bottom-color:#111}.navbar .nav-collapse .nav.pull-right{float:none;margin-left:0}.nav-collapse,.nav-collapse.collapse{height:0;overflow:hidden}.navbar .btn-navbar{display:block}.navbar-static .navbar-inner{padding-right:10px;padding-left:10px}}@media(min-width:980px){.nav-collapse.collapse{height:auto!important;overflow:visible!important}} diff --git a/clicon/web/static/bootstrap/css/bootstrap.css b/clicon/web/static/bootstrap/css/bootstrap.css deleted file mode 100644 index 8ab3cef..0000000 --- a/clicon/web/static/bootstrap/css/bootstrap.css +++ /dev/null @@ -1,6039 +0,0 @@ -/*! - * Bootstrap v2.2.2 - * - * Copyright 2012 Twitter, Inc - * Licensed under the Apache License v2.0 - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Designed and built with all the love in the world @twitter by @mdo and @fat. - */ - -article, -aside, -details, -figcaption, -figure, -footer, -header, -hgroup, -nav, -section { - display: block; -} - -audio, -canvas, -video { - display: inline-block; - *display: inline; - *zoom: 1; -} - -audio:not([controls]) { - display: none; -} - -html { - font-size: 100%; - -webkit-text-size-adjust: 100%; - -ms-text-size-adjust: 100%; -} - -a:focus { - outline: thin dotted #333; - outline: 5px auto -webkit-focus-ring-color; - outline-offset: -2px; -} - -a:hover, -a:active { - outline: 0; -} - -sub, -sup { - position: relative; - font-size: 75%; - line-height: 0; - vertical-align: baseline; -} - -sup { - top: -0.5em; -} - -sub { - bottom: -0.25em; -} - -img { - width: auto\9; - height: auto; - max-width: 100%; - vertical-align: middle; - border: 0; - -ms-interpolation-mode: bicubic; -} - -#map_canvas img, -.google-maps img { - max-width: none; -} - -button, -input, -select, -textarea { - margin: 0; - font-size: 100%; - vertical-align: middle; -} - -button, -input { - *overflow: visible; - line-height: normal; -} - -button::-moz-focus-inner, -input::-moz-focus-inner { - padding: 0; - border: 0; -} - -button, -html input[type="button"], -input[type="reset"], -input[type="submit"] { - cursor: pointer; - -webkit-appearance: button; -} - -label, -select, -button, -input[type="button"], -input[type="reset"], -input[type="submit"], -input[type="radio"], -input[type="checkbox"] { - cursor: pointer; -} - -input[type="search"] { - -webkit-box-sizing: content-box; - -moz-box-sizing: content-box; - box-sizing: content-box; - -webkit-appearance: textfield; -} - -input[type="search"]::-webkit-search-decoration, -input[type="search"]::-webkit-search-cancel-button { - -webkit-appearance: none; -} - -textarea { - overflow: auto; - vertical-align: top; -} - -@media print { - * { - color: #000 !important; - text-shadow: none !important; - background: transparent !important; - box-shadow: none !important; - } - a, - a:visited { - text-decoration: underline; - } - a[href]:after { - content: " (" attr(href) ")"; - } - abbr[title]:after { - content: " (" attr(title) ")"; - } - .ir a:after, - a[href^="javascript:"]:after, - a[href^="#"]:after { - content: ""; - } - pre, - blockquote { - border: 1px solid #999; - page-break-inside: avoid; - } - thead { - display: table-header-group; - } - tr, - img { - page-break-inside: avoid; - } - img { - max-width: 100% !important; - } - @page { - margin: 0.5cm; - } - p, - h2, - h3 { - orphans: 3; - widows: 3; - } - h2, - h3 { - page-break-after: avoid; - } -} - -.clearfix { - *zoom: 1; -} - 
-.clearfix:before, -.clearfix:after { - display: table; - line-height: 0; - content: ""; -} - -.clearfix:after { - clear: both; -} - -.hide-text { - font: 0/0 a; - color: transparent; - text-shadow: none; - background-color: transparent; - border: 0; -} - -.input-block-level { - display: block; - width: 100%; - min-height: 30px; - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; -} - -body { - margin: 0; - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 14px; - line-height: 20px; - color: #333333; - background-color: #ffffff; -} - -a { - color: #0088cc; - text-decoration: none; -} - -a:hover { - color: #005580; - text-decoration: underline; -} - -.img-rounded { - -webkit-border-radius: 6px; - -moz-border-radius: 6px; - border-radius: 6px; -} - -.img-polaroid { - padding: 4px; - background-color: #fff; - border: 1px solid #ccc; - border: 1px solid rgba(0, 0, 0, 0.2); - -webkit-box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1); - -moz-box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1); - box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1); -} - -.img-circle { - -webkit-border-radius: 500px; - -moz-border-radius: 500px; - border-radius: 500px; -} - -.row { - margin-left: -20px; - *zoom: 1; -} - -.row:before, -.row:after { - display: table; - line-height: 0; - content: ""; -} - -.row:after { - clear: both; -} - -[class*="span"] { - float: left; - min-height: 1px; - margin-left: 20px; -} - -.container, -.navbar-static-top .container, -.navbar-fixed-top .container, -.navbar-fixed-bottom .container { - width: 940px; -} - -.span12 { - width: 940px; -} - -.span11 { - width: 860px; -} - -.span10 { - width: 780px; -} - -.span9 { - width: 700px; -} - -.span8 { - width: 620px; -} - -.span7 { - width: 540px; -} - -.span6 { - width: 460px; -} - -.span5 { - width: 380px; -} - -.span4 { - width: 300px; -} - -.span3 { - width: 220px; -} - -.span2 { - width: 140px; -} - -.span1 { - width: 60px; -} - -.offset12 { - margin-left: 980px; -} - -.offset11 { - margin-left: 900px; -} - -.offset10 { - margin-left: 820px; -} - -.offset9 { - margin-left: 740px; -} - -.offset8 { - margin-left: 660px; -} - -.offset7 { - margin-left: 580px; -} - -.offset6 { - margin-left: 500px; -} - -.offset5 { - margin-left: 420px; -} - -.offset4 { - margin-left: 340px; -} - -.offset3 { - margin-left: 260px; -} - -.offset2 { - margin-left: 180px; -} - -.offset1 { - margin-left: 100px; -} - -.row-fluid { - width: 100%; - *zoom: 1; -} - -.row-fluid:before, -.row-fluid:after { - display: table; - line-height: 0; - content: ""; -} - -.row-fluid:after { - clear: both; -} - -.row-fluid [class*="span"] { - display: block; - float: left; - width: 100%; - min-height: 30px; - margin-left: 2.127659574468085%; - *margin-left: 2.074468085106383%; - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; -} - -.row-fluid [class*="span"]:first-child { - margin-left: 0; -} - -.row-fluid .controls-row [class*="span"] + [class*="span"] { - margin-left: 2.127659574468085%; -} - -.row-fluid .span12 { - width: 100%; - *width: 99.94680851063829%; -} - -.row-fluid .span11 { - width: 91.48936170212765%; - *width: 91.43617021276594%; -} - -.row-fluid .span10 { - width: 82.97872340425532%; - *width: 82.92553191489361%; -} - -.row-fluid .span9 { - width: 74.46808510638297%; - *width: 74.41489361702126%; -} - -.row-fluid .span8 { - width: 65.95744680851064%; - *width: 65.90425531914893%; -} - -.row-fluid .span7 { - width: 57.44680851063829%; - *width: 57.39361702127659%; -} - -.row-fluid .span6 { 
- width: 48.93617021276595%; - *width: 48.88297872340425%; -} - -.row-fluid .span5 { - width: 40.42553191489362%; - *width: 40.37234042553192%; -} - -.row-fluid .span4 { - width: 31.914893617021278%; - *width: 31.861702127659576%; -} - -.row-fluid .span3 { - width: 23.404255319148934%; - *width: 23.351063829787233%; -} - -.row-fluid .span2 { - width: 14.893617021276595%; - *width: 14.840425531914894%; -} - -.row-fluid .span1 { - width: 6.382978723404255%; - *width: 6.329787234042553%; -} - -.row-fluid .offset12 { - margin-left: 104.25531914893617%; - *margin-left: 104.14893617021275%; -} - -.row-fluid .offset12:first-child { - margin-left: 102.12765957446808%; - *margin-left: 102.02127659574467%; -} - -.row-fluid .offset11 { - margin-left: 95.74468085106382%; - *margin-left: 95.6382978723404%; -} - -.row-fluid .offset11:first-child { - margin-left: 93.61702127659574%; - *margin-left: 93.51063829787232%; -} - -.row-fluid .offset10 { - margin-left: 87.23404255319149%; - *margin-left: 87.12765957446807%; -} - -.row-fluid .offset10:first-child { - margin-left: 85.1063829787234%; - *margin-left: 84.99999999999999%; -} - -.row-fluid .offset9 { - margin-left: 78.72340425531914%; - *margin-left: 78.61702127659572%; -} - -.row-fluid .offset9:first-child { - margin-left: 76.59574468085106%; - *margin-left: 76.48936170212764%; -} - -.row-fluid .offset8 { - margin-left: 70.2127659574468%; - *margin-left: 70.10638297872339%; -} - -.row-fluid .offset8:first-child { - margin-left: 68.08510638297872%; - *margin-left: 67.9787234042553%; -} - -.row-fluid .offset7 { - margin-left: 61.70212765957446%; - *margin-left: 61.59574468085106%; -} - -.row-fluid .offset7:first-child { - margin-left: 59.574468085106375%; - *margin-left: 59.46808510638297%; -} - -.row-fluid .offset6 { - margin-left: 53.191489361702125%; - *margin-left: 53.085106382978715%; -} - -.row-fluid .offset6:first-child { - margin-left: 51.063829787234035%; - *margin-left: 50.95744680851063%; -} - -.row-fluid .offset5 { - margin-left: 44.68085106382979%; - *margin-left: 44.57446808510638%; -} - -.row-fluid .offset5:first-child { - margin-left: 42.5531914893617%; - *margin-left: 42.4468085106383%; -} - -.row-fluid .offset4 { - margin-left: 36.170212765957444%; - *margin-left: 36.06382978723405%; -} - -.row-fluid .offset4:first-child { - margin-left: 34.04255319148936%; - *margin-left: 33.93617021276596%; -} - -.row-fluid .offset3 { - margin-left: 27.659574468085104%; - *margin-left: 27.5531914893617%; -} - -.row-fluid .offset3:first-child { - margin-left: 25.53191489361702%; - *margin-left: 25.425531914893618%; -} - -.row-fluid .offset2 { - margin-left: 19.148936170212764%; - *margin-left: 19.04255319148936%; -} - -.row-fluid .offset2:first-child { - margin-left: 17.02127659574468%; - *margin-left: 16.914893617021278%; -} - -.row-fluid .offset1 { - margin-left: 10.638297872340425%; - *margin-left: 10.53191489361702%; -} - -.row-fluid .offset1:first-child { - margin-left: 8.51063829787234%; - *margin-left: 8.404255319148938%; -} - -[class*="span"].hide, -.row-fluid [class*="span"].hide { - display: none; -} - -[class*="span"].pull-right, -.row-fluid [class*="span"].pull-right { - float: right; -} - -.container { - margin-right: auto; - margin-left: auto; - *zoom: 1; -} - -.container:before, -.container:after { - display: table; - line-height: 0; - content: ""; -} - -.container:after { - clear: both; -} - -.container-fluid { - padding-right: 20px; - padding-left: 20px; - *zoom: 1; -} - -.container-fluid:before, -.container-fluid:after { - display: 
[... Bootstrap stylesheet diff hunk: removed rules covering typography, forms and form states, tables, glyphicon sprite icons, dropdowns, wells, the close button, buttons and button groups, alerts, navs, tabs/pills, and the navbar ...]
40px; - color: #777777; -} - -.navbar-link { - color: #777777; -} - -.navbar-link:hover { - color: #333333; -} - -.navbar .divider-vertical { - height: 40px; - margin: 0 9px; - border-right: 1px solid #ffffff; - border-left: 1px solid #f2f2f2; -} - -.navbar .btn, -.navbar .btn-group { - margin-top: 5px; -} - -.navbar .btn-group .btn, -.navbar .input-prepend .btn, -.navbar .input-append .btn { - margin-top: 0; -} - -.navbar-form { - margin-bottom: 0; - *zoom: 1; -} - -.navbar-form:before, -.navbar-form:after { - display: table; - line-height: 0; - content: ""; -} - -.navbar-form:after { - clear: both; -} - -.navbar-form input, -.navbar-form select, -.navbar-form .radio, -.navbar-form .checkbox { - margin-top: 5px; -} - -.navbar-form input, -.navbar-form select, -.navbar-form .btn { - display: inline-block; - margin-bottom: 0; -} - -.navbar-form input[type="image"], -.navbar-form input[type="checkbox"], -.navbar-form input[type="radio"] { - margin-top: 3px; -} - -.navbar-form .input-append, -.navbar-form .input-prepend { - margin-top: 5px; - white-space: nowrap; -} - -.navbar-form .input-append input, -.navbar-form .input-prepend input { - margin-top: 0; -} - -.navbar-search { - position: relative; - float: left; - margin-top: 5px; - margin-bottom: 0; -} - -.navbar-search .search-query { - padding: 4px 14px; - margin-bottom: 0; - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 13px; - font-weight: normal; - line-height: 1; - -webkit-border-radius: 15px; - -moz-border-radius: 15px; - border-radius: 15px; -} - -.navbar-static-top { - position: static; - margin-bottom: 0; -} - -.navbar-static-top .navbar-inner { - -webkit-border-radius: 0; - -moz-border-radius: 0; - border-radius: 0; -} - -.navbar-fixed-top, -.navbar-fixed-bottom { - position: fixed; - right: 0; - left: 0; - z-index: 1030; - margin-bottom: 0; -} - -.navbar-fixed-top .navbar-inner, -.navbar-static-top .navbar-inner { - border-width: 0 0 1px; -} - -.navbar-fixed-bottom .navbar-inner { - border-width: 1px 0 0; -} - -.navbar-fixed-top .navbar-inner, -.navbar-fixed-bottom .navbar-inner { - padding-right: 0; - padding-left: 0; - -webkit-border-radius: 0; - -moz-border-radius: 0; - border-radius: 0; -} - -.navbar-static-top .container, -.navbar-fixed-top .container, -.navbar-fixed-bottom .container { - width: 940px; -} - -.navbar-fixed-top { - top: 0; -} - -.navbar-fixed-top .navbar-inner, -.navbar-static-top .navbar-inner { - -webkit-box-shadow: 0 1px 10px rgba(0, 0, 0, 0.1); - -moz-box-shadow: 0 1px 10px rgba(0, 0, 0, 0.1); - box-shadow: 0 1px 10px rgba(0, 0, 0, 0.1); -} - -.navbar-fixed-bottom { - bottom: 0; -} - -.navbar-fixed-bottom .navbar-inner { - -webkit-box-shadow: 0 -1px 10px rgba(0, 0, 0, 0.1); - -moz-box-shadow: 0 -1px 10px rgba(0, 0, 0, 0.1); - box-shadow: 0 -1px 10px rgba(0, 0, 0, 0.1); -} - -.navbar .nav { - position: relative; - left: 0; - display: block; - float: left; - margin: 0 10px 0 0; -} - -.navbar .nav.pull-right { - float: right; - margin-right: 0; -} - -.navbar .nav > li { - float: left; -} - -.navbar .nav > li > a { - float: none; - padding: 10px 15px 10px; - color: #777777; - text-decoration: none; - text-shadow: 0 1px 0 #ffffff; -} - -.navbar .nav .dropdown-toggle .caret { - margin-top: 8px; -} - -.navbar .nav > li > a:focus, -.navbar .nav > li > a:hover { - color: #333333; - text-decoration: none; - background-color: transparent; -} - -.navbar .nav > .active > a, -.navbar .nav > .active > a:hover, -.navbar .nav > .active > a:focus { - color: #555555; - text-decoration: none; - 
background-color: #e5e5e5; - -webkit-box-shadow: inset 0 3px 8px rgba(0, 0, 0, 0.125); - -moz-box-shadow: inset 0 3px 8px rgba(0, 0, 0, 0.125); - box-shadow: inset 0 3px 8px rgba(0, 0, 0, 0.125); -} - -.navbar .btn-navbar { - display: none; - float: right; - padding: 7px 10px; - margin-right: 5px; - margin-left: 5px; - color: #ffffff; - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); - background-color: #ededed; - *background-color: #e5e5e5; - background-image: -moz-linear-gradient(top, #f2f2f2, #e5e5e5); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#f2f2f2), to(#e5e5e5)); - background-image: -webkit-linear-gradient(top, #f2f2f2, #e5e5e5); - background-image: -o-linear-gradient(top, #f2f2f2, #e5e5e5); - background-image: linear-gradient(to bottom, #f2f2f2, #e5e5e5); - background-repeat: repeat-x; - border-color: #e5e5e5 #e5e5e5 #bfbfbf; - border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2f2f2', endColorstr='#ffe5e5e5', GradientType=0); - filter: progid:DXImageTransform.Microsoft.gradient(enabled=false); - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.075); - -moz-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.075); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.075); -} - -.navbar .btn-navbar:hover, -.navbar .btn-navbar:active, -.navbar .btn-navbar.active, -.navbar .btn-navbar.disabled, -.navbar .btn-navbar[disabled] { - color: #ffffff; - background-color: #e5e5e5; - *background-color: #d9d9d9; -} - -.navbar .btn-navbar:active, -.navbar .btn-navbar.active { - background-color: #cccccc \9; -} - -.navbar .btn-navbar .icon-bar { - display: block; - width: 18px; - height: 2px; - background-color: #f5f5f5; - -webkit-border-radius: 1px; - -moz-border-radius: 1px; - border-radius: 1px; - -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.25); - -moz-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.25); - box-shadow: 0 1px 0 rgba(0, 0, 0, 0.25); -} - -.btn-navbar .icon-bar + .icon-bar { - margin-top: 3px; -} - -.navbar .nav > li > .dropdown-menu:before { - position: absolute; - top: -7px; - left: 9px; - display: inline-block; - border-right: 7px solid transparent; - border-bottom: 7px solid #ccc; - border-left: 7px solid transparent; - border-bottom-color: rgba(0, 0, 0, 0.2); - content: ''; -} - -.navbar .nav > li > .dropdown-menu:after { - position: absolute; - top: -6px; - left: 10px; - display: inline-block; - border-right: 6px solid transparent; - border-bottom: 6px solid #ffffff; - border-left: 6px solid transparent; - content: ''; -} - -.navbar-fixed-bottom .nav > li > .dropdown-menu:before { - top: auto; - bottom: -7px; - border-top: 7px solid #ccc; - border-bottom: 0; - border-top-color: rgba(0, 0, 0, 0.2); -} - -.navbar-fixed-bottom .nav > li > .dropdown-menu:after { - top: auto; - bottom: -6px; - border-top: 6px solid #ffffff; - border-bottom: 0; -} - -.navbar .nav li.dropdown > a:hover .caret { - border-top-color: #555555; - border-bottom-color: #555555; -} - -.navbar .nav li.dropdown.open > .dropdown-toggle, -.navbar .nav li.dropdown.active > .dropdown-toggle, -.navbar .nav li.dropdown.open.active > .dropdown-toggle { - color: #555555; - background-color: #e5e5e5; -} - -.navbar .nav li.dropdown > .dropdown-toggle .caret { - border-top-color: #777777; - border-bottom-color: #777777; -} - -.navbar .nav li.dropdown.open > .dropdown-toggle .caret, -.navbar .nav li.dropdown.active > 
.dropdown-toggle .caret, -.navbar .nav li.dropdown.open.active > .dropdown-toggle .caret { - border-top-color: #555555; - border-bottom-color: #555555; -} - -.navbar .pull-right > li > .dropdown-menu, -.navbar .nav > li > .dropdown-menu.pull-right { - right: 0; - left: auto; -} - -.navbar .pull-right > li > .dropdown-menu:before, -.navbar .nav > li > .dropdown-menu.pull-right:before { - right: 12px; - left: auto; -} - -.navbar .pull-right > li > .dropdown-menu:after, -.navbar .nav > li > .dropdown-menu.pull-right:after { - right: 13px; - left: auto; -} - -.navbar .pull-right > li > .dropdown-menu .dropdown-menu, -.navbar .nav > li > .dropdown-menu.pull-right .dropdown-menu { - right: 100%; - left: auto; - margin-right: -1px; - margin-left: 0; - -webkit-border-radius: 6px 0 6px 6px; - -moz-border-radius: 6px 0 6px 6px; - border-radius: 6px 0 6px 6px; -} - -.navbar-inverse .navbar-inner { - background-color: #1b1b1b; - background-image: -moz-linear-gradient(top, #222222, #111111); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#222222), to(#111111)); - background-image: -webkit-linear-gradient(top, #222222, #111111); - background-image: -o-linear-gradient(top, #222222, #111111); - background-image: linear-gradient(to bottom, #222222, #111111); - background-repeat: repeat-x; - border-color: #252525; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff222222', endColorstr='#ff111111', GradientType=0); -} - -.navbar-inverse .brand, -.navbar-inverse .nav > li > a { - color: #999999; - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); -} - -.navbar-inverse .brand:hover, -.navbar-inverse .nav > li > a:hover { - color: #ffffff; -} - -.navbar-inverse .brand { - color: #999999; -} - -.navbar-inverse .navbar-text { - color: #999999; -} - -.navbar-inverse .nav > li > a:focus, -.navbar-inverse .nav > li > a:hover { - color: #ffffff; - background-color: transparent; -} - -.navbar-inverse .nav .active > a, -.navbar-inverse .nav .active > a:hover, -.navbar-inverse .nav .active > a:focus { - color: #ffffff; - background-color: #111111; -} - -.navbar-inverse .navbar-link { - color: #999999; -} - -.navbar-inverse .navbar-link:hover { - color: #ffffff; -} - -.navbar-inverse .divider-vertical { - border-right-color: #222222; - border-left-color: #111111; -} - -.navbar-inverse .nav li.dropdown.open > .dropdown-toggle, -.navbar-inverse .nav li.dropdown.active > .dropdown-toggle, -.navbar-inverse .nav li.dropdown.open.active > .dropdown-toggle { - color: #ffffff; - background-color: #111111; -} - -.navbar-inverse .nav li.dropdown > a:hover .caret { - border-top-color: #ffffff; - border-bottom-color: #ffffff; -} - -.navbar-inverse .nav li.dropdown > .dropdown-toggle .caret { - border-top-color: #999999; - border-bottom-color: #999999; -} - -.navbar-inverse .nav li.dropdown.open > .dropdown-toggle .caret, -.navbar-inverse .nav li.dropdown.active > .dropdown-toggle .caret, -.navbar-inverse .nav li.dropdown.open.active > .dropdown-toggle .caret { - border-top-color: #ffffff; - border-bottom-color: #ffffff; -} - -.navbar-inverse .navbar-search .search-query { - color: #ffffff; - background-color: #515151; - border-color: #111111; - -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1), 0 1px 0 rgba(255, 255, 255, 0.15); - -moz-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1), 0 1px 0 rgba(255, 255, 255, 0.15); - box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1), 0 1px 0 rgba(255, 255, 255, 0.15); - -webkit-transition: none; - -moz-transition: none; - -o-transition: none; - transition: none; -} - 
-.navbar-inverse .navbar-search .search-query:-moz-placeholder { - color: #cccccc; -} - -.navbar-inverse .navbar-search .search-query:-ms-input-placeholder { - color: #cccccc; -} - -.navbar-inverse .navbar-search .search-query::-webkit-input-placeholder { - color: #cccccc; -} - -.navbar-inverse .navbar-search .search-query:focus, -.navbar-inverse .navbar-search .search-query.focused { - padding: 5px 15px; - color: #333333; - text-shadow: 0 1px 0 #ffffff; - background-color: #ffffff; - border: 0; - outline: 0; - -webkit-box-shadow: 0 0 3px rgba(0, 0, 0, 0.15); - -moz-box-shadow: 0 0 3px rgba(0, 0, 0, 0.15); - box-shadow: 0 0 3px rgba(0, 0, 0, 0.15); -} - -.navbar-inverse .btn-navbar { - color: #ffffff; - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); - background-color: #0e0e0e; - *background-color: #040404; - background-image: -moz-linear-gradient(top, #151515, #040404); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#151515), to(#040404)); - background-image: -webkit-linear-gradient(top, #151515, #040404); - background-image: -o-linear-gradient(top, #151515, #040404); - background-image: linear-gradient(to bottom, #151515, #040404); - background-repeat: repeat-x; - border-color: #040404 #040404 #000000; - border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff151515', endColorstr='#ff040404', GradientType=0); - filter: progid:DXImageTransform.Microsoft.gradient(enabled=false); -} - -.navbar-inverse .btn-navbar:hover, -.navbar-inverse .btn-navbar:active, -.navbar-inverse .btn-navbar.active, -.navbar-inverse .btn-navbar.disabled, -.navbar-inverse .btn-navbar[disabled] { - color: #ffffff; - background-color: #040404; - *background-color: #000000; -} - -.navbar-inverse .btn-navbar:active, -.navbar-inverse .btn-navbar.active { - background-color: #000000 \9; -} - -.breadcrumb { - padding: 8px 15px; - margin: 0 0 20px; - list-style: none; - background-color: #f5f5f5; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; -} - -.breadcrumb > li { - display: inline-block; - *display: inline; - text-shadow: 0 1px 0 #ffffff; - *zoom: 1; -} - -.breadcrumb > li > .divider { - padding: 0 5px; - color: #ccc; -} - -.breadcrumb > .active { - color: #999999; -} - -.pagination { - margin: 20px 0; -} - -.pagination ul { - display: inline-block; - *display: inline; - margin-bottom: 0; - margin-left: 0; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; - *zoom: 1; - -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05); - -moz-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05); - box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05); -} - -.pagination ul > li { - display: inline; -} - -.pagination ul > li > a, -.pagination ul > li > span { - float: left; - padding: 4px 12px; - line-height: 20px; - text-decoration: none; - background-color: #ffffff; - border: 1px solid #dddddd; - border-left-width: 0; -} - -.pagination ul > li > a:hover, -.pagination ul > .active > a, -.pagination ul > .active > span { - background-color: #f5f5f5; -} - -.pagination ul > .active > a, -.pagination ul > .active > span { - color: #999999; - cursor: default; -} - -.pagination ul > .disabled > span, -.pagination ul > .disabled > a, -.pagination ul > .disabled > a:hover { - color: #999999; - cursor: default; - background-color: transparent; -} - -.pagination ul > li:first-child > a, -.pagination ul > li:first-child > span { - border-left-width: 1px; - -webkit-border-bottom-left-radius: 4px; - 
border-bottom-left-radius: 4px; - -webkit-border-top-left-radius: 4px; - border-top-left-radius: 4px; - -moz-border-radius-bottomleft: 4px; - -moz-border-radius-topleft: 4px; -} - -.pagination ul > li:last-child > a, -.pagination ul > li:last-child > span { - -webkit-border-top-right-radius: 4px; - border-top-right-radius: 4px; - -webkit-border-bottom-right-radius: 4px; - border-bottom-right-radius: 4px; - -moz-border-radius-topright: 4px; - -moz-border-radius-bottomright: 4px; -} - -.pagination-centered { - text-align: center; -} - -.pagination-right { - text-align: right; -} - -.pagination-large ul > li > a, -.pagination-large ul > li > span { - padding: 11px 19px; - font-size: 17.5px; -} - -.pagination-large ul > li:first-child > a, -.pagination-large ul > li:first-child > span { - -webkit-border-bottom-left-radius: 6px; - border-bottom-left-radius: 6px; - -webkit-border-top-left-radius: 6px; - border-top-left-radius: 6px; - -moz-border-radius-bottomleft: 6px; - -moz-border-radius-topleft: 6px; -} - -.pagination-large ul > li:last-child > a, -.pagination-large ul > li:last-child > span { - -webkit-border-top-right-radius: 6px; - border-top-right-radius: 6px; - -webkit-border-bottom-right-radius: 6px; - border-bottom-right-radius: 6px; - -moz-border-radius-topright: 6px; - -moz-border-radius-bottomright: 6px; -} - -.pagination-mini ul > li:first-child > a, -.pagination-small ul > li:first-child > a, -.pagination-mini ul > li:first-child > span, -.pagination-small ul > li:first-child > span { - -webkit-border-bottom-left-radius: 3px; - border-bottom-left-radius: 3px; - -webkit-border-top-left-radius: 3px; - border-top-left-radius: 3px; - -moz-border-radius-bottomleft: 3px; - -moz-border-radius-topleft: 3px; -} - -.pagination-mini ul > li:last-child > a, -.pagination-small ul > li:last-child > a, -.pagination-mini ul > li:last-child > span, -.pagination-small ul > li:last-child > span { - -webkit-border-top-right-radius: 3px; - border-top-right-radius: 3px; - -webkit-border-bottom-right-radius: 3px; - border-bottom-right-radius: 3px; - -moz-border-radius-topright: 3px; - -moz-border-radius-bottomright: 3px; -} - -.pagination-small ul > li > a, -.pagination-small ul > li > span { - padding: 2px 10px; - font-size: 11.9px; -} - -.pagination-mini ul > li > a, -.pagination-mini ul > li > span { - padding: 0 6px; - font-size: 10.5px; -} - -.pager { - margin: 20px 0; - text-align: center; - list-style: none; - *zoom: 1; -} - -.pager:before, -.pager:after { - display: table; - line-height: 0; - content: ""; -} - -.pager:after { - clear: both; -} - -.pager li { - display: inline; -} - -.pager li > a, -.pager li > span { - display: inline-block; - padding: 5px 14px; - background-color: #fff; - border: 1px solid #ddd; - -webkit-border-radius: 15px; - -moz-border-radius: 15px; - border-radius: 15px; -} - -.pager li > a:hover { - text-decoration: none; - background-color: #f5f5f5; -} - -.pager .next > a, -.pager .next > span { - float: right; -} - -.pager .previous > a, -.pager .previous > span { - float: left; -} - -.pager .disabled > a, -.pager .disabled > a:hover, -.pager .disabled > span { - color: #999999; - cursor: default; - background-color: #fff; -} - -.modal-backdrop { - position: fixed; - top: 0; - right: 0; - bottom: 0; - left: 0; - z-index: 1040; - background-color: #000000; -} - -.modal-backdrop.fade { - opacity: 0; -} - -.modal-backdrop, -.modal-backdrop.fade.in { - opacity: 0.8; - filter: alpha(opacity=80); -} - -.modal { - position: fixed; - top: 10%; - left: 50%; - z-index: 1050; - 
width: 560px; - margin-left: -280px; - background-color: #ffffff; - border: 1px solid #999; - border: 1px solid rgba(0, 0, 0, 0.3); - *border: 1px solid #999; - -webkit-border-radius: 6px; - -moz-border-radius: 6px; - border-radius: 6px; - outline: none; - -webkit-box-shadow: 0 3px 7px rgba(0, 0, 0, 0.3); - -moz-box-shadow: 0 3px 7px rgba(0, 0, 0, 0.3); - box-shadow: 0 3px 7px rgba(0, 0, 0, 0.3); - -webkit-background-clip: padding-box; - -moz-background-clip: padding-box; - background-clip: padding-box; -} - -.modal.fade { - top: -25%; - -webkit-transition: opacity 0.3s linear, top 0.3s ease-out; - -moz-transition: opacity 0.3s linear, top 0.3s ease-out; - -o-transition: opacity 0.3s linear, top 0.3s ease-out; - transition: opacity 0.3s linear, top 0.3s ease-out; -} - -.modal.fade.in { - top: 10%; -} - -.modal-header { - padding: 9px 15px; - border-bottom: 1px solid #eee; -} - -.modal-header .close { - margin-top: 2px; -} - -.modal-header h3 { - margin: 0; - line-height: 30px; -} - -.modal-body { - position: relative; - max-height: 400px; - padding: 15px; - overflow-y: auto; -} - -.modal-form { - margin-bottom: 0; -} - -.modal-footer { - padding: 14px 15px 15px; - margin-bottom: 0; - text-align: right; - background-color: #f5f5f5; - border-top: 1px solid #ddd; - -webkit-border-radius: 0 0 6px 6px; - -moz-border-radius: 0 0 6px 6px; - border-radius: 0 0 6px 6px; - *zoom: 1; - -webkit-box-shadow: inset 0 1px 0 #ffffff; - -moz-box-shadow: inset 0 1px 0 #ffffff; - box-shadow: inset 0 1px 0 #ffffff; -} - -.modal-footer:before, -.modal-footer:after { - display: table; - line-height: 0; - content: ""; -} - -.modal-footer:after { - clear: both; -} - -.modal-footer .btn + .btn { - margin-bottom: 0; - margin-left: 5px; -} - -.modal-footer .btn-group .btn + .btn { - margin-left: -1px; -} - -.modal-footer .btn-block + .btn-block { - margin-left: 0; -} - -.tooltip { - position: absolute; - z-index: 1030; - display: block; - padding: 5px; - font-size: 11px; - opacity: 0; - filter: alpha(opacity=0); - visibility: visible; -} - -.tooltip.in { - opacity: 0.8; - filter: alpha(opacity=80); -} - -.tooltip.top { - margin-top: -3px; -} - -.tooltip.right { - margin-left: 3px; -} - -.tooltip.bottom { - margin-top: 3px; -} - -.tooltip.left { - margin-left: -3px; -} - -.tooltip-inner { - max-width: 200px; - padding: 3px 8px; - color: #ffffff; - text-align: center; - text-decoration: none; - background-color: #000000; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; -} - -.tooltip-arrow { - position: absolute; - width: 0; - height: 0; - border-color: transparent; - border-style: solid; -} - -.tooltip.top .tooltip-arrow { - bottom: 0; - left: 50%; - margin-left: -5px; - border-top-color: #000000; - border-width: 5px 5px 0; -} - -.tooltip.right .tooltip-arrow { - top: 50%; - left: 0; - margin-top: -5px; - border-right-color: #000000; - border-width: 5px 5px 5px 0; -} - -.tooltip.left .tooltip-arrow { - top: 50%; - right: 0; - margin-top: -5px; - border-left-color: #000000; - border-width: 5px 0 5px 5px; -} - -.tooltip.bottom .tooltip-arrow { - top: 0; - left: 50%; - margin-left: -5px; - border-bottom-color: #000000; - border-width: 0 5px 5px; -} - -.popover { - position: absolute; - top: 0; - left: 0; - z-index: 1010; - display: none; - width: 236px; - padding: 1px; - text-align: left; - white-space: normal; - background-color: #ffffff; - border: 1px solid #ccc; - border: 1px solid rgba(0, 0, 0, 0.2); - -webkit-border-radius: 6px; - -moz-border-radius: 6px; - border-radius: 6px; - 
-webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); - -moz-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); - box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); - -webkit-background-clip: padding-box; - -moz-background-clip: padding; - background-clip: padding-box; -} - -.popover.top { - margin-top: -10px; -} - -.popover.right { - margin-left: 10px; -} - -.popover.bottom { - margin-top: 10px; -} - -.popover.left { - margin-left: -10px; -} - -.popover-title { - padding: 8px 14px; - margin: 0; - font-size: 14px; - font-weight: normal; - line-height: 18px; - background-color: #f7f7f7; - border-bottom: 1px solid #ebebeb; - -webkit-border-radius: 5px 5px 0 0; - -moz-border-radius: 5px 5px 0 0; - border-radius: 5px 5px 0 0; -} - -.popover-content { - padding: 9px 14px; -} - -.popover .arrow, -.popover .arrow:after { - position: absolute; - display: block; - width: 0; - height: 0; - border-color: transparent; - border-style: solid; -} - -.popover .arrow { - border-width: 11px; -} - -.popover .arrow:after { - border-width: 10px; - content: ""; -} - -.popover.top .arrow { - bottom: -11px; - left: 50%; - margin-left: -11px; - border-top-color: #999; - border-top-color: rgba(0, 0, 0, 0.25); - border-bottom-width: 0; -} - -.popover.top .arrow:after { - bottom: 1px; - margin-left: -10px; - border-top-color: #ffffff; - border-bottom-width: 0; -} - -.popover.right .arrow { - top: 50%; - left: -11px; - margin-top: -11px; - border-right-color: #999; - border-right-color: rgba(0, 0, 0, 0.25); - border-left-width: 0; -} - -.popover.right .arrow:after { - bottom: -10px; - left: 1px; - border-right-color: #ffffff; - border-left-width: 0; -} - -.popover.bottom .arrow { - top: -11px; - left: 50%; - margin-left: -11px; - border-bottom-color: #999; - border-bottom-color: rgba(0, 0, 0, 0.25); - border-top-width: 0; -} - -.popover.bottom .arrow:after { - top: 1px; - margin-left: -10px; - border-bottom-color: #ffffff; - border-top-width: 0; -} - -.popover.left .arrow { - top: 50%; - right: -11px; - margin-top: -11px; - border-left-color: #999; - border-left-color: rgba(0, 0, 0, 0.25); - border-right-width: 0; -} - -.popover.left .arrow:after { - right: 1px; - bottom: -10px; - border-left-color: #ffffff; - border-right-width: 0; -} - -.thumbnails { - margin-left: -20px; - list-style: none; - *zoom: 1; -} - -.thumbnails:before, -.thumbnails:after { - display: table; - line-height: 0; - content: ""; -} - -.thumbnails:after { - clear: both; -} - -.row-fluid .thumbnails { - margin-left: 0; -} - -.thumbnails > li { - float: left; - margin-bottom: 20px; - margin-left: 20px; -} - -.thumbnail { - display: block; - padding: 4px; - line-height: 20px; - border: 1px solid #ddd; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; - -webkit-box-shadow: 0 1px 3px rgba(0, 0, 0, 0.055); - -moz-box-shadow: 0 1px 3px rgba(0, 0, 0, 0.055); - box-shadow: 0 1px 3px rgba(0, 0, 0, 0.055); - -webkit-transition: all 0.2s ease-in-out; - -moz-transition: all 0.2s ease-in-out; - -o-transition: all 0.2s ease-in-out; - transition: all 0.2s ease-in-out; -} - -a.thumbnail:hover { - border-color: #0088cc; - -webkit-box-shadow: 0 1px 4px rgba(0, 105, 214, 0.25); - -moz-box-shadow: 0 1px 4px rgba(0, 105, 214, 0.25); - box-shadow: 0 1px 4px rgba(0, 105, 214, 0.25); -} - -.thumbnail > img { - display: block; - max-width: 100%; - margin-right: auto; - margin-left: auto; -} - -.thumbnail .caption { - padding: 9px; - color: #555555; -} - -.media, -.media-body { - overflow: hidden; - *overflow: visible; - zoom: 1; -} - -.media, -.media .media { - 
margin-top: 15px; -} - -.media:first-child { - margin-top: 0; -} - -.media-object { - display: block; -} - -.media-heading { - margin: 0 0 5px; -} - -.media .pull-left { - margin-right: 10px; -} - -.media .pull-right { - margin-left: 10px; -} - -.media-list { - margin-left: 0; - list-style: none; -} - -.label, -.badge { - display: inline-block; - padding: 2px 4px; - font-size: 11.844px; - font-weight: bold; - line-height: 14px; - color: #ffffff; - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); - white-space: nowrap; - vertical-align: baseline; - background-color: #999999; -} - -.label { - -webkit-border-radius: 3px; - -moz-border-radius: 3px; - border-radius: 3px; -} - -.badge { - padding-right: 9px; - padding-left: 9px; - -webkit-border-radius: 9px; - -moz-border-radius: 9px; - border-radius: 9px; -} - -.label:empty, -.badge:empty { - display: none; -} - -a.label:hover, -a.badge:hover { - color: #ffffff; - text-decoration: none; - cursor: pointer; -} - -.label-important, -.badge-important { - background-color: #b94a48; -} - -.label-important[href], -.badge-important[href] { - background-color: #953b39; -} - -.label-warning, -.badge-warning { - background-color: #f89406; -} - -.label-warning[href], -.badge-warning[href] { - background-color: #c67605; -} - -.label-success, -.badge-success { - background-color: #468847; -} - -.label-success[href], -.badge-success[href] { - background-color: #356635; -} - -.label-info, -.badge-info { - background-color: #3a87ad; -} - -.label-info[href], -.badge-info[href] { - background-color: #2d6987; -} - -.label-inverse, -.badge-inverse { - background-color: #333333; -} - -.label-inverse[href], -.badge-inverse[href] { - background-color: #1a1a1a; -} - -.btn .label, -.btn .badge { - position: relative; - top: -1px; -} - -.btn-mini .label, -.btn-mini .badge { - top: 0; -} - -@-webkit-keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} - -@-moz-keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} - -@-ms-keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} - -@-o-keyframes progress-bar-stripes { - from { - background-position: 0 0; - } - to { - background-position: 40px 0; - } -} - -@keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} - -.progress { - height: 20px; - margin-bottom: 20px; - overflow: hidden; - background-color: #f7f7f7; - background-image: -moz-linear-gradient(top, #f5f5f5, #f9f9f9); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#f5f5f5), to(#f9f9f9)); - background-image: -webkit-linear-gradient(top, #f5f5f5, #f9f9f9); - background-image: -o-linear-gradient(top, #f5f5f5, #f9f9f9); - background-image: linear-gradient(to bottom, #f5f5f5, #f9f9f9); - background-repeat: repeat-x; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#fff9f9f9', GradientType=0); - -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1); - -moz-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1); - box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1); -} - -.progress .bar { - float: left; - width: 0; - height: 100%; - font-size: 12px; - color: #ffffff; - text-align: center; - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); - background-color: #0e90d2; - background-image: 
-moz-linear-gradient(top, #149bdf, #0480be); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#149bdf), to(#0480be)); - background-image: -webkit-linear-gradient(top, #149bdf, #0480be); - background-image: -o-linear-gradient(top, #149bdf, #0480be); - background-image: linear-gradient(to bottom, #149bdf, #0480be); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff149bdf', endColorstr='#ff0480be', GradientType=0); - -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15); - -moz-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15); - box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15); - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; - -webkit-transition: width 0.6s ease; - -moz-transition: width 0.6s ease; - -o-transition: width 0.6s ease; - transition: width 0.6s ease; -} - -.progress .bar + .bar { - -webkit-box-shadow: inset 1px 0 0 rgba(0, 0, 0, 0.15), inset 0 -1px 0 rgba(0, 0, 0, 0.15); - -moz-box-shadow: inset 1px 0 0 rgba(0, 0, 0, 0.15), inset 0 -1px 0 rgba(0, 0, 0, 0.15); - box-shadow: inset 1px 0 0 rgba(0, 0, 0, 0.15), inset 0 -1px 0 rgba(0, 0, 0, 0.15); -} - -.progress-striped .bar { - background-color: #149bdf; - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - -webkit-background-size: 40px 40px; - -moz-background-size: 40px 40px; - -o-background-size: 40px 40px; - background-size: 40px 40px; -} - -.progress.active .bar { - -webkit-animation: progress-bar-stripes 2s linear infinite; - -moz-animation: progress-bar-stripes 2s linear infinite; - -ms-animation: progress-bar-stripes 2s linear infinite; - -o-animation: progress-bar-stripes 2s linear infinite; - animation: progress-bar-stripes 2s linear infinite; -} - -.progress-danger .bar, -.progress .bar-danger { - background-color: #dd514c; - background-image: -moz-linear-gradient(top, #ee5f5b, #c43c35); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#ee5f5b), to(#c43c35)); - background-image: -webkit-linear-gradient(top, #ee5f5b, #c43c35); - background-image: -o-linear-gradient(top, #ee5f5b, #c43c35); - background-image: linear-gradient(to bottom, #ee5f5b, #c43c35); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffee5f5b', endColorstr='#ffc43c35', GradientType=0); -} - -.progress-danger.progress-striped .bar, -.progress-striped .bar-danger { - background-color: #ee5f5b; - background-image: -webkit-gradient(linear, 0 
100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} - -.progress-success .bar, -.progress .bar-success { - background-color: #5eb95e; - background-image: -moz-linear-gradient(top, #62c462, #57a957); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#62c462), to(#57a957)); - background-image: -webkit-linear-gradient(top, #62c462, #57a957); - background-image: -o-linear-gradient(top, #62c462, #57a957); - background-image: linear-gradient(to bottom, #62c462, #57a957); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff62c462', endColorstr='#ff57a957', GradientType=0); -} - -.progress-success.progress-striped .bar, -.progress-striped .bar-success { - background-color: #62c462; - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} - -.progress-info .bar, -.progress .bar-info { - background-color: #4bb1cf; - background-image: -moz-linear-gradient(top, #5bc0de, #339bb9); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#5bc0de), to(#339bb9)); - background-image: -webkit-linear-gradient(top, #5bc0de, #339bb9); - background-image: -o-linear-gradient(top, #5bc0de, #339bb9); - background-image: linear-gradient(to bottom, #5bc0de, #339bb9); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff339bb9', GradientType=0); -} - -.progress-info.progress-striped .bar, -.progress-striped .bar-info 
{ - background-color: #5bc0de; - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} - -.progress-warning .bar, -.progress .bar-warning { - background-color: #faa732; - background-image: -moz-linear-gradient(top, #fbb450, #f89406); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#fbb450), to(#f89406)); - background-image: -webkit-linear-gradient(top, #fbb450, #f89406); - background-image: -o-linear-gradient(top, #fbb450, #f89406); - background-image: linear-gradient(to bottom, #fbb450, #f89406); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffbb450', endColorstr='#fff89406', GradientType=0); -} - -.progress-warning.progress-striped .bar, -.progress-striped .bar-warning { - background-color: #fbb450; - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} - -.accordion { - margin-bottom: 20px; -} - -.accordion-group { - margin-bottom: 2px; - border: 1px solid #e5e5e5; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; -} - -.accordion-heading { - border-bottom: 0; -} - -.accordion-heading .accordion-toggle { - display: block; - padding: 8px 15px; -} - -.accordion-toggle { - cursor: pointer; -} - -.accordion-inner { - padding: 9px 15px; - border-top: 1px solid #e5e5e5; -} - -.carousel { - position: relative; - margin-bottom: 20px; - line-height: 1; -} - -.carousel-inner { - position: 
relative; - width: 100%; - overflow: hidden; -} - -.carousel-inner > .item { - position: relative; - display: none; - -webkit-transition: 0.6s ease-in-out left; - -moz-transition: 0.6s ease-in-out left; - -o-transition: 0.6s ease-in-out left; - transition: 0.6s ease-in-out left; -} - -.carousel-inner > .item > img { - display: block; - line-height: 1; -} - -.carousel-inner > .active, -.carousel-inner > .next, -.carousel-inner > .prev { - display: block; -} - -.carousel-inner > .active { - left: 0; -} - -.carousel-inner > .next, -.carousel-inner > .prev { - position: absolute; - top: 0; - width: 100%; -} - -.carousel-inner > .next { - left: 100%; -} - -.carousel-inner > .prev { - left: -100%; -} - -.carousel-inner > .next.left, -.carousel-inner > .prev.right { - left: 0; -} - -.carousel-inner > .active.left { - left: -100%; -} - -.carousel-inner > .active.right { - left: 100%; -} - -.carousel-control { - position: absolute; - top: 40%; - left: 15px; - width: 40px; - height: 40px; - margin-top: -20px; - font-size: 60px; - font-weight: 100; - line-height: 30px; - color: #ffffff; - text-align: center; - background: #222222; - border: 3px solid #ffffff; - -webkit-border-radius: 23px; - -moz-border-radius: 23px; - border-radius: 23px; - opacity: 0.5; - filter: alpha(opacity=50); -} - -.carousel-control.right { - right: 15px; - left: auto; -} - -.carousel-control:hover { - color: #ffffff; - text-decoration: none; - opacity: 0.9; - filter: alpha(opacity=90); -} - -.carousel-caption { - position: absolute; - right: 0; - bottom: 0; - left: 0; - padding: 15px; - background: #333333; - background: rgba(0, 0, 0, 0.75); -} - -.carousel-caption h4, -.carousel-caption p { - line-height: 20px; - color: #ffffff; -} - -.carousel-caption h4 { - margin: 0 0 5px; -} - -.carousel-caption p { - margin-bottom: 0; -} - -.hero-unit { - padding: 60px; - margin-bottom: 30px; - font-size: 18px; - font-weight: 200; - line-height: 30px; - color: inherit; - background-color: #eeeeee; - -webkit-border-radius: 6px; - -moz-border-radius: 6px; - border-radius: 6px; -} - -.hero-unit h1 { - margin-bottom: 0; - font-size: 60px; - line-height: 1; - letter-spacing: -1px; - color: inherit; -} - -.hero-unit li { - line-height: 30px; -} - -.pull-right { - float: right; -} - -.pull-left { - float: left; -} - -.hide { - display: none; -} - -.show { - display: block; -} - -.invisible { - visibility: hidden; -} - -.affix { - position: fixed; -} diff --git a/clicon/web/static/bootstrap/css/bootstrap.min.css b/clicon/web/static/bootstrap/css/bootstrap.min.css deleted file mode 100644 index 140f731..0000000 --- a/clicon/web/static/bootstrap/css/bootstrap.min.css +++ /dev/null @@ -1,9 +0,0 @@ -/*! - * Bootstrap v2.2.2 - * - * Copyright 2012 Twitter, Inc - * Licensed under the Apache License v2.0 - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Designed and built with all the love in the world @twitter by @mdo and @fat. 
- */article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}audio:not([controls]){display:none}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}a:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}a:hover,a:active{outline:0}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}img{width:auto\9;height:auto;max-width:100%;vertical-align:middle;border:0;-ms-interpolation-mode:bicubic}#map_canvas img,.google-maps img{max-width:none}button,input,select,textarea{margin:0;font-size:100%;vertical-align:middle}button,input{*overflow:visible;line-height:normal}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}button,html input[type="button"],input[type="reset"],input[type="submit"]{cursor:pointer;-webkit-appearance:button}label,select,button,input[type="button"],input[type="reset"],input[type="submit"],input[type="radio"],input[type="checkbox"]{cursor:pointer}input[type="search"]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type="search"]::-webkit-search-decoration,input[type="search"]::-webkit-search-cancel-button{-webkit-appearance:none}textarea{overflow:auto;vertical-align:top}@media print{*{color:#000!important;text-shadow:none!important;background:transparent!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;line-height:0;content:""}.clearfix:after{clear:both}.hide-text{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.input-block-level{display:block;width:100%;min-height:30px;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}body{margin:0;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:20px;color:#333;background-color:#fff}a{color:#08c;text-decoration:none}a:hover{color:#005580;text-decoration:underline}.img-rounded{-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.img-polaroid{padding:4px;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.2);-webkit-box-shadow:0 1px 3px rgba(0,0,0,0.1);-moz-box-shadow:0 1px 3px rgba(0,0,0,0.1);box-shadow:0 1px 3px rgba(0,0,0,0.1)}.img-circle{-webkit-border-radius:500px;-moz-border-radius:500px;border-radius:500px}.row{margin-left:-20px;*zoom:1}.row:before,.row:after{display:table;line-height:0;content:""}.row:after{clear:both}[class*="span"]{float:left;min-height:1px;margin-left:20px}.container,.navbar-static-top .container,.navbar-fixed-top .container,.navbar-fixed-bottom 
.container{width:940px}.span12{width:940px}.span11{width:860px}.span10{width:780px}.span9{width:700px}.span8{width:620px}.span7{width:540px}.span6{width:460px}.span5{width:380px}.span4{width:300px}.span3{width:220px}.span2{width:140px}.span1{width:60px}.offset12{margin-left:980px}.offset11{margin-left:900px}.offset10{margin-left:820px}.offset9{margin-left:740px}.offset8{margin-left:660px}.offset7{margin-left:580px}.offset6{margin-left:500px}.offset5{margin-left:420px}.offset4{margin-left:340px}.offset3{margin-left:260px}.offset2{margin-left:180px}.offset1{margin-left:100px}.row-fluid{width:100%;*zoom:1}.row-fluid:before,.row-fluid:after{display:table;line-height:0;content:""}.row-fluid:after{clear:both}.row-fluid [class*="span"]{display:block;float:left;width:100%;min-height:30px;margin-left:2.127659574468085%;*margin-left:2.074468085106383%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.row-fluid [class*="span"]:first-child{margin-left:0}.row-fluid .controls-row [class*="span"]+[class*="span"]{margin-left:2.127659574468085%}.row-fluid .span12{width:100%;*width:99.94680851063829%}.row-fluid .span11{width:91.48936170212765%;*width:91.43617021276594%}.row-fluid .span10{width:82.97872340425532%;*width:82.92553191489361%}.row-fluid .span9{width:74.46808510638297%;*width:74.41489361702126%}.row-fluid .span8{width:65.95744680851064%;*width:65.90425531914893%}.row-fluid .span7{width:57.44680851063829%;*width:57.39361702127659%}.row-fluid .span6{width:48.93617021276595%;*width:48.88297872340425%}.row-fluid .span5{width:40.42553191489362%;*width:40.37234042553192%}.row-fluid .span4{width:31.914893617021278%;*width:31.861702127659576%}.row-fluid .span3{width:23.404255319148934%;*width:23.351063829787233%}.row-fluid .span2{width:14.893617021276595%;*width:14.840425531914894%}.row-fluid .span1{width:6.382978723404255%;*width:6.329787234042553%}.row-fluid .offset12{margin-left:104.25531914893617%;*margin-left:104.14893617021275%}.row-fluid .offset12:first-child{margin-left:102.12765957446808%;*margin-left:102.02127659574467%}.row-fluid .offset11{margin-left:95.74468085106382%;*margin-left:95.6382978723404%}.row-fluid .offset11:first-child{margin-left:93.61702127659574%;*margin-left:93.51063829787232%}.row-fluid .offset10{margin-left:87.23404255319149%;*margin-left:87.12765957446807%}.row-fluid .offset10:first-child{margin-left:85.1063829787234%;*margin-left:84.99999999999999%}.row-fluid .offset9{margin-left:78.72340425531914%;*margin-left:78.61702127659572%}.row-fluid .offset9:first-child{margin-left:76.59574468085106%;*margin-left:76.48936170212764%}.row-fluid .offset8{margin-left:70.2127659574468%;*margin-left:70.10638297872339%}.row-fluid .offset8:first-child{margin-left:68.08510638297872%;*margin-left:67.9787234042553%}.row-fluid .offset7{margin-left:61.70212765957446%;*margin-left:61.59574468085106%}.row-fluid .offset7:first-child{margin-left:59.574468085106375%;*margin-left:59.46808510638297%}.row-fluid .offset6{margin-left:53.191489361702125%;*margin-left:53.085106382978715%}.row-fluid .offset6:first-child{margin-left:51.063829787234035%;*margin-left:50.95744680851063%}.row-fluid .offset5{margin-left:44.68085106382979%;*margin-left:44.57446808510638%}.row-fluid .offset5:first-child{margin-left:42.5531914893617%;*margin-left:42.4468085106383%}.row-fluid .offset4{margin-left:36.170212765957444%;*margin-left:36.06382978723405%}.row-fluid .offset4:first-child{margin-left:34.04255319148936%;*margin-left:33.93617021276596%}.row-fluid 
.offset3{margin-left:27.659574468085104%;*margin-left:27.5531914893617%}.row-fluid .offset3:first-child{margin-left:25.53191489361702%;*margin-left:25.425531914893618%}.row-fluid .offset2{margin-left:19.148936170212764%;*margin-left:19.04255319148936%}.row-fluid .offset2:first-child{margin-left:17.02127659574468%;*margin-left:16.914893617021278%}.row-fluid .offset1{margin-left:10.638297872340425%;*margin-left:10.53191489361702%}.row-fluid .offset1:first-child{margin-left:8.51063829787234%;*margin-left:8.404255319148938%}[class*="span"].hide,.row-fluid [class*="span"].hide{display:none}[class*="span"].pull-right,.row-fluid [class*="span"].pull-right{float:right}.container{margin-right:auto;margin-left:auto;*zoom:1}.container:before,.container:after{display:table;line-height:0;content:""}.container:after{clear:both}.container-fluid{padding-right:20px;padding-left:20px;*zoom:1}.container-fluid:before,.container-fluid:after{display:table;line-height:0;content:""}.container-fluid:after{clear:both}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:21px;font-weight:200;line-height:30px}small{font-size:85%}strong{font-weight:bold}em{font-style:italic}cite{font-style:normal}.muted{color:#999}a.muted:hover{color:#808080}.text-warning{color:#c09853}a.text-warning:hover{color:#a47e3c}.text-error{color:#b94a48}a.text-error:hover{color:#953b39}.text-info{color:#3a87ad}a.text-info:hover{color:#2d6987}.text-success{color:#468847}a.text-success:hover{color:#356635}h1,h2,h3,h4,h5,h6{margin:10px 0;font-family:inherit;font-weight:bold;line-height:20px;color:inherit;text-rendering:optimizelegibility}h1 small,h2 small,h3 small,h4 small,h5 small,h6 small{font-weight:normal;line-height:1;color:#999}h1,h2,h3{line-height:40px}h1{font-size:38.5px}h2{font-size:31.5px}h3{font-size:24.5px}h4{font-size:17.5px}h5{font-size:14px}h6{font-size:11.9px}h1 small{font-size:24.5px}h2 small{font-size:17.5px}h3 small{font-size:14px}h4 small{font-size:14px}.page-header{padding-bottom:9px;margin:20px 0 30px;border-bottom:1px solid #eee}ul,ol{padding:0;margin:0 0 10px 25px}ul ul,ul ol,ol ol,ol ul{margin-bottom:0}li{line-height:20px}ul.unstyled,ol.unstyled{margin-left:0;list-style:none}ul.inline,ol.inline{margin-left:0;list-style:none}ul.inline>li,ol.inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-bottom:20px}dt,dd{line-height:20px}dt{font-weight:bold}dd{margin-left:10px}.dl-horizontal{*zoom:1}.dl-horizontal:before,.dl-horizontal:after{display:table;line-height:0;content:""}.dl-horizontal:after{clear:both}.dl-horizontal dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}hr{margin:20px 0;border:0;border-top:1px solid #eee;border-bottom:1px solid #fff}abbr[title],abbr[data-original-title]{cursor:help;border-bottom:1px dotted #999}abbr.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:0 0 0 15px;margin:0 0 20px;border-left:5px solid #eee}blockquote p{margin-bottom:0;font-size:16px;font-weight:300;line-height:25px}blockquote small{display:block;line-height:20px;color:#999}blockquote small:before{content:'\2014 \00A0'}blockquote.pull-right{float:right;padding-right:15px;padding-left:0;border-right:5px solid #eee;border-left:0}blockquote.pull-right p,blockquote.pull-right small{text-align:right}blockquote.pull-right small:before{content:''}blockquote.pull-right small:after{content:'\00A0 
\2014'}q:before,q:after,blockquote:before,blockquote:after{content:""}address{display:block;margin-bottom:20px;font-style:normal;line-height:20px}code,pre{padding:0 3px 2px;font-family:Monaco,Menlo,Consolas,"Courier New",monospace;font-size:12px;color:#333;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}code{padding:2px 4px;color:#d14;white-space:nowrap;background-color:#f7f7f9;border:1px solid #e1e1e8}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:20px;word-break:break-all;word-wrap:break-word;white-space:pre;white-space:pre-wrap;background-color:#f5f5f5;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.15);-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}pre.prettyprint{margin-bottom:20px}pre code{padding:0;color:inherit;white-space:pre;white-space:pre-wrap;background-color:transparent;border:0}.pre-scrollable{max-height:340px;overflow-y:scroll}form{margin:0 0 20px}fieldset{padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:40px;color:#333;border:0;border-bottom:1px solid #e5e5e5}legend small{font-size:15px;color:#999}label,input,button,select,textarea{font-size:14px;font-weight:normal;line-height:20px}input,button,select,textarea{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif}label{display:block;margin-bottom:5px}select,textarea,input[type="text"],input[type="password"],input[type="datetime"],input[type="datetime-local"],input[type="date"],input[type="month"],input[type="time"],input[type="week"],input[type="number"],input[type="email"],input[type="url"],input[type="search"],input[type="tel"],input[type="color"],.uneditable-input{display:inline-block;height:20px;padding:4px 6px;margin-bottom:10px;font-size:14px;line-height:20px;color:#555;vertical-align:middle;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}input,textarea,.uneditable-input{width:206px}textarea{height:auto}textarea,input[type="text"],input[type="password"],input[type="datetime"],input[type="datetime-local"],input[type="date"],input[type="month"],input[type="time"],input[type="week"],input[type="number"],input[type="email"],input[type="url"],input[type="search"],input[type="tel"],input[type="color"],.uneditable-input{background-color:#fff;border:1px solid #ccc;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-webkit-transition:border linear .2s,box-shadow linear .2s;-moz-transition:border linear .2s,box-shadow linear .2s;-o-transition:border linear .2s,box-shadow linear .2s;transition:border linear .2s,box-shadow linear .2s}textarea:focus,input[type="text"]:focus,input[type="password"]:focus,input[type="datetime"]:focus,input[type="datetime-local"]:focus,input[type="date"]:focus,input[type="month"]:focus,input[type="time"]:focus,input[type="week"]:focus,input[type="number"]:focus,input[type="email"]:focus,input[type="url"]:focus,input[type="search"]:focus,input[type="tel"]:focus,input[type="color"]:focus,.uneditable-input:focus{border-color:rgba(82,168,236,0.8);outline:0;outline:thin dotted \9;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(82,168,236,0.6);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(82,168,236,0.6);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(82,168,236,0.6)}input[type="radio"],input[type="checkbox"]{margin:4px 0 0;margin-top:1px 
\9;*margin-top:0;line-height:normal}input[type="file"],input[type="image"],input[type="submit"],input[type="reset"],input[type="button"],input[type="radio"],input[type="checkbox"]{width:auto}select,input[type="file"]{height:30px;*margin-top:4px;line-height:30px}select{width:220px;background-color:#fff;border:1px solid #ccc}select[multiple],select[size]{height:auto}select:focus,input[type="file"]:focus,input[type="radio"]:focus,input[type="checkbox"]:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.uneditable-input,.uneditable-textarea{color:#999;cursor:not-allowed;background-color:#fcfcfc;border-color:#ccc;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.025);-moz-box-shadow:inset 0 1px 2px rgba(0,0,0,0.025);box-shadow:inset 0 1px 2px rgba(0,0,0,0.025)}.uneditable-input{overflow:hidden;white-space:nowrap}.uneditable-textarea{width:auto;height:auto}input:-moz-placeholder,textarea:-moz-placeholder{color:#999}input:-ms-input-placeholder,textarea:-ms-input-placeholder{color:#999}input::-webkit-input-placeholder,textarea::-webkit-input-placeholder{color:#999}.radio,.checkbox{min-height:20px;padding-left:20px}.radio input[type="radio"],.checkbox input[type="checkbox"]{float:left;margin-left:-20px}.controls>.radio:first-child,.controls>.checkbox:first-child{padding-top:5px}.radio.inline,.checkbox.inline{display:inline-block;padding-top:5px;margin-bottom:0;vertical-align:middle}.radio.inline+.radio.inline,.checkbox.inline+.checkbox.inline{margin-left:10px}.input-mini{width:60px}.input-small{width:90px}.input-medium{width:150px}.input-large{width:210px}.input-xlarge{width:270px}.input-xxlarge{width:530px}input[class*="span"],select[class*="span"],textarea[class*="span"],.uneditable-input[class*="span"],.row-fluid input[class*="span"],.row-fluid select[class*="span"],.row-fluid textarea[class*="span"],.row-fluid .uneditable-input[class*="span"]{float:none;margin-left:0}.input-append input[class*="span"],.input-append .uneditable-input[class*="span"],.input-prepend input[class*="span"],.input-prepend .uneditable-input[class*="span"],.row-fluid input[class*="span"],.row-fluid select[class*="span"],.row-fluid textarea[class*="span"],.row-fluid .uneditable-input[class*="span"],.row-fluid .input-prepend [class*="span"],.row-fluid .input-append [class*="span"]{display:inline-block}input,textarea,.uneditable-input{margin-left:0}.controls-row [class*="span"]+[class*="span"]{margin-left:20px}input.span12,textarea.span12,.uneditable-input.span12{width:926px}input.span11,textarea.span11,.uneditable-input.span11{width:846px}input.span10,textarea.span10,.uneditable-input.span10{width:766px}input.span9,textarea.span9,.uneditable-input.span9{width:686px}input.span8,textarea.span8,.uneditable-input.span8{width:606px}input.span7,textarea.span7,.uneditable-input.span7{width:526px}input.span6,textarea.span6,.uneditable-input.span6{width:446px}input.span5,textarea.span5,.uneditable-input.span5{width:366px}input.span4,textarea.span4,.uneditable-input.span4{width:286px}input.span3,textarea.span3,.uneditable-input.span3{width:206px}input.span2,textarea.span2,.uneditable-input.span2{width:126px}input.span1,textarea.span1,.uneditable-input.span1{width:46px}.controls-row{*zoom:1}.controls-row:before,.controls-row:after{display:table;line-height:0;content:""}.controls-row:after{clear:both}.controls-row [class*="span"],.row-fluid .controls-row [class*="span"]{float:left}.controls-row .checkbox[class*="span"],.controls-row 
.radio[class*="span"]{padding-top:5px}input[disabled],select[disabled],textarea[disabled],input[readonly],select[readonly],textarea[readonly]{cursor:not-allowed;background-color:#eee}input[type="radio"][disabled],input[type="checkbox"][disabled],input[type="radio"][readonly],input[type="checkbox"][readonly]{background-color:transparent}.control-group.warning .control-label,.control-group.warning .help-block,.control-group.warning .help-inline{color:#c09853}.control-group.warning .checkbox,.control-group.warning .radio,.control-group.warning input,.control-group.warning select,.control-group.warning textarea{color:#c09853}.control-group.warning input,.control-group.warning select,.control-group.warning textarea{border-color:#c09853;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.control-group.warning input:focus,.control-group.warning select:focus,.control-group.warning textarea:focus{border-color:#a47e3c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e;-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e}.control-group.warning .input-prepend .add-on,.control-group.warning .input-append .add-on{color:#c09853;background-color:#fcf8e3;border-color:#c09853}.control-group.error .control-label,.control-group.error .help-block,.control-group.error .help-inline{color:#b94a48}.control-group.error .checkbox,.control-group.error .radio,.control-group.error input,.control-group.error select,.control-group.error textarea{color:#b94a48}.control-group.error input,.control-group.error select,.control-group.error textarea{border-color:#b94a48;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.control-group.error input:focus,.control-group.error select:focus,.control-group.error textarea:focus{border-color:#953b39;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392;-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392}.control-group.error .input-prepend .add-on,.control-group.error .input-append .add-on{color:#b94a48;background-color:#f2dede;border-color:#b94a48}.control-group.success .control-label,.control-group.success .help-block,.control-group.success .help-inline{color:#468847}.control-group.success .checkbox,.control-group.success .radio,.control-group.success input,.control-group.success select,.control-group.success textarea{color:#468847}.control-group.success input,.control-group.success select,.control-group.success textarea{border-color:#468847;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.control-group.success input:focus,.control-group.success select:focus,.control-group.success textarea:focus{border-color:#356635;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b;-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b}.control-group.success .input-prepend .add-on,.control-group.success .input-append .add-on{color:#468847;background-color:#dff0d8;border-color:#468847}.control-group.info .control-label,.control-group.info .help-block,.control-group.info .help-inline{color:#3a87ad}.control-group.info .checkbox,.control-group.info 
.radio,.control-group.info input,.control-group.info select,.control-group.info textarea{color:#3a87ad}.control-group.info input,.control-group.info select,.control-group.info textarea{border-color:#3a87ad;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.control-group.info input:focus,.control-group.info select:focus,.control-group.info textarea:focus{border-color:#2d6987;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7ab5d3;-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7ab5d3;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7ab5d3}.control-group.info .input-prepend .add-on,.control-group.info .input-append .add-on{color:#3a87ad;background-color:#d9edf7;border-color:#3a87ad}input:focus:invalid,textarea:focus:invalid,select:focus:invalid{color:#b94a48;border-color:#ee5f5b}input:focus:invalid:focus,textarea:focus:invalid:focus,select:focus:invalid:focus{border-color:#e9322d;-webkit-box-shadow:0 0 6px #f8b9b7;-moz-box-shadow:0 0 6px #f8b9b7;box-shadow:0 0 6px #f8b9b7}.form-actions{padding:19px 20px 20px;margin-top:20px;margin-bottom:20px;background-color:#f5f5f5;border-top:1px solid #e5e5e5;*zoom:1}.form-actions:before,.form-actions:after{display:table;line-height:0;content:""}.form-actions:after{clear:both}.help-block,.help-inline{color:#595959}.help-block{display:block;margin-bottom:10px}.help-inline{display:inline-block;*display:inline;padding-left:5px;vertical-align:middle;*zoom:1}.input-append,.input-prepend{margin-bottom:5px;font-size:0;white-space:nowrap}.input-append input,.input-prepend input,.input-append select,.input-prepend select,.input-append .uneditable-input,.input-prepend .uneditable-input,.input-append .dropdown-menu,.input-prepend .dropdown-menu{font-size:14px}.input-append input,.input-prepend input,.input-append select,.input-prepend select,.input-append .uneditable-input,.input-prepend .uneditable-input{position:relative;margin-bottom:0;*margin-left:0;vertical-align:top;-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 0;border-radius:0 4px 4px 0}.input-append input:focus,.input-prepend input:focus,.input-append select:focus,.input-prepend select:focus,.input-append .uneditable-input:focus,.input-prepend .uneditable-input:focus{z-index:2}.input-append .add-on,.input-prepend .add-on{display:inline-block;width:auto;height:20px;min-width:16px;padding:4px 5px;font-size:14px;font-weight:normal;line-height:20px;text-align:center;text-shadow:0 1px 0 #fff;background-color:#eee;border:1px solid #ccc}.input-append .add-on,.input-prepend .add-on,.input-append .btn,.input-prepend .btn,.input-append .btn-group>.dropdown-toggle,.input-prepend .btn-group>.dropdown-toggle{vertical-align:top;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.input-append .active,.input-prepend .active{background-color:#a9dba9;border-color:#46a546}.input-prepend .add-on,.input-prepend .btn{margin-right:-1px}.input-prepend .add-on:first-child,.input-prepend .btn:first-child{-webkit-border-radius:4px 0 0 4px;-moz-border-radius:4px 0 0 4px;border-radius:4px 0 0 4px}.input-append input,.input-append select,.input-append .uneditable-input{-webkit-border-radius:4px 0 0 4px;-moz-border-radius:4px 0 0 4px;border-radius:4px 0 0 4px}.input-append input+.btn-group .btn:last-child,.input-append select+.btn-group .btn:last-child,.input-append .uneditable-input+.btn-group .btn:last-child{-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 
0;border-radius:0 4px 4px 0}.input-append .add-on,.input-append .btn,.input-append .btn-group{margin-left:-1px}.input-append .add-on:last-child,.input-append .btn:last-child,.input-append .btn-group:last-child>.dropdown-toggle{-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 0;border-radius:0 4px 4px 0}.input-prepend.input-append input,.input-prepend.input-append select,.input-prepend.input-append .uneditable-input{-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.input-prepend.input-append input+.btn-group .btn,.input-prepend.input-append select+.btn-group .btn,.input-prepend.input-append .uneditable-input+.btn-group .btn{-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 0;border-radius:0 4px 4px 0}.input-prepend.input-append .add-on:first-child,.input-prepend.input-append .btn:first-child{margin-right:-1px;-webkit-border-radius:4px 0 0 4px;-moz-border-radius:4px 0 0 4px;border-radius:4px 0 0 4px}.input-prepend.input-append .add-on:last-child,.input-prepend.input-append .btn:last-child{margin-left:-1px;-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 0;border-radius:0 4px 4px 0}.input-prepend.input-append .btn-group:first-child{margin-left:0}input.search-query{padding-right:14px;padding-right:4px \9;padding-left:14px;padding-left:4px \9;margin-bottom:0;-webkit-border-radius:15px;-moz-border-radius:15px;border-radius:15px}.form-search .input-append .search-query,.form-search .input-prepend .search-query{-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.form-search .input-append .search-query{-webkit-border-radius:14px 0 0 14px;-moz-border-radius:14px 0 0 14px;border-radius:14px 0 0 14px}.form-search .input-append .btn{-webkit-border-radius:0 14px 14px 0;-moz-border-radius:0 14px 14px 0;border-radius:0 14px 14px 0}.form-search .input-prepend .search-query{-webkit-border-radius:0 14px 14px 0;-moz-border-radius:0 14px 14px 0;border-radius:0 14px 14px 0}.form-search .input-prepend .btn{-webkit-border-radius:14px 0 0 14px;-moz-border-radius:14px 0 0 14px;border-radius:14px 0 0 14px}.form-search input,.form-inline input,.form-horizontal input,.form-search textarea,.form-inline textarea,.form-horizontal textarea,.form-search select,.form-inline select,.form-horizontal select,.form-search .help-inline,.form-inline .help-inline,.form-horizontal .help-inline,.form-search .uneditable-input,.form-inline .uneditable-input,.form-horizontal .uneditable-input,.form-search .input-prepend,.form-inline .input-prepend,.form-horizontal .input-prepend,.form-search .input-append,.form-inline .input-append,.form-horizontal .input-append{display:inline-block;*display:inline;margin-bottom:0;vertical-align:middle;*zoom:1}.form-search .hide,.form-inline .hide,.form-horizontal .hide{display:none}.form-search label,.form-inline label,.form-search .btn-group,.form-inline .btn-group{display:inline-block}.form-search .input-append,.form-inline .input-append,.form-search .input-prepend,.form-inline .input-prepend{margin-bottom:0}.form-search .radio,.form-search .checkbox,.form-inline .radio,.form-inline .checkbox{padding-left:0;margin-bottom:0;vertical-align:middle}.form-search .radio input[type="radio"],.form-search .checkbox input[type="checkbox"],.form-inline .radio input[type="radio"],.form-inline .checkbox input[type="checkbox"]{float:left;margin-right:3px;margin-left:0}.control-group{margin-bottom:10px}legend+.control-group{margin-top:20px;-webkit-margin-top-collapse:separate}.form-horizontal .control-group{margin-bottom:20px;*zoom:1}.form-horizontal 
.control-group:before,.form-horizontal .control-group:after{display:table;line-height:0;content:""}.form-horizontal .control-group:after{clear:both}.form-horizontal .control-label{float:left;width:160px;padding-top:5px;text-align:right}.form-horizontal .controls{*display:inline-block;*padding-left:20px;margin-left:180px;*margin-left:0}.form-horizontal .controls:first-child{*padding-left:180px}.form-horizontal .help-block{margin-bottom:0}.form-horizontal input+.help-block,.form-horizontal select+.help-block,.form-horizontal textarea+.help-block,.form-horizontal .uneditable-input+.help-block,.form-horizontal .input-prepend+.help-block,.form-horizontal .input-append+.help-block{margin-top:10px}.form-horizontal .form-actions{padding-left:180px}table{max-width:100%;background-color:transparent;border-collapse:collapse;border-spacing:0}.table{width:100%;margin-bottom:20px}.table th,.table td{padding:8px;line-height:20px;text-align:left;vertical-align:top;border-top:1px solid #ddd}.table th{font-weight:bold}.table thead th{vertical-align:bottom}.table caption+thead tr:first-child th,.table caption+thead tr:first-child td,.table colgroup+thead tr:first-child th,.table colgroup+thead tr:first-child td,.table thead:first-child tr:first-child th,.table thead:first-child tr:first-child td{border-top:0}.table tbody+tbody{border-top:2px solid #ddd}.table .table{background-color:#fff}.table-condensed th,.table-condensed td{padding:4px 5px}.table-bordered{border:1px solid #ddd;border-collapse:separate;*border-collapse:collapse;border-left:0;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.table-bordered th,.table-bordered td{border-left:1px solid #ddd}.table-bordered caption+thead tr:first-child th,.table-bordered caption+tbody tr:first-child th,.table-bordered caption+tbody tr:first-child td,.table-bordered colgroup+thead tr:first-child th,.table-bordered colgroup+tbody tr:first-child th,.table-bordered colgroup+tbody tr:first-child td,.table-bordered thead:first-child tr:first-child th,.table-bordered tbody:first-child tr:first-child th,.table-bordered tbody:first-child tr:first-child td{border-top:0}.table-bordered thead:first-child tr:first-child>th:first-child,.table-bordered tbody:first-child tr:first-child>td:first-child{-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-topleft:4px}.table-bordered thead:first-child tr:first-child>th:last-child,.table-bordered tbody:first-child tr:first-child>td:last-child{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-moz-border-radius-topright:4px}.table-bordered thead:last-child tr:last-child>th:first-child,.table-bordered tbody:last-child tr:last-child>td:first-child,.table-bordered tfoot:last-child tr:last-child>td:first-child{-webkit-border-bottom-left-radius:4px;border-bottom-left-radius:4px;-moz-border-radius-bottomleft:4px}.table-bordered thead:last-child tr:last-child>th:last-child,.table-bordered tbody:last-child tr:last-child>td:last-child,.table-bordered tfoot:last-child tr:last-child>td:last-child{-webkit-border-bottom-right-radius:4px;border-bottom-right-radius:4px;-moz-border-radius-bottomright:4px}.table-bordered tfoot+tbody:last-child tr:last-child td:first-child{-webkit-border-bottom-left-radius:0;border-bottom-left-radius:0;-moz-border-radius-bottomleft:0}.table-bordered tfoot+tbody:last-child tr:last-child td:last-child{-webkit-border-bottom-right-radius:0;border-bottom-right-radius:0;-moz-border-radius-bottomright:0}.table-bordered caption+thead tr:first-child 
th:first-child,.table-bordered caption+tbody tr:first-child td:first-child,.table-bordered colgroup+thead tr:first-child th:first-child,.table-bordered colgroup+tbody tr:first-child td:first-child{-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-topleft:4px}.table-bordered caption+thead tr:first-child th:last-child,.table-bordered caption+tbody tr:first-child td:last-child,.table-bordered colgroup+thead tr:first-child th:last-child,.table-bordered colgroup+tbody tr:first-child td:last-child{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-moz-border-radius-topright:4px}.table-striped tbody>tr:nth-child(odd)>td,.table-striped tbody>tr:nth-child(odd)>th{background-color:#f9f9f9}.table-hover tbody tr:hover td,.table-hover tbody tr:hover th{background-color:#f5f5f5}table td[class*="span"],table th[class*="span"],.row-fluid table td[class*="span"],.row-fluid table th[class*="span"]{display:table-cell;float:none;margin-left:0}.table td.span1,.table th.span1{float:none;width:44px;margin-left:0}.table td.span2,.table th.span2{float:none;width:124px;margin-left:0}.table td.span3,.table th.span3{float:none;width:204px;margin-left:0}.table td.span4,.table th.span4{float:none;width:284px;margin-left:0}.table td.span5,.table th.span5{float:none;width:364px;margin-left:0}.table td.span6,.table th.span6{float:none;width:444px;margin-left:0}.table td.span7,.table th.span7{float:none;width:524px;margin-left:0}.table td.span8,.table th.span8{float:none;width:604px;margin-left:0}.table td.span9,.table th.span9{float:none;width:684px;margin-left:0}.table td.span10,.table th.span10{float:none;width:764px;margin-left:0}.table td.span11,.table th.span11{float:none;width:844px;margin-left:0}.table td.span12,.table th.span12{float:none;width:924px;margin-left:0}.table tbody tr.success td{background-color:#dff0d8}.table tbody tr.error td{background-color:#f2dede}.table tbody tr.warning td{background-color:#fcf8e3}.table tbody tr.info td{background-color:#d9edf7}.table-hover tbody tr.success:hover td{background-color:#d0e9c6}.table-hover tbody tr.error:hover td{background-color:#ebcccc}.table-hover tbody tr.warning:hover td{background-color:#faf2cc}.table-hover tbody tr.info:hover td{background-color:#c4e3f3}[class^="icon-"],[class*=" icon-"]{display:inline-block;width:14px;height:14px;margin-top:1px;*margin-right:.3em;line-height:14px;vertical-align:text-top;background-image:url("../img/glyphicons-halflings.png");background-position:14px 14px;background-repeat:no-repeat}.icon-white,.nav-pills>.active>a>[class^="icon-"],.nav-pills>.active>a>[class*=" icon-"],.nav-list>.active>a>[class^="icon-"],.nav-list>.active>a>[class*=" icon-"],.navbar-inverse .nav>.active>a>[class^="icon-"],.navbar-inverse .nav>.active>a>[class*=" icon-"],.dropdown-menu>li>a:hover>[class^="icon-"],.dropdown-menu>li>a:hover>[class*=" icon-"],.dropdown-menu>.active>a>[class^="icon-"],.dropdown-menu>.active>a>[class*=" icon-"],.dropdown-submenu:hover>a>[class^="icon-"],.dropdown-submenu:hover>a>[class*=" icon-"]{background-image:url("../img/glyphicons-halflings-white.png")}.icon-glass{background-position:0 0}.icon-music{background-position:-24px 0}.icon-search{background-position:-48px 0}.icon-envelope{background-position:-72px 0}.icon-heart{background-position:-96px 0}.icon-star{background-position:-120px 0}.icon-star-empty{background-position:-144px 0}.icon-user{background-position:-168px 0}.icon-film{background-position:-192px 0}.icon-th-large{background-position:-216px 
0}.icon-th{background-position:-240px 0}.icon-th-list{background-position:-264px 0}.icon-ok{background-position:-288px 0}.icon-remove{background-position:-312px 0}.icon-zoom-in{background-position:-336px 0}.icon-zoom-out{background-position:-360px 0}.icon-off{background-position:-384px 0}.icon-signal{background-position:-408px 0}.icon-cog{background-position:-432px 0}.icon-trash{background-position:-456px 0}.icon-home{background-position:0 -24px}.icon-file{background-position:-24px -24px}.icon-time{background-position:-48px -24px}.icon-road{background-position:-72px -24px}.icon-download-alt{background-position:-96px -24px}.icon-download{background-position:-120px -24px}.icon-upload{background-position:-144px -24px}.icon-inbox{background-position:-168px -24px}.icon-play-circle{background-position:-192px -24px}.icon-repeat{background-position:-216px -24px}.icon-refresh{background-position:-240px -24px}.icon-list-alt{background-position:-264px -24px}.icon-lock{background-position:-287px -24px}.icon-flag{background-position:-312px -24px}.icon-headphones{background-position:-336px -24px}.icon-volume-off{background-position:-360px -24px}.icon-volume-down{background-position:-384px -24px}.icon-volume-up{background-position:-408px -24px}.icon-qrcode{background-position:-432px -24px}.icon-barcode{background-position:-456px -24px}.icon-tag{background-position:0 -48px}.icon-tags{background-position:-25px -48px}.icon-book{background-position:-48px -48px}.icon-bookmark{background-position:-72px -48px}.icon-print{background-position:-96px -48px}.icon-camera{background-position:-120px -48px}.icon-font{background-position:-144px -48px}.icon-bold{background-position:-167px -48px}.icon-italic{background-position:-192px -48px}.icon-text-height{background-position:-216px -48px}.icon-text-width{background-position:-240px -48px}.icon-align-left{background-position:-264px -48px}.icon-align-center{background-position:-288px -48px}.icon-align-right{background-position:-312px -48px}.icon-align-justify{background-position:-336px -48px}.icon-list{background-position:-360px -48px}.icon-indent-left{background-position:-384px -48px}.icon-indent-right{background-position:-408px -48px}.icon-facetime-video{background-position:-432px -48px}.icon-picture{background-position:-456px -48px}.icon-pencil{background-position:0 -72px}.icon-map-marker{background-position:-24px -72px}.icon-adjust{background-position:-48px -72px}.icon-tint{background-position:-72px -72px}.icon-edit{background-position:-96px -72px}.icon-share{background-position:-120px -72px}.icon-check{background-position:-144px -72px}.icon-move{background-position:-168px -72px}.icon-step-backward{background-position:-192px -72px}.icon-fast-backward{background-position:-216px -72px}.icon-backward{background-position:-240px -72px}.icon-play{background-position:-264px -72px}.icon-pause{background-position:-288px -72px}.icon-stop{background-position:-312px -72px}.icon-forward{background-position:-336px -72px}.icon-fast-forward{background-position:-360px -72px}.icon-step-forward{background-position:-384px -72px}.icon-eject{background-position:-408px -72px}.icon-chevron-left{background-position:-432px -72px}.icon-chevron-right{background-position:-456px -72px}.icon-plus-sign{background-position:0 -96px}.icon-minus-sign{background-position:-24px -96px}.icon-remove-sign{background-position:-48px -96px}.icon-ok-sign{background-position:-72px -96px}.icon-question-sign{background-position:-96px -96px}.icon-info-sign{background-position:-120px 
-96px}.icon-screenshot{background-position:-144px -96px}.icon-remove-circle{background-position:-168px -96px}.icon-ok-circle{background-position:-192px -96px}.icon-ban-circle{background-position:-216px -96px}.icon-arrow-left{background-position:-240px -96px}.icon-arrow-right{background-position:-264px -96px}.icon-arrow-up{background-position:-289px -96px}.icon-arrow-down{background-position:-312px -96px}.icon-share-alt{background-position:-336px -96px}.icon-resize-full{background-position:-360px -96px}.icon-resize-small{background-position:-384px -96px}.icon-plus{background-position:-408px -96px}.icon-minus{background-position:-433px -96px}.icon-asterisk{background-position:-456px -96px}.icon-exclamation-sign{background-position:0 -120px}.icon-gift{background-position:-24px -120px}.icon-leaf{background-position:-48px -120px}.icon-fire{background-position:-72px -120px}.icon-eye-open{background-position:-96px -120px}.icon-eye-close{background-position:-120px -120px}.icon-warning-sign{background-position:-144px -120px}.icon-plane{background-position:-168px -120px}.icon-calendar{background-position:-192px -120px}.icon-random{width:16px;background-position:-216px -120px}.icon-comment{background-position:-240px -120px}.icon-magnet{background-position:-264px -120px}.icon-chevron-up{background-position:-288px -120px}.icon-chevron-down{background-position:-313px -119px}.icon-retweet{background-position:-336px -120px}.icon-shopping-cart{background-position:-360px -120px}.icon-folder-close{background-position:-384px -120px}.icon-folder-open{width:16px;background-position:-408px -120px}.icon-resize-vertical{background-position:-432px -119px}.icon-resize-horizontal{background-position:-456px -118px}.icon-hdd{background-position:0 -144px}.icon-bullhorn{background-position:-24px -144px}.icon-bell{background-position:-48px -144px}.icon-certificate{background-position:-72px -144px}.icon-thumbs-up{background-position:-96px -144px}.icon-thumbs-down{background-position:-120px -144px}.icon-hand-right{background-position:-144px -144px}.icon-hand-left{background-position:-168px -144px}.icon-hand-up{background-position:-192px -144px}.icon-hand-down{background-position:-216px -144px}.icon-circle-arrow-right{background-position:-240px -144px}.icon-circle-arrow-left{background-position:-264px -144px}.icon-circle-arrow-up{background-position:-288px -144px}.icon-circle-arrow-down{background-position:-312px -144px}.icon-globe{background-position:-336px -144px}.icon-wrench{background-position:-360px -144px}.icon-tasks{background-position:-384px -144px}.icon-filter{background-position:-408px -144px}.icon-briefcase{background-position:-432px -144px}.icon-fullscreen{background-position:-456px -144px}.dropup,.dropdown{position:relative}.dropdown-toggle{*margin-bottom:-3px}.dropdown-toggle:active,.open .dropdown-toggle{outline:0}.caret{display:inline-block;width:0;height:0;vertical-align:top;border-top:4px solid #000;border-right:4px solid transparent;border-left:4px solid transparent;content:""}.dropdown .caret{margin-top:8px;margin-left:2px}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;list-style:none;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.2);*border-right-width:2px;*border-bottom-width:2px;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,0.2);-moz-box-shadow:0 5px 10px rgba(0,0,0,0.2);box-shadow:0 5px 10px 
rgba(0,0,0,0.2);-webkit-background-clip:padding-box;-moz-background-clip:padding;background-clip:padding-box}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{*width:100%;height:1px;margin:9px 1px;*margin:-5px 0 5px;overflow:hidden;background-color:#e5e5e5;border-bottom:1px solid #fff}.dropdown-menu li>a{display:block;padding:3px 20px;clear:both;font-weight:normal;line-height:20px;color:#333;white-space:nowrap}.dropdown-menu li>a:hover,.dropdown-menu li>a:focus,.dropdown-submenu:hover>a{color:#fff;text-decoration:none;background-color:#0081c2;background-image:-moz-linear-gradient(top,#08c,#0077b3);background-image:-webkit-gradient(linear,0 0,0 100%,from(#08c),to(#0077b3));background-image:-webkit-linear-gradient(top,#08c,#0077b3);background-image:-o-linear-gradient(top,#08c,#0077b3);background-image:linear-gradient(to bottom,#08c,#0077b3);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff0088cc',endColorstr='#ff0077b3',GradientType=0)}.dropdown-menu .active>a,.dropdown-menu .active>a:hover{color:#fff;text-decoration:none;background-color:#0081c2;background-image:-moz-linear-gradient(top,#08c,#0077b3);background-image:-webkit-gradient(linear,0 0,0 100%,from(#08c),to(#0077b3));background-image:-webkit-linear-gradient(top,#08c,#0077b3);background-image:-o-linear-gradient(top,#08c,#0077b3);background-image:linear-gradient(to bottom,#08c,#0077b3);background-repeat:repeat-x;outline:0;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff0088cc',endColorstr='#ff0077b3',GradientType=0)}.dropdown-menu .disabled>a,.dropdown-menu .disabled>a:hover{color:#999}.dropdown-menu .disabled>a:hover{text-decoration:none;cursor:default;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.open{*z-index:1000}.open>.dropdown-menu{display:block}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{border-top:0;border-bottom:4px solid #000;content:""}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:1px}.dropdown-submenu{position:relative}.dropdown-submenu>.dropdown-menu{top:0;left:100%;margin-top:-6px;margin-left:-1px;-webkit-border-radius:0 6px 6px 6px;-moz-border-radius:0 6px 6px 6px;border-radius:0 6px 6px 6px}.dropdown-submenu:hover>.dropdown-menu{display:block}.dropup .dropdown-submenu>.dropdown-menu{top:auto;bottom:0;margin-top:0;margin-bottom:-2px;-webkit-border-radius:5px 5px 5px 0;-moz-border-radius:5px 5px 5px 0;border-radius:5px 5px 5px 0}.dropdown-submenu>a:after{display:block;float:right;width:0;height:0;margin-top:5px;margin-right:-10px;border-color:transparent;border-left-color:#ccc;border-style:solid;border-width:5px 0 5px 5px;content:" "}.dropdown-submenu:hover>a:after{border-left-color:#fff}.dropdown-submenu.pull-left{float:none}.dropdown-submenu.pull-left>.dropdown-menu{left:-100%;margin-left:10px;-webkit-border-radius:6px 0 6px 6px;-moz-border-radius:6px 0 6px 6px;border-radius:6px 0 6px 6px}.dropdown .dropdown-menu .nav-header{padding-right:20px;padding-left:20px}.typeahead{z-index:1051;margin-top:2px;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.05);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.05);box-shadow:inset 0 1px 
1px rgba(0,0,0,0.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,0.15)}.well-large{padding:24px;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.well-small{padding:9px;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}.fade{opacity:0;-webkit-transition:opacity .15s linear;-moz-transition:opacity .15s linear;-o-transition:opacity .15s linear;transition:opacity .15s linear}.fade.in{opacity:1}.collapse{position:relative;height:0;overflow:hidden;-webkit-transition:height .35s ease;-moz-transition:height .35s ease;-o-transition:height .35s ease;transition:height .35s ease}.collapse.in{height:auto}.close{float:right;font-size:20px;font-weight:bold;line-height:20px;color:#000;text-shadow:0 1px 0 #fff;opacity:.2;filter:alpha(opacity=20)}.close:hover{color:#000;text-decoration:none;cursor:pointer;opacity:.4;filter:alpha(opacity=40)}button.close{padding:0;cursor:pointer;background:transparent;border:0;-webkit-appearance:none}.btn{display:inline-block;*display:inline;padding:4px 12px;margin-bottom:0;*margin-left:.3em;font-size:14px;line-height:20px;color:#333;text-align:center;text-shadow:0 1px 1px rgba(255,255,255,0.75);vertical-align:middle;cursor:pointer;background-color:#f5f5f5;*background-color:#e6e6e6;background-image:-moz-linear-gradient(top,#fff,#e6e6e6);background-image:-webkit-gradient(linear,0 0,0 100%,from(#fff),to(#e6e6e6));background-image:-webkit-linear-gradient(top,#fff,#e6e6e6);background-image:-o-linear-gradient(top,#fff,#e6e6e6);background-image:linear-gradient(to bottom,#fff,#e6e6e6);background-repeat:repeat-x;border:1px solid #bbb;*border:0;border-color:#e6e6e6 #e6e6e6 #bfbfbf;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);border-bottom-color:#a2a2a2;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff',endColorstr='#ffe6e6e6',GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);*zoom:1;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05)}.btn:hover,.btn:active,.btn.active,.btn.disabled,.btn[disabled]{color:#333;background-color:#e6e6e6;*background-color:#d9d9d9}.btn:active,.btn.active{background-color:#ccc \9}.btn:first-child{*margin-left:0}.btn:hover{color:#333;text-decoration:none;background-position:0 -15px;-webkit-transition:background-position .1s linear;-moz-transition:background-position .1s linear;-o-transition:background-position .1s linear;transition:background-position .1s linear}.btn:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn.active,.btn:active{background-image:none;outline:0;-webkit-box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05)}.btn.disabled,.btn[disabled]{cursor:default;background-image:none;opacity:.65;filter:alpha(opacity=65);-webkit-box-shadow:none;-moz-box-shadow:none;box-shadow:none}.btn-large{padding:11px 19px;font-size:17.5px;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.btn-large [class^="icon-"],.btn-large [class*=" icon-"]{margin-top:4px}.btn-small{padding:2px 
10px;font-size:11.9px;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}.btn-small [class^="icon-"],.btn-small [class*=" icon-"]{margin-top:0}.btn-mini [class^="icon-"],.btn-mini [class*=" icon-"]{margin-top:-1px}.btn-mini{padding:0 6px;font-size:10.5px;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}.btn-block{display:block;width:100%;padding-right:0;padding-left:0;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.btn-block+.btn-block{margin-top:5px}input[type="submit"].btn-block,input[type="reset"].btn-block,input[type="button"].btn-block{width:100%}.btn-primary.active,.btn-warning.active,.btn-danger.active,.btn-success.active,.btn-info.active,.btn-inverse.active{color:rgba(255,255,255,0.75)}.btn{border-color:#c5c5c5;border-color:rgba(0,0,0,0.15) rgba(0,0,0,0.15) rgba(0,0,0,0.25)}.btn-primary{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#006dcc;*background-color:#04c;background-image:-moz-linear-gradient(top,#08c,#04c);background-image:-webkit-gradient(linear,0 0,0 100%,from(#08c),to(#04c));background-image:-webkit-linear-gradient(top,#08c,#04c);background-image:-o-linear-gradient(top,#08c,#04c);background-image:linear-gradient(to bottom,#08c,#04c);background-repeat:repeat-x;border-color:#04c #04c #002a80;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff0088cc',endColorstr='#ff0044cc',GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.btn-primary:hover,.btn-primary:active,.btn-primary.active,.btn-primary.disabled,.btn-primary[disabled]{color:#fff;background-color:#04c;*background-color:#003bb3}.btn-primary:active,.btn-primary.active{background-color:#039 \9}.btn-warning{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#faa732;*background-color:#f89406;background-image:-moz-linear-gradient(top,#fbb450,#f89406);background-image:-webkit-gradient(linear,0 0,0 100%,from(#fbb450),to(#f89406));background-image:-webkit-linear-gradient(top,#fbb450,#f89406);background-image:-o-linear-gradient(top,#fbb450,#f89406);background-image:linear-gradient(to bottom,#fbb450,#f89406);background-repeat:repeat-x;border-color:#f89406 #f89406 #ad6704;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffbb450',endColorstr='#fff89406',GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.btn-warning:hover,.btn-warning:active,.btn-warning.active,.btn-warning.disabled,.btn-warning[disabled]{color:#fff;background-color:#f89406;*background-color:#df8505}.btn-warning:active,.btn-warning.active{background-color:#c67605 \9}.btn-danger{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#da4f49;*background-color:#bd362f;background-image:-moz-linear-gradient(top,#ee5f5b,#bd362f);background-image:-webkit-gradient(linear,0 0,0 100%,from(#ee5f5b),to(#bd362f));background-image:-webkit-linear-gradient(top,#ee5f5b,#bd362f);background-image:-o-linear-gradient(top,#ee5f5b,#bd362f);background-image:linear-gradient(to bottom,#ee5f5b,#bd362f);background-repeat:repeat-x;border-color:#bd362f #bd362f #802420;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) 
rgba(0,0,0,0.25);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffee5f5b',endColorstr='#ffbd362f',GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.btn-danger:hover,.btn-danger:active,.btn-danger.active,.btn-danger.disabled,.btn-danger[disabled]{color:#fff;background-color:#bd362f;*background-color:#a9302a}.btn-danger:active,.btn-danger.active{background-color:#942a25 \9}.btn-success{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#5bb75b;*background-color:#51a351;background-image:-moz-linear-gradient(top,#62c462,#51a351);background-image:-webkit-gradient(linear,0 0,0 100%,from(#62c462),to(#51a351));background-image:-webkit-linear-gradient(top,#62c462,#51a351);background-image:-o-linear-gradient(top,#62c462,#51a351);background-image:linear-gradient(to bottom,#62c462,#51a351);background-repeat:repeat-x;border-color:#51a351 #51a351 #387038;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff62c462',endColorstr='#ff51a351',GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.btn-success:hover,.btn-success:active,.btn-success.active,.btn-success.disabled,.btn-success[disabled]{color:#fff;background-color:#51a351;*background-color:#499249}.btn-success:active,.btn-success.active{background-color:#408140 \9}.btn-info{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#49afcd;*background-color:#2f96b4;background-image:-moz-linear-gradient(top,#5bc0de,#2f96b4);background-image:-webkit-gradient(linear,0 0,0 100%,from(#5bc0de),to(#2f96b4));background-image:-webkit-linear-gradient(top,#5bc0de,#2f96b4);background-image:-o-linear-gradient(top,#5bc0de,#2f96b4);background-image:linear-gradient(to bottom,#5bc0de,#2f96b4);background-repeat:repeat-x;border-color:#2f96b4 #2f96b4 #1f6377;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de',endColorstr='#ff2f96b4',GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.btn-info:hover,.btn-info:active,.btn-info.active,.btn-info.disabled,.btn-info[disabled]{color:#fff;background-color:#2f96b4;*background-color:#2a85a0}.btn-info:active,.btn-info.active{background-color:#24748c \9}.btn-inverse{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#363636;*background-color:#222;background-image:-moz-linear-gradient(top,#444,#222);background-image:-webkit-gradient(linear,0 0,0 100%,from(#444),to(#222));background-image:-webkit-linear-gradient(top,#444,#222);background-image:-o-linear-gradient(top,#444,#222);background-image:linear-gradient(to bottom,#444,#222);background-repeat:repeat-x;border-color:#222 #222 #000;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff444444',endColorstr='#ff222222',GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.btn-inverse:hover,.btn-inverse:active,.btn-inverse.active,.btn-inverse.disabled,.btn-inverse[disabled]{color:#fff;background-color:#222;*background-color:#151515}.btn-inverse:active,.btn-inverse.active{background-color:#080808 
\9}button.btn,input[type="submit"].btn{*padding-top:3px;*padding-bottom:3px}button.btn::-moz-focus-inner,input[type="submit"].btn::-moz-focus-inner{padding:0;border:0}button.btn.btn-large,input[type="submit"].btn.btn-large{*padding-top:7px;*padding-bottom:7px}button.btn.btn-small,input[type="submit"].btn.btn-small{*padding-top:3px;*padding-bottom:3px}button.btn.btn-mini,input[type="submit"].btn.btn-mini{*padding-top:1px;*padding-bottom:1px}.btn-link,.btn-link:active,.btn-link[disabled]{background-color:transparent;background-image:none;-webkit-box-shadow:none;-moz-box-shadow:none;box-shadow:none}.btn-link{color:#08c;cursor:pointer;border-color:transparent;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.btn-link:hover{color:#005580;text-decoration:underline;background-color:transparent}.btn-link[disabled]:hover{color:#333;text-decoration:none}.btn-group{position:relative;display:inline-block;*display:inline;*margin-left:.3em;font-size:0;white-space:nowrap;vertical-align:middle;*zoom:1}.btn-group:first-child{*margin-left:0}.btn-group+.btn-group{margin-left:5px}.btn-toolbar{margin-top:10px;margin-bottom:10px;font-size:0}.btn-toolbar>.btn+.btn,.btn-toolbar>.btn-group+.btn,.btn-toolbar>.btn+.btn-group{margin-left:5px}.btn-group>.btn{position:relative;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.btn-group>.btn+.btn{margin-left:-1px}.btn-group>.btn,.btn-group>.dropdown-menu,.btn-group>.popover{font-size:14px}.btn-group>.btn-mini{font-size:10.5px}.btn-group>.btn-small{font-size:11.9px}.btn-group>.btn-large{font-size:17.5px}.btn-group>.btn:first-child{margin-left:0;-webkit-border-bottom-left-radius:4px;border-bottom-left-radius:4px;-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-bottomleft:4px;-moz-border-radius-topleft:4px}.btn-group>.btn:last-child,.btn-group>.dropdown-toggle{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-webkit-border-bottom-right-radius:4px;border-bottom-right-radius:4px;-moz-border-radius-topright:4px;-moz-border-radius-bottomright:4px}.btn-group>.btn.large:first-child{margin-left:0;-webkit-border-bottom-left-radius:6px;border-bottom-left-radius:6px;-webkit-border-top-left-radius:6px;border-top-left-radius:6px;-moz-border-radius-bottomleft:6px;-moz-border-radius-topleft:6px}.btn-group>.btn.large:last-child,.btn-group>.large.dropdown-toggle{-webkit-border-top-right-radius:6px;border-top-right-radius:6px;-webkit-border-bottom-right-radius:6px;border-bottom-right-radius:6px;-moz-border-radius-topright:6px;-moz-border-radius-bottomright:6px}.btn-group>.btn:hover,.btn-group>.btn:focus,.btn-group>.btn:active,.btn-group>.btn.active{z-index:2}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group>.btn+.dropdown-toggle{*padding-top:5px;padding-right:8px;*padding-bottom:5px;padding-left:8px;-webkit-box-shadow:inset 1px 0 0 rgba(255,255,255,0.125),inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:inset 1px 0 0 rgba(255,255,255,0.125),inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 1px 0 0 rgba(255,255,255,0.125),inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05)}.btn-group>.btn-mini+.dropdown-toggle{*padding-top:2px;padding-right:5px;*padding-bottom:2px;padding-left:5px}.btn-group>.btn-small+.dropdown-toggle{*padding-top:5px;*padding-bottom:4px}.btn-group>.btn-large+.dropdown-toggle{*padding-top:7px;padding-right:12px;*padding-bottom:7px;padding-left:12px}.btn-group.open 
.dropdown-toggle{background-image:none;-webkit-box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05)}.btn-group.open .btn.dropdown-toggle{background-color:#e6e6e6}.btn-group.open .btn-primary.dropdown-toggle{background-color:#04c}.btn-group.open .btn-warning.dropdown-toggle{background-color:#f89406}.btn-group.open .btn-danger.dropdown-toggle{background-color:#bd362f}.btn-group.open .btn-success.dropdown-toggle{background-color:#51a351}.btn-group.open .btn-info.dropdown-toggle{background-color:#2f96b4}.btn-group.open .btn-inverse.dropdown-toggle{background-color:#222}.btn .caret{margin-top:8px;margin-left:0}.btn-mini .caret,.btn-small .caret,.btn-large .caret{margin-top:6px}.btn-large .caret{border-top-width:5px;border-right-width:5px;border-left-width:5px}.dropup .btn-large .caret{border-bottom-width:5px}.btn-primary .caret,.btn-warning .caret,.btn-danger .caret,.btn-info .caret,.btn-success .caret,.btn-inverse .caret{border-top-color:#fff;border-bottom-color:#fff}.btn-group-vertical{display:inline-block;*display:inline;*zoom:1}.btn-group-vertical>.btn{display:block;float:none;max-width:100%;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.btn-group-vertical>.btn+.btn{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:first-child{-webkit-border-radius:4px 4px 0 0;-moz-border-radius:4px 4px 0 0;border-radius:4px 4px 0 0}.btn-group-vertical>.btn:last-child{-webkit-border-radius:0 0 4px 4px;-moz-border-radius:0 0 4px 4px;border-radius:0 0 4px 4px}.btn-group-vertical>.btn-large:first-child{-webkit-border-radius:6px 6px 0 0;-moz-border-radius:6px 6px 0 0;border-radius:6px 6px 0 0}.btn-group-vertical>.btn-large:last-child{-webkit-border-radius:0 0 6px 6px;-moz-border-radius:0 0 6px 6px;border-radius:0 0 6px 6px}.alert{padding:8px 35px 8px 14px;margin-bottom:20px;text-shadow:0 1px 0 rgba(255,255,255,0.5);background-color:#fcf8e3;border:1px solid #fbeed5;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.alert,.alert h4{color:#c09853}.alert h4{margin:0}.alert .close{position:relative;top:-2px;right:-21px;line-height:20px}.alert-success{color:#468847;background-color:#dff0d8;border-color:#d6e9c6}.alert-success h4{color:#468847}.alert-danger,.alert-error{color:#b94a48;background-color:#f2dede;border-color:#eed3d7}.alert-danger h4,.alert-error h4{color:#b94a48}.alert-info{color:#3a87ad;background-color:#d9edf7;border-color:#bce8f1}.alert-info h4{color:#3a87ad}.alert-block{padding-top:14px;padding-bottom:14px}.alert-block>p,.alert-block>ul{margin-bottom:0}.alert-block p+p{margin-top:5px}.nav{margin-bottom:20px;margin-left:0;list-style:none}.nav>li>a{display:block}.nav>li>a:hover{text-decoration:none;background-color:#eee}.nav>li>a>img{max-width:none}.nav>.pull-right{float:right}.nav-header{display:block;padding:3px 15px;font-size:11px;font-weight:bold;line-height:20px;color:#999;text-shadow:0 1px 0 rgba(255,255,255,0.5);text-transform:uppercase}.nav li+.nav-header{margin-top:9px}.nav-list{padding-right:15px;padding-left:15px;margin-bottom:0}.nav-list>li>a,.nav-list .nav-header{margin-right:-15px;margin-left:-15px;text-shadow:0 1px 0 rgba(255,255,255,0.5)}.nav-list>li>a{padding:3px 15px}.nav-list>.active>a,.nav-list>.active>a:hover{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.2);background-color:#08c}.nav-list [class^="icon-"],.nav-list [class*=" icon-"]{margin-right:2px}.nav-list 
.divider{*width:100%;height:1px;margin:9px 1px;*margin:-5px 0 5px;overflow:hidden;background-color:#e5e5e5;border-bottom:1px solid #fff}.nav-tabs,.nav-pills{*zoom:1}.nav-tabs:before,.nav-pills:before,.nav-tabs:after,.nav-pills:after{display:table;line-height:0;content:""}.nav-tabs:after,.nav-pills:after{clear:both}.nav-tabs>li,.nav-pills>li{float:left}.nav-tabs>li>a,.nav-pills>li>a{padding-right:12px;padding-left:12px;margin-right:2px;line-height:14px}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{margin-bottom:-1px}.nav-tabs>li>a{padding-top:8px;padding-bottom:8px;line-height:20px;border:1px solid transparent;-webkit-border-radius:4px 4px 0 0;-moz-border-radius:4px 4px 0 0;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>.active>a,.nav-tabs>.active>a:hover{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-pills>li>a{padding-top:8px;padding-bottom:8px;margin-top:2px;margin-bottom:2px;-webkit-border-radius:5px;-moz-border-radius:5px;border-radius:5px}.nav-pills>.active>a,.nav-pills>.active>a:hover{color:#fff;background-color:#08c}.nav-stacked>li{float:none}.nav-stacked>li>a{margin-right:0}.nav-tabs.nav-stacked{border-bottom:0}.nav-tabs.nav-stacked>li>a{border:1px solid #ddd;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.nav-tabs.nav-stacked>li:first-child>a{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-topright:4px;-moz-border-radius-topleft:4px}.nav-tabs.nav-stacked>li:last-child>a{-webkit-border-bottom-right-radius:4px;border-bottom-right-radius:4px;-webkit-border-bottom-left-radius:4px;border-bottom-left-radius:4px;-moz-border-radius-bottomright:4px;-moz-border-radius-bottomleft:4px}.nav-tabs.nav-stacked>li>a:hover{z-index:2;border-color:#ddd}.nav-pills.nav-stacked>li>a{margin-bottom:3px}.nav-pills.nav-stacked>li:last-child>a{margin-bottom:1px}.nav-tabs .dropdown-menu{-webkit-border-radius:0 0 6px 6px;-moz-border-radius:0 0 6px 6px;border-radius:0 0 6px 6px}.nav-pills .dropdown-menu{-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.nav .dropdown-toggle .caret{margin-top:6px;border-top-color:#08c;border-bottom-color:#08c}.nav .dropdown-toggle:hover .caret{border-top-color:#005580;border-bottom-color:#005580}.nav-tabs .dropdown-toggle .caret{margin-top:8px}.nav .active .dropdown-toggle .caret{border-top-color:#fff;border-bottom-color:#fff}.nav-tabs .active .dropdown-toggle .caret{border-top-color:#555;border-bottom-color:#555}.nav>.dropdown.active>a:hover{cursor:pointer}.nav-tabs .open .dropdown-toggle,.nav-pills .open .dropdown-toggle,.nav>li.dropdown.open.active>a:hover{color:#fff;background-color:#999;border-color:#999}.nav li.dropdown.open .caret,.nav li.dropdown.open.active .caret,.nav li.dropdown.open a:hover .caret{border-top-color:#fff;border-bottom-color:#fff;opacity:1;filter:alpha(opacity=100)}.tabs-stacked .open>a:hover{border-color:#999}.tabbable{*zoom:1}.tabbable:before,.tabbable:after{display:table;line-height:0;content:""}.tabbable:after{clear:both}.tab-content{overflow:auto}.tabs-below>.nav-tabs,.tabs-right>.nav-tabs,.tabs-left>.nav-tabs{border-bottom:0}.tab-content>.tab-pane,.pill-content>.pill-pane{display:none}.tab-content>.active,.pill-content>.active{display:block}.tabs-below>.nav-tabs{border-top:1px solid #ddd}.tabs-below>.nav-tabs>li{margin-top:-1px;margin-bottom:0}.tabs-below>.nav-tabs>li>a{-webkit-border-radius:0 0 4px 
4px;-moz-border-radius:0 0 4px 4px;border-radius:0 0 4px 4px}.tabs-below>.nav-tabs>li>a:hover{border-top-color:#ddd;border-bottom-color:transparent}.tabs-below>.nav-tabs>.active>a,.tabs-below>.nav-tabs>.active>a:hover{border-color:transparent #ddd #ddd #ddd}.tabs-left>.nav-tabs>li,.tabs-right>.nav-tabs>li{float:none}.tabs-left>.nav-tabs>li>a,.tabs-right>.nav-tabs>li>a{min-width:74px;margin-right:0;margin-bottom:3px}.tabs-left>.nav-tabs{float:left;margin-right:19px;border-right:1px solid #ddd}.tabs-left>.nav-tabs>li>a{margin-right:-1px;-webkit-border-radius:4px 0 0 4px;-moz-border-radius:4px 0 0 4px;border-radius:4px 0 0 4px}.tabs-left>.nav-tabs>li>a:hover{border-color:#eee #ddd #eee #eee}.tabs-left>.nav-tabs .active>a,.tabs-left>.nav-tabs .active>a:hover{border-color:#ddd transparent #ddd #ddd;*border-right-color:#fff}.tabs-right>.nav-tabs{float:right;margin-left:19px;border-left:1px solid #ddd}.tabs-right>.nav-tabs>li>a{margin-left:-1px;-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 0;border-radius:0 4px 4px 0}.tabs-right>.nav-tabs>li>a:hover{border-color:#eee #eee #eee #ddd}.tabs-right>.nav-tabs .active>a,.tabs-right>.nav-tabs .active>a:hover{border-color:#ddd #ddd #ddd transparent;*border-left-color:#fff}.nav>.disabled>a{color:#999}.nav>.disabled>a:hover{text-decoration:none;cursor:default;background-color:transparent}.navbar{*position:relative;*z-index:2;margin-bottom:20px;overflow:visible}.navbar-inner{min-height:40px;padding-right:20px;padding-left:20px;background-color:#fafafa;background-image:-moz-linear-gradient(top,#fff,#f2f2f2);background-image:-webkit-gradient(linear,0 0,0 100%,from(#fff),to(#f2f2f2));background-image:-webkit-linear-gradient(top,#fff,#f2f2f2);background-image:-o-linear-gradient(top,#fff,#f2f2f2);background-image:linear-gradient(to bottom,#fff,#f2f2f2);background-repeat:repeat-x;border:1px solid #d4d4d4;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff',endColorstr='#fff2f2f2',GradientType=0);*zoom:1;-webkit-box-shadow:0 1px 4px rgba(0,0,0,0.065);-moz-box-shadow:0 1px 4px rgba(0,0,0,0.065);box-shadow:0 1px 4px rgba(0,0,0,0.065)}.navbar-inner:before,.navbar-inner:after{display:table;line-height:0;content:""}.navbar-inner:after{clear:both}.navbar .container{width:auto}.nav-collapse.collapse{height:auto;overflow:visible}.navbar .brand{display:block;float:left;padding:10px 20px 10px;margin-left:-20px;font-size:20px;font-weight:200;color:#777;text-shadow:0 1px 0 #fff}.navbar .brand:hover{text-decoration:none}.navbar-text{margin-bottom:0;line-height:40px;color:#777}.navbar-link{color:#777}.navbar-link:hover{color:#333}.navbar .divider-vertical{height:40px;margin:0 9px;border-right:1px solid #fff;border-left:1px solid #f2f2f2}.navbar .btn,.navbar .btn-group{margin-top:5px}.navbar .btn-group .btn,.navbar .input-prepend .btn,.navbar .input-append .btn{margin-top:0}.navbar-form{margin-bottom:0;*zoom:1}.navbar-form:before,.navbar-form:after{display:table;line-height:0;content:""}.navbar-form:after{clear:both}.navbar-form input,.navbar-form select,.navbar-form .radio,.navbar-form .checkbox{margin-top:5px}.navbar-form input,.navbar-form select,.navbar-form .btn{display:inline-block;margin-bottom:0}.navbar-form input[type="image"],.navbar-form input[type="checkbox"],.navbar-form input[type="radio"]{margin-top:3px}.navbar-form .input-append,.navbar-form .input-prepend{margin-top:5px;white-space:nowrap}.navbar-form .input-append input,.navbar-form .input-prepend 
input{margin-top:0}.navbar-search{position:relative;float:left;margin-top:5px;margin-bottom:0}.navbar-search .search-query{padding:4px 14px;margin-bottom:0;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:13px;font-weight:normal;line-height:1;-webkit-border-radius:15px;-moz-border-radius:15px;border-radius:15px}.navbar-static-top{position:static;margin-bottom:0}.navbar-static-top .navbar-inner{-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.navbar-fixed-top,.navbar-fixed-bottom{position:fixed;right:0;left:0;z-index:1030;margin-bottom:0}.navbar-fixed-top .navbar-inner,.navbar-static-top .navbar-inner{border-width:0 0 1px}.navbar-fixed-bottom .navbar-inner{border-width:1px 0 0}.navbar-fixed-top .navbar-inner,.navbar-fixed-bottom .navbar-inner{padding-right:0;padding-left:0;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.navbar-static-top .container,.navbar-fixed-top .container,.navbar-fixed-bottom .container{width:940px}.navbar-fixed-top{top:0}.navbar-fixed-top .navbar-inner,.navbar-static-top .navbar-inner{-webkit-box-shadow:0 1px 10px rgba(0,0,0,0.1);-moz-box-shadow:0 1px 10px rgba(0,0,0,0.1);box-shadow:0 1px 10px rgba(0,0,0,0.1)}.navbar-fixed-bottom{bottom:0}.navbar-fixed-bottom .navbar-inner{-webkit-box-shadow:0 -1px 10px rgba(0,0,0,0.1);-moz-box-shadow:0 -1px 10px rgba(0,0,0,0.1);box-shadow:0 -1px 10px rgba(0,0,0,0.1)}.navbar .nav{position:relative;left:0;display:block;float:left;margin:0 10px 0 0}.navbar .nav.pull-right{float:right;margin-right:0}.navbar .nav>li{float:left}.navbar .nav>li>a{float:none;padding:10px 15px 10px;color:#777;text-decoration:none;text-shadow:0 1px 0 #fff}.navbar .nav .dropdown-toggle .caret{margin-top:8px}.navbar .nav>li>a:focus,.navbar .nav>li>a:hover{color:#333;text-decoration:none;background-color:transparent}.navbar .nav>.active>a,.navbar .nav>.active>a:hover,.navbar .nav>.active>a:focus{color:#555;text-decoration:none;background-color:#e5e5e5;-webkit-box-shadow:inset 0 3px 8px rgba(0,0,0,0.125);-moz-box-shadow:inset 0 3px 8px rgba(0,0,0,0.125);box-shadow:inset 0 3px 8px rgba(0,0,0,0.125)}.navbar .btn-navbar{display:none;float:right;padding:7px 10px;margin-right:5px;margin-left:5px;color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#ededed;*background-color:#e5e5e5;background-image:-moz-linear-gradient(top,#f2f2f2,#e5e5e5);background-image:-webkit-gradient(linear,0 0,0 100%,from(#f2f2f2),to(#e5e5e5));background-image:-webkit-linear-gradient(top,#f2f2f2,#e5e5e5);background-image:-o-linear-gradient(top,#f2f2f2,#e5e5e5);background-image:linear-gradient(to bottom,#f2f2f2,#e5e5e5);background-repeat:repeat-x;border-color:#e5e5e5 #e5e5e5 #bfbfbf;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2f2f2',endColorstr='#ffe5e5e5',GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.075);-moz-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.075);box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.075)}.navbar .btn-navbar:hover,.navbar .btn-navbar:active,.navbar .btn-navbar.active,.navbar .btn-navbar.disabled,.navbar .btn-navbar[disabled]{color:#fff;background-color:#e5e5e5;*background-color:#d9d9d9}.navbar .btn-navbar:active,.navbar .btn-navbar.active{background-color:#ccc \9}.navbar .btn-navbar 
.icon-bar{display:block;width:18px;height:2px;background-color:#f5f5f5;-webkit-border-radius:1px;-moz-border-radius:1px;border-radius:1px;-webkit-box-shadow:0 1px 0 rgba(0,0,0,0.25);-moz-box-shadow:0 1px 0 rgba(0,0,0,0.25);box-shadow:0 1px 0 rgba(0,0,0,0.25)}.btn-navbar .icon-bar+.icon-bar{margin-top:3px}.navbar .nav>li>.dropdown-menu:before{position:absolute;top:-7px;left:9px;display:inline-block;border-right:7px solid transparent;border-bottom:7px solid #ccc;border-left:7px solid transparent;border-bottom-color:rgba(0,0,0,0.2);content:''}.navbar .nav>li>.dropdown-menu:after{position:absolute;top:-6px;left:10px;display:inline-block;border-right:6px solid transparent;border-bottom:6px solid #fff;border-left:6px solid transparent;content:''}.navbar-fixed-bottom .nav>li>.dropdown-menu:before{top:auto;bottom:-7px;border-top:7px solid #ccc;border-bottom:0;border-top-color:rgba(0,0,0,0.2)}.navbar-fixed-bottom .nav>li>.dropdown-menu:after{top:auto;bottom:-6px;border-top:6px solid #fff;border-bottom:0}.navbar .nav li.dropdown>a:hover .caret{border-top-color:#555;border-bottom-color:#555}.navbar .nav li.dropdown.open>.dropdown-toggle,.navbar .nav li.dropdown.active>.dropdown-toggle,.navbar .nav li.dropdown.open.active>.dropdown-toggle{color:#555;background-color:#e5e5e5}.navbar .nav li.dropdown>.dropdown-toggle .caret{border-top-color:#777;border-bottom-color:#777}.navbar .nav li.dropdown.open>.dropdown-toggle .caret,.navbar .nav li.dropdown.active>.dropdown-toggle .caret,.navbar .nav li.dropdown.open.active>.dropdown-toggle .caret{border-top-color:#555;border-bottom-color:#555}.navbar .pull-right>li>.dropdown-menu,.navbar .nav>li>.dropdown-menu.pull-right{right:0;left:auto}.navbar .pull-right>li>.dropdown-menu:before,.navbar .nav>li>.dropdown-menu.pull-right:before{right:12px;left:auto}.navbar .pull-right>li>.dropdown-menu:after,.navbar .nav>li>.dropdown-menu.pull-right:after{right:13px;left:auto}.navbar .pull-right>li>.dropdown-menu .dropdown-menu,.navbar .nav>li>.dropdown-menu.pull-right .dropdown-menu{right:100%;left:auto;margin-right:-1px;margin-left:0;-webkit-border-radius:6px 0 6px 6px;-moz-border-radius:6px 0 6px 6px;border-radius:6px 0 6px 6px}.navbar-inverse .navbar-inner{background-color:#1b1b1b;background-image:-moz-linear-gradient(top,#222,#111);background-image:-webkit-gradient(linear,0 0,0 100%,from(#222),to(#111));background-image:-webkit-linear-gradient(top,#222,#111);background-image:-o-linear-gradient(top,#222,#111);background-image:linear-gradient(to bottom,#222,#111);background-repeat:repeat-x;border-color:#252525;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff222222',endColorstr='#ff111111',GradientType=0)}.navbar-inverse .brand,.navbar-inverse .nav>li>a{color:#999;text-shadow:0 -1px 0 rgba(0,0,0,0.25)}.navbar-inverse .brand:hover,.navbar-inverse .nav>li>a:hover{color:#fff}.navbar-inverse .brand{color:#999}.navbar-inverse .navbar-text{color:#999}.navbar-inverse .nav>li>a:focus,.navbar-inverse .nav>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .nav .active>a,.navbar-inverse .nav .active>a:hover,.navbar-inverse .nav .active>a:focus{color:#fff;background-color:#111}.navbar-inverse .navbar-link{color:#999}.navbar-inverse .navbar-link:hover{color:#fff}.navbar-inverse .divider-vertical{border-right-color:#222;border-left-color:#111}.navbar-inverse .nav li.dropdown.open>.dropdown-toggle,.navbar-inverse .nav li.dropdown.active>.dropdown-toggle,.navbar-inverse .nav 
li.dropdown.open.active>.dropdown-toggle{color:#fff;background-color:#111}.navbar-inverse .nav li.dropdown>a:hover .caret{border-top-color:#fff;border-bottom-color:#fff}.navbar-inverse .nav li.dropdown>.dropdown-toggle .caret{border-top-color:#999;border-bottom-color:#999}.navbar-inverse .nav li.dropdown.open>.dropdown-toggle .caret,.navbar-inverse .nav li.dropdown.active>.dropdown-toggle .caret,.navbar-inverse .nav li.dropdown.open.active>.dropdown-toggle .caret{border-top-color:#fff;border-bottom-color:#fff}.navbar-inverse .navbar-search .search-query{color:#fff;background-color:#515151;border-color:#111;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1),0 1px 0 rgba(255,255,255,0.15);-moz-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1),0 1px 0 rgba(255,255,255,0.15);box-shadow:inset 0 1px 2px rgba(0,0,0,0.1),0 1px 0 rgba(255,255,255,0.15);-webkit-transition:none;-moz-transition:none;-o-transition:none;transition:none}.navbar-inverse .navbar-search .search-query:-moz-placeholder{color:#ccc}.navbar-inverse .navbar-search .search-query:-ms-input-placeholder{color:#ccc}.navbar-inverse .navbar-search .search-query::-webkit-input-placeholder{color:#ccc}.navbar-inverse .navbar-search .search-query:focus,.navbar-inverse .navbar-search .search-query.focused{padding:5px 15px;color:#333;text-shadow:0 1px 0 #fff;background-color:#fff;border:0;outline:0;-webkit-box-shadow:0 0 3px rgba(0,0,0,0.15);-moz-box-shadow:0 0 3px rgba(0,0,0,0.15);box-shadow:0 0 3px rgba(0,0,0,0.15)}.navbar-inverse .btn-navbar{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#0e0e0e;*background-color:#040404;background-image:-moz-linear-gradient(top,#151515,#040404);background-image:-webkit-gradient(linear,0 0,0 100%,from(#151515),to(#040404));background-image:-webkit-linear-gradient(top,#151515,#040404);background-image:-o-linear-gradient(top,#151515,#040404);background-image:linear-gradient(to bottom,#151515,#040404);background-repeat:repeat-x;border-color:#040404 #040404 #000;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff151515',endColorstr='#ff040404',GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.navbar-inverse .btn-navbar:hover,.navbar-inverse .btn-navbar:active,.navbar-inverse .btn-navbar.active,.navbar-inverse .btn-navbar.disabled,.navbar-inverse .btn-navbar[disabled]{color:#fff;background-color:#040404;*background-color:#000}.navbar-inverse .btn-navbar:active,.navbar-inverse .btn-navbar.active{background-color:#000 \9}.breadcrumb{padding:8px 15px;margin:0 0 20px;list-style:none;background-color:#f5f5f5;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.breadcrumb>li{display:inline-block;*display:inline;text-shadow:0 1px 0 #fff;*zoom:1}.breadcrumb>li>.divider{padding:0 5px;color:#ccc}.breadcrumb>.active{color:#999}.pagination{margin:20px 0}.pagination ul{display:inline-block;*display:inline;margin-bottom:0;margin-left:0;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;*zoom:1;-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:0 1px 2px rgba(0,0,0,0.05);box-shadow:0 1px 2px rgba(0,0,0,0.05)}.pagination ul>li{display:inline}.pagination ul>li>a,.pagination ul>li>span{float:left;padding:4px 12px;line-height:20px;text-decoration:none;background-color:#fff;border:1px solid #ddd;border-left-width:0}.pagination ul>li>a:hover,.pagination ul>.active>a,.pagination ul>.active>span{background-color:#f5f5f5}.pagination ul>.active>a,.pagination 
ul>.active>span{color:#999;cursor:default}.pagination ul>.disabled>span,.pagination ul>.disabled>a,.pagination ul>.disabled>a:hover{color:#999;cursor:default;background-color:transparent}.pagination ul>li:first-child>a,.pagination ul>li:first-child>span{border-left-width:1px;-webkit-border-bottom-left-radius:4px;border-bottom-left-radius:4px;-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-bottomleft:4px;-moz-border-radius-topleft:4px}.pagination ul>li:last-child>a,.pagination ul>li:last-child>span{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-webkit-border-bottom-right-radius:4px;border-bottom-right-radius:4px;-moz-border-radius-topright:4px;-moz-border-radius-bottomright:4px}.pagination-centered{text-align:center}.pagination-right{text-align:right}.pagination-large ul>li>a,.pagination-large ul>li>span{padding:11px 19px;font-size:17.5px}.pagination-large ul>li:first-child>a,.pagination-large ul>li:first-child>span{-webkit-border-bottom-left-radius:6px;border-bottom-left-radius:6px;-webkit-border-top-left-radius:6px;border-top-left-radius:6px;-moz-border-radius-bottomleft:6px;-moz-border-radius-topleft:6px}.pagination-large ul>li:last-child>a,.pagination-large ul>li:last-child>span{-webkit-border-top-right-radius:6px;border-top-right-radius:6px;-webkit-border-bottom-right-radius:6px;border-bottom-right-radius:6px;-moz-border-radius-topright:6px;-moz-border-radius-bottomright:6px}.pagination-mini ul>li:first-child>a,.pagination-small ul>li:first-child>a,.pagination-mini ul>li:first-child>span,.pagination-small ul>li:first-child>span{-webkit-border-bottom-left-radius:3px;border-bottom-left-radius:3px;-webkit-border-top-left-radius:3px;border-top-left-radius:3px;-moz-border-radius-bottomleft:3px;-moz-border-radius-topleft:3px}.pagination-mini ul>li:last-child>a,.pagination-small ul>li:last-child>a,.pagination-mini ul>li:last-child>span,.pagination-small ul>li:last-child>span{-webkit-border-top-right-radius:3px;border-top-right-radius:3px;-webkit-border-bottom-right-radius:3px;border-bottom-right-radius:3px;-moz-border-radius-topright:3px;-moz-border-radius-bottomright:3px}.pagination-small ul>li>a,.pagination-small ul>li>span{padding:2px 10px;font-size:11.9px}.pagination-mini ul>li>a,.pagination-mini ul>li>span{padding:0 6px;font-size:10.5px}.pager{margin:20px 0;text-align:center;list-style:none;*zoom:1}.pager:before,.pager:after{display:table;line-height:0;content:""}.pager:after{clear:both}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;-webkit-border-radius:15px;-moz-border-radius:15px;border-radius:15px}.pager li>a:hover{text-decoration:none;background-color:#f5f5f5}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:hover,.pager .disabled>span{color:#999;cursor:default;background-color:#fff}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{opacity:0}.modal-backdrop,.modal-backdrop.fade.in{opacity:.8;filter:alpha(opacity=80)}.modal{position:fixed;top:10%;left:50%;z-index:1050;width:560px;margin-left:-280px;background-color:#fff;border:1px solid #999;border:1px solid rgba(0,0,0,0.3);*border:1px solid #999;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px;outline:0;-webkit-box-shadow:0 3px 7px rgba(0,0,0,0.3);-moz-box-shadow:0 3px 7px rgba(0,0,0,0.3);box-shadow:0 3px 7px 
rgba(0,0,0,0.3);-webkit-background-clip:padding-box;-moz-background-clip:padding-box;background-clip:padding-box}.modal.fade{top:-25%;-webkit-transition:opacity .3s linear,top .3s ease-out;-moz-transition:opacity .3s linear,top .3s ease-out;-o-transition:opacity .3s linear,top .3s ease-out;transition:opacity .3s linear,top .3s ease-out}.modal.fade.in{top:10%}.modal-header{padding:9px 15px;border-bottom:1px solid #eee}.modal-header .close{margin-top:2px}.modal-header h3{margin:0;line-height:30px}.modal-body{position:relative;max-height:400px;padding:15px;overflow-y:auto}.modal-form{margin-bottom:0}.modal-footer{padding:14px 15px 15px;margin-bottom:0;text-align:right;background-color:#f5f5f5;border-top:1px solid #ddd;-webkit-border-radius:0 0 6px 6px;-moz-border-radius:0 0 6px 6px;border-radius:0 0 6px 6px;*zoom:1;-webkit-box-shadow:inset 0 1px 0 #fff;-moz-box-shadow:inset 0 1px 0 #fff;box-shadow:inset 0 1px 0 #fff}.modal-footer:before,.modal-footer:after{display:table;line-height:0;content:""}.modal-footer:after{clear:both}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.tooltip{position:absolute;z-index:1030;display:block;padding:5px;font-size:11px;opacity:0;filter:alpha(opacity=0);visibility:visible}.tooltip.in{opacity:.8;filter:alpha(opacity=80)}.tooltip.top{margin-top:-3px}.tooltip.right{margin-left:3px}.tooltip.bottom{margin-top:3px}.tooltip.left{margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;text-decoration:none;background-color:#000;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-top-color:#000;border-width:5px 5px 0}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-right-color:#000;border-width:5px 5px 5px 0}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-left-color:#000;border-width:5px 0 5px 5px}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-bottom-color:#000;border-width:0 5px 5px}.popover{position:absolute;top:0;left:0;z-index:1010;display:none;width:236px;padding:1px;text-align:left;white-space:normal;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.2);-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,0.2);-moz-box-shadow:0 5px 10px rgba(0,0,0,0.2);box-shadow:0 5px 10px rgba(0,0,0,0.2);-webkit-background-clip:padding-box;-moz-background-clip:padding;background-clip:padding-box}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;font-weight:normal;line-height:18px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;-webkit-border-radius:5px 5px 0 0;-moz-border-radius:5px 5px 0 0;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover .arrow,.popover .arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover .arrow{border-width:11px}.popover .arrow:after{border-width:10px;content:""}.popover.top .arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,0.25);border-bottom-width:0}.popover.top 
.arrow:after{bottom:1px;margin-left:-10px;border-top-color:#fff;border-bottom-width:0}.popover.right .arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,0.25);border-left-width:0}.popover.right .arrow:after{bottom:-10px;left:1px;border-right-color:#fff;border-left-width:0}.popover.bottom .arrow{top:-11px;left:50%;margin-left:-11px;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,0.25);border-top-width:0}.popover.bottom .arrow:after{top:1px;margin-left:-10px;border-bottom-color:#fff;border-top-width:0}.popover.left .arrow{top:50%;right:-11px;margin-top:-11px;border-left-color:#999;border-left-color:rgba(0,0,0,0.25);border-right-width:0}.popover.left .arrow:after{right:1px;bottom:-10px;border-left-color:#fff;border-right-width:0}.thumbnails{margin-left:-20px;list-style:none;*zoom:1}.thumbnails:before,.thumbnails:after{display:table;line-height:0;content:""}.thumbnails:after{clear:both}.row-fluid .thumbnails{margin-left:0}.thumbnails>li{float:left;margin-bottom:20px;margin-left:20px}.thumbnail{display:block;padding:4px;line-height:20px;border:1px solid #ddd;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;-webkit-box-shadow:0 1px 3px rgba(0,0,0,0.055);-moz-box-shadow:0 1px 3px rgba(0,0,0,0.055);box-shadow:0 1px 3px rgba(0,0,0,0.055);-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out}a.thumbnail:hover{border-color:#08c;-webkit-box-shadow:0 1px 4px rgba(0,105,214,0.25);-moz-box-shadow:0 1px 4px rgba(0,105,214,0.25);box-shadow:0 1px 4px rgba(0,105,214,0.25)}.thumbnail>img{display:block;max-width:100%;margin-right:auto;margin-left:auto}.thumbnail .caption{padding:9px;color:#555}.media,.media-body{overflow:hidden;*overflow:visible;zoom:1}.media,.media .media{margin-top:15px}.media:first-child{margin-top:0}.media-object{display:block}.media-heading{margin:0 0 5px}.media .pull-left{margin-right:10px}.media .pull-right{margin-left:10px}.media-list{margin-left:0;list-style:none}.label,.badge{display:inline-block;padding:2px 4px;font-size:11.844px;font-weight:bold;line-height:14px;color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);white-space:nowrap;vertical-align:baseline;background-color:#999}.label{-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}.badge{padding-right:9px;padding-left:9px;-webkit-border-radius:9px;-moz-border-radius:9px;border-radius:9px}.label:empty,.badge:empty{display:none}a.label:hover,a.badge:hover{color:#fff;text-decoration:none;cursor:pointer}.label-important,.badge-important{background-color:#b94a48}.label-important[href],.badge-important[href]{background-color:#953b39}.label-warning,.badge-warning{background-color:#f89406}.label-warning[href],.badge-warning[href]{background-color:#c67605}.label-success,.badge-success{background-color:#468847}.label-success[href],.badge-success[href]{background-color:#356635}.label-info,.badge-info{background-color:#3a87ad}.label-info[href],.badge-info[href]{background-color:#2d6987}.label-inverse,.badge-inverse{background-color:#333}.label-inverse[href],.badge-inverse[href]{background-color:#1a1a1a}.btn .label,.btn .badge{position:relative;top:-1px}.btn-mini .label,.btn-mini .badge{top:0}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-moz-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-ms-keyframes progress-bar-stripes{from{background-position:40px 
0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:0 0}to{background-position:40px 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f7f7f7;background-image:-moz-linear-gradient(top,#f5f5f5,#f9f9f9);background-image:-webkit-gradient(linear,0 0,0 100%,from(#f5f5f5),to(#f9f9f9));background-image:-webkit-linear-gradient(top,#f5f5f5,#f9f9f9);background-image:-o-linear-gradient(top,#f5f5f5,#f9f9f9);background-image:linear-gradient(to bottom,#f5f5f5,#f9f9f9);background-repeat:repeat-x;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5',endColorstr='#fff9f9f9',GradientType=0);-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1);-moz-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1);box-shadow:inset 0 1px 2px rgba(0,0,0,0.1)}.progress .bar{float:left;width:0;height:100%;font-size:12px;color:#fff;text-align:center;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#0e90d2;background-image:-moz-linear-gradient(top,#149bdf,#0480be);background-image:-webkit-gradient(linear,0 0,0 100%,from(#149bdf),to(#0480be));background-image:-webkit-linear-gradient(top,#149bdf,#0480be);background-image:-o-linear-gradient(top,#149bdf,#0480be);background-image:linear-gradient(to bottom,#149bdf,#0480be);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff149bdf',endColorstr='#ff0480be',GradientType=0);-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);-moz-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;-webkit-transition:width .6s ease;-moz-transition:width .6s ease;-o-transition:width .6s ease;transition:width .6s ease}.progress .bar+.bar{-webkit-box-shadow:inset 1px 0 0 rgba(0,0,0,0.15),inset 0 -1px 0 rgba(0,0,0,0.15);-moz-box-shadow:inset 1px 0 0 rgba(0,0,0,0.15),inset 0 -1px 0 rgba(0,0,0,0.15);box-shadow:inset 1px 0 0 rgba(0,0,0,0.15),inset 0 -1px 0 rgba(0,0,0,0.15)}.progress-striped .bar{background-color:#149bdf;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);-webkit-background-size:40px 40px;-moz-background-size:40px 40px;-o-background-size:40px 40px;background-size:40px 40px}.progress.active .bar{-webkit-animation:progress-bar-stripes 2s linear infinite;-moz-animation:progress-bar-stripes 2s linear infinite;-ms-animation:progress-bar-stripes 2s linear 
infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-danger .bar,.progress .bar-danger{background-color:#dd514c;background-image:-moz-linear-gradient(top,#ee5f5b,#c43c35);background-image:-webkit-gradient(linear,0 0,0 100%,from(#ee5f5b),to(#c43c35));background-image:-webkit-linear-gradient(top,#ee5f5b,#c43c35);background-image:-o-linear-gradient(top,#ee5f5b,#c43c35);background-image:linear-gradient(to bottom,#ee5f5b,#c43c35);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffee5f5b',endColorstr='#ffc43c35',GradientType=0)}.progress-danger.progress-striped .bar,.progress-striped .bar-danger{background-color:#ee5f5b;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.progress-success .bar,.progress .bar-success{background-color:#5eb95e;background-image:-moz-linear-gradient(top,#62c462,#57a957);background-image:-webkit-gradient(linear,0 0,0 100%,from(#62c462),to(#57a957));background-image:-webkit-linear-gradient(top,#62c462,#57a957);background-image:-o-linear-gradient(top,#62c462,#57a957);background-image:linear-gradient(to bottom,#62c462,#57a957);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff62c462',endColorstr='#ff57a957',GradientType=0)}.progress-success.progress-striped .bar,.progress-striped .bar-success{background-color:#62c462;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.progress-info .bar,.progress 
.bar-info{background-color:#4bb1cf;background-image:-moz-linear-gradient(top,#5bc0de,#339bb9);background-image:-webkit-gradient(linear,0 0,0 100%,from(#5bc0de),to(#339bb9));background-image:-webkit-linear-gradient(top,#5bc0de,#339bb9);background-image:-o-linear-gradient(top,#5bc0de,#339bb9);background-image:linear-gradient(to bottom,#5bc0de,#339bb9);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de',endColorstr='#ff339bb9',GradientType=0)}.progress-info.progress-striped .bar,.progress-striped .bar-info{background-color:#5bc0de;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.progress-warning .bar,.progress .bar-warning{background-color:#faa732;background-image:-moz-linear-gradient(top,#fbb450,#f89406);background-image:-webkit-gradient(linear,0 0,0 100%,from(#fbb450),to(#f89406));background-image:-webkit-linear-gradient(top,#fbb450,#f89406);background-image:-o-linear-gradient(top,#fbb450,#f89406);background-image:linear-gradient(to bottom,#fbb450,#f89406);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffbb450',endColorstr='#fff89406',GradientType=0)}.progress-warning.progress-striped .bar,.progress-striped .bar-warning{background-color:#fbb450;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.accordion{margin-bottom:20px}.accordion-group{margin-bottom:2px;border:1px solid #e5e5e5;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.accordion-heading{border-bottom:0}.accordion-heading .accordion-toggle{display:block;padding:8px 
15px}.accordion-toggle{cursor:pointer}.accordion-inner{padding:9px 15px;border-top:1px solid #e5e5e5}.carousel{position:relative;margin-bottom:20px;line-height:1}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-moz-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>img{display:block;line-height:1}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:40%;left:15px;width:40px;height:40px;margin-top:-20px;font-size:60px;font-weight:100;line-height:30px;color:#fff;text-align:center;background:#222;border:3px solid #fff;-webkit-border-radius:23px;-moz-border-radius:23px;border-radius:23px;opacity:.5;filter:alpha(opacity=50)}.carousel-control.right{right:15px;left:auto}.carousel-control:hover{color:#fff;text-decoration:none;opacity:.9;filter:alpha(opacity=90)}.carousel-caption{position:absolute;right:0;bottom:0;left:0;padding:15px;background:#333;background:rgba(0,0,0,0.75)}.carousel-caption h4,.carousel-caption p{line-height:20px;color:#fff}.carousel-caption h4{margin:0 0 5px}.carousel-caption p{margin-bottom:0}.hero-unit{padding:60px;margin-bottom:30px;font-size:18px;font-weight:200;line-height:30px;color:inherit;background-color:#eee;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.hero-unit h1{margin-bottom:0;font-size:60px;line-height:1;letter-spacing:-1px;color:inherit}.hero-unit li{line-height:30px}.pull-right{float:right}.pull-left{float:left}.hide{display:none}.show{display:block}.invisible{visibility:hidden}.affix{position:fixed} diff --git a/clicon/web/static/bootstrap/img/glyphicons-halflings-white.png b/clicon/web/static/bootstrap/img/glyphicons-halflings-white.png deleted file mode 100644 index 3bf6484a29d8da269f9bc874b25493a45fae3bae..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 8777 zcmZvC1yGz#v+m*$LXcp=A$ZWB0fL7wNbp_U*$~{_gL`my3oP#L!5tQYy99Ta`+g_q zKlj|KJ2f@c)ARJx{q*bbkhN_!|Wn*Vos8{TEhUT@5e;_WJsIMMcG5%>DiS&dv_N`4@J0cnAQ-#>RjZ z00W5t&tJ^l-QC*ST1-p~00u^9XJ=AUl7oW-;2a+x2k__T=grN{+1c4XK0ZL~^z^i$ zp&>vEhr@4fZWb380S18T&!0cQ3IKpHF)?v=b_NIm0Q>vwY7D0baZ)n z31Fa5sELUQARIVaU0nqf0XzT+fB_63aA;@<$l~wse|mcA;^G1TmX?-)e)jkGPfkuA z92@|!<>h5S_4f8QP-JRq>d&7)^Yin8l7K8gED$&_FaV?gY+wLjpoW%~7NDe=nHfMG z5DO3j{R9kv5GbssrUpO)OyvVrlx>u0UKD0i;Dpm5S5dY16(DL5l{ixz|mhJU@&-OWCTb7_%}8-fE(P~+XIRO zJU|wp1|S>|J3KrLcz^+v1f&BDpd>&MAaibR4#5A_4(MucZwG9E1h4@u0P@C8;oo+g zIVj7kfJi{oV~E(NZ*h(@^-(Q(C`Psb3KZ{N;^GB(a8NE*Vwc715!9 zr-H4Ao|T_c6+VT_JH9H+P3>iXSt!a$F`>s`jn`w9GZ_~B!{0soaiV|O_c^R2aWa%}O3jUE)WO=pa zs~_Wz08z|ieY5A%$@FcBF9^!1a}m5ks@7gjn;67N>}S~Hrm`4sM5Hh`q7&5-N{|31 z6x1{ol7BnskoViZ0GqbLa#kW`Z)VCjt1MysKg|rT zi!?s##Ck>8c zpi|>$lGlw#@yMNi&V4`6OBGJ(H&7lqLlcTQ&1zWriG_fL>BnFcr~?;E93{M-xIozQ zO=EHQ#+?<}%@wbWWv23#!V70h9MOuUVaU>3kpTvYfc|LBw?&b*89~Gc9i&8tlT#kF ztpbZoAzkdB+UTy=tx%L3Z4)I{zY(Kb)eg{InobSJmNwPZt$14aS-uc4eKuY8h$dtfyxu^a%zA)>fYI&)@ZXky?^{5>xSC?;w4r&td6vBdi%vHm4=XJH!3yL3?Ep+T5aU_>i;yr_XGq zxZfCzUU@GvnoIk+_Nd`aky>S&H!b*{A%L>?*XPAgWL(Vf(k7qUS}>Zn=U(ZfcOc{B 
[base85 binary payload for deleted glyphicons-halflings-white.png omitted]
diff --git a/clicon/web/static/bootstrap/img/glyphicons-halflings.png b/clicon/web/static/bootstrap/img/glyphicons-halflings.png
deleted file mode 100644
index a9969993201f9cee63cf9f49217646347297b643..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 12799
[base85 binary payload for deleted glyphicons-halflings.png omitted]

diff --git a/clicon/web/static/bootstrap/js/bootstrap.js b/clicon/web/static/bootstrap/js/bootstrap.js
deleted file mode 100644
index 6c15a58..0000000
--- a/clicon/web/static/bootstrap/js/bootstrap.js
+++ /dev/null
@@ -1,2159 +0,0 @@
-/* ===================================================
- * bootstrap-transition.js v2.2.2
- * http://twitter.github.com/bootstrap/javascript.html#transitions
- * ===================================================
- * Copyright 2012 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ========================================================== */ - - -!function ($) { - - "use strict"; // jshint ;_; - - - /* CSS TRANSITION SUPPORT (http://www.modernizr.com/) - * ======================================================= */ - - $(function () { - - $.support.transition = (function () { - - var transitionEnd = (function () { - - var el = document.createElement('bootstrap') - , transEndEventNames = { - 'WebkitTransition' : 'webkitTransitionEnd' - , 'MozTransition' : 'transitionend' - , 'OTransition' : 'oTransitionEnd otransitionend' - , 'transition' : 'transitionend' - } - , name - - for (name in transEndEventNames){ - if (el.style[name] !== undefined) { - return transEndEventNames[name] - } - } - - }()) - - return transitionEnd && { - end: transitionEnd - } - - })() - - }) - -}(window.jQuery);/* ========================================================== - * bootstrap-alert.js v2.2.2 - * http://twitter.github.com/bootstrap/javascript.html#alerts - * ========================================================== - * Copyright 2012 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ========================================================== */ - - -!function ($) { - - "use strict"; // jshint ;_; - - - /* ALERT CLASS DEFINITION - * ====================== */ - - var dismiss = '[data-dismiss="alert"]' - , Alert = function (el) { - $(el).on('click', dismiss, this.close) - } - - Alert.prototype.close = function (e) { - var $this = $(this) - , selector = $this.attr('data-target') - , $parent - - if (!selector) { - selector = $this.attr('href') - selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') //strip for ie7 - } - - $parent = $(selector) - - e && e.preventDefault() - - $parent.length || ($parent = $this.hasClass('alert') ? $this : $this.parent()) - - $parent.trigger(e = $.Event('close')) - - if (e.isDefaultPrevented()) return - - $parent.removeClass('in') - - function removeElement() { - $parent - .trigger('closed') - .remove() - } - - $.support.transition && $parent.hasClass('fade') ? 
- $parent.on($.support.transition.end, removeElement) : - removeElement() - } - - - /* ALERT PLUGIN DEFINITION - * ======================= */ - - var old = $.fn.alert - - $.fn.alert = function (option) { - return this.each(function () { - var $this = $(this) - , data = $this.data('alert') - if (!data) $this.data('alert', (data = new Alert(this))) - if (typeof option == 'string') data[option].call($this) - }) - } - - $.fn.alert.Constructor = Alert - - - /* ALERT NO CONFLICT - * ================= */ - - $.fn.alert.noConflict = function () { - $.fn.alert = old - return this - } - - - /* ALERT DATA-API - * ============== */ - - $(document).on('click.alert.data-api', dismiss, Alert.prototype.close) - -}(window.jQuery);/* ============================================================ - * bootstrap-button.js v2.2.2 - * http://twitter.github.com/bootstrap/javascript.html#buttons - * ============================================================ - * Copyright 2012 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ============================================================ */ - - -!function ($) { - - "use strict"; // jshint ;_; - - - /* BUTTON PUBLIC CLASS DEFINITION - * ============================== */ - - var Button = function (element, options) { - this.$element = $(element) - this.options = $.extend({}, $.fn.button.defaults, options) - } - - Button.prototype.setState = function (state) { - var d = 'disabled' - , $el = this.$element - , data = $el.data() - , val = $el.is('input') ? 'val' : 'html' - - state = state + 'Text' - data.resetText || $el.data('resetText', $el[val]()) - - $el[val](data[state] || this.options[state]) - - // push to event loop to allow forms to submit - setTimeout(function () { - state == 'loadingText' ? - $el.addClass(d).attr(d, d) : - $el.removeClass(d).removeAttr(d) - }, 0) - } - - Button.prototype.toggle = function () { - var $parent = this.$element.closest('[data-toggle="buttons-radio"]') - - $parent && $parent - .find('.active') - .removeClass('active') - - this.$element.toggleClass('active') - } - - - /* BUTTON PLUGIN DEFINITION - * ======================== */ - - var old = $.fn.button - - $.fn.button = function (option) { - return this.each(function () { - var $this = $(this) - , data = $this.data('button') - , options = typeof option == 'object' && option - if (!data) $this.data('button', (data = new Button(this, options))) - if (option == 'toggle') data.toggle() - else if (option) data.setState(option) - }) - } - - $.fn.button.defaults = { - loadingText: 'loading...' 
- } - - $.fn.button.Constructor = Button - - - /* BUTTON NO CONFLICT - * ================== */ - - $.fn.button.noConflict = function () { - $.fn.button = old - return this - } - - - /* BUTTON DATA-API - * =============== */ - - $(document).on('click.button.data-api', '[data-toggle^=button]', function (e) { - var $btn = $(e.target) - if (!$btn.hasClass('btn')) $btn = $btn.closest('.btn') - $btn.button('toggle') - }) - -}(window.jQuery);/* ========================================================== - * bootstrap-carousel.js v2.2.2 - * http://twitter.github.com/bootstrap/javascript.html#carousel - * ========================================================== - * Copyright 2012 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ========================================================== */ - - -!function ($) { - - "use strict"; // jshint ;_; - - - /* CAROUSEL CLASS DEFINITION - * ========================= */ - - var Carousel = function (element, options) { - this.$element = $(element) - this.options = options - this.options.pause == 'hover' && this.$element - .on('mouseenter', $.proxy(this.pause, this)) - .on('mouseleave', $.proxy(this.cycle, this)) - } - - Carousel.prototype = { - - cycle: function (e) { - if (!e) this.paused = false - this.options.interval - && !this.paused - && (this.interval = setInterval($.proxy(this.next, this), this.options.interval)) - return this - } - - , to: function (pos) { - var $active = this.$element.find('.item.active') - , children = $active.parent().children() - , activePos = children.index($active) - , that = this - - if (pos > (children.length - 1) || pos < 0) return - - if (this.sliding) { - return this.$element.one('slid', function () { - that.to(pos) - }) - } - - if (activePos == pos) { - return this.pause().cycle() - } - - return this.slide(pos > activePos ? 'next' : 'prev', $(children[pos])) - } - - , pause: function (e) { - if (!e) this.paused = true - if (this.$element.find('.next, .prev').length && $.support.transition.end) { - this.$element.trigger($.support.transition.end) - this.cycle() - } - clearInterval(this.interval) - this.interval = null - return this - } - - , next: function () { - if (this.sliding) return - return this.slide('next') - } - - , prev: function () { - if (this.sliding) return - return this.slide('prev') - } - - , slide: function (type, next) { - var $active = this.$element.find('.item.active') - , $next = next || $active[type]() - , isCycling = this.interval - , direction = type == 'next' ? 'left' : 'right' - , fallback = type == 'next' ? 'first' : 'last' - , that = this - , e - - this.sliding = true - - isCycling && this.pause() - - $next = $next.length ? 
$next : this.$element.find('.item')[fallback]() - - e = $.Event('slide', { - relatedTarget: $next[0] - }) - - if ($next.hasClass('active')) return - - if ($.support.transition && this.$element.hasClass('slide')) { - this.$element.trigger(e) - if (e.isDefaultPrevented()) return - $next.addClass(type) - $next[0].offsetWidth // force reflow - $active.addClass(direction) - $next.addClass(direction) - this.$element.one($.support.transition.end, function () { - $next.removeClass([type, direction].join(' ')).addClass('active') - $active.removeClass(['active', direction].join(' ')) - that.sliding = false - setTimeout(function () { that.$element.trigger('slid') }, 0) - }) - } else { - this.$element.trigger(e) - if (e.isDefaultPrevented()) return - $active.removeClass('active') - $next.addClass('active') - this.sliding = false - this.$element.trigger('slid') - } - - isCycling && this.cycle() - - return this - } - - } - - - /* CAROUSEL PLUGIN DEFINITION - * ========================== */ - - var old = $.fn.carousel - - $.fn.carousel = function (option) { - return this.each(function () { - var $this = $(this) - , data = $this.data('carousel') - , options = $.extend({}, $.fn.carousel.defaults, typeof option == 'object' && option) - , action = typeof option == 'string' ? option : options.slide - if (!data) $this.data('carousel', (data = new Carousel(this, options))) - if (typeof option == 'number') data.to(option) - else if (action) data[action]() - else if (options.interval) data.cycle() - }) - } - - $.fn.carousel.defaults = { - interval: 5000 - , pause: 'hover' - } - - $.fn.carousel.Constructor = Carousel - - - /* CAROUSEL NO CONFLICT - * ==================== */ - - $.fn.carousel.noConflict = function () { - $.fn.carousel = old - return this - } - - /* CAROUSEL DATA-API - * ================= */ - - $(document).on('click.carousel.data-api', '[data-slide]', function (e) { - var $this = $(this), href - , $target = $($this.attr('data-target') || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '')) //strip for ie7 - , options = $.extend({}, $target.data(), $this.data()) - $target.carousel(options) - e.preventDefault() - }) - -}(window.jQuery);/* ============================================================= - * bootstrap-collapse.js v2.2.2 - * http://twitter.github.com/bootstrap/javascript.html#collapse - * ============================================================= - * Copyright 2012 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * ============================================================ */ - - -!function ($) { - - "use strict"; // jshint ;_; - - - /* COLLAPSE PUBLIC CLASS DEFINITION - * ================================ */ - - var Collapse = function (element, options) { - this.$element = $(element) - this.options = $.extend({}, $.fn.collapse.defaults, options) - - if (this.options.parent) { - this.$parent = $(this.options.parent) - } - - this.options.toggle && this.toggle() - } - - Collapse.prototype = { - - constructor: Collapse - - , dimension: function () { - var hasWidth = this.$element.hasClass('width') - return hasWidth ? 'width' : 'height' - } - - , show: function () { - var dimension - , scroll - , actives - , hasData - - if (this.transitioning) return - - dimension = this.dimension() - scroll = $.camelCase(['scroll', dimension].join('-')) - actives = this.$parent && this.$parent.find('> .accordion-group > .in') - - if (actives && actives.length) { - hasData = actives.data('collapse') - if (hasData && hasData.transitioning) return - actives.collapse('hide') - hasData || actives.data('collapse', null) - } - - this.$element[dimension](0) - this.transition('addClass', $.Event('show'), 'shown') - $.support.transition && this.$element[dimension](this.$element[0][scroll]) - } - - , hide: function () { - var dimension - if (this.transitioning) return - dimension = this.dimension() - this.reset(this.$element[dimension]()) - this.transition('removeClass', $.Event('hide'), 'hidden') - this.$element[dimension](0) - } - - , reset: function (size) { - var dimension = this.dimension() - - this.$element - .removeClass('collapse') - [dimension](size || 'auto') - [0].offsetWidth - - this.$element[size !== null ? 'addClass' : 'removeClass']('collapse') - - return this - } - - , transition: function (method, startEvent, completeEvent) { - var that = this - , complete = function () { - if (startEvent.type == 'show') that.reset() - that.transitioning = 0 - that.$element.trigger(completeEvent) - } - - this.$element.trigger(startEvent) - - if (startEvent.isDefaultPrevented()) return - - this.transitioning = 1 - - this.$element[method]('in') - - $.support.transition && this.$element.hasClass('collapse') ? - this.$element.one($.support.transition.end, complete) : - complete() - } - - , toggle: function () { - this[this.$element.hasClass('in') ? 'hide' : 'show']() - } - - } - - - /* COLLAPSE PLUGIN DEFINITION - * ========================== */ - - var old = $.fn.collapse - - $.fn.collapse = function (option) { - return this.each(function () { - var $this = $(this) - , data = $this.data('collapse') - , options = typeof option == 'object' && option - if (!data) $this.data('collapse', (data = new Collapse(this, options))) - if (typeof option == 'string') data[option]() - }) - } - - $.fn.collapse.defaults = { - toggle: true - } - - $.fn.collapse.Constructor = Collapse - - - /* COLLAPSE NO CONFLICT - * ==================== */ - - $.fn.collapse.noConflict = function () { - $.fn.collapse = old - return this - } - - - /* COLLAPSE DATA-API - * ================= */ - - $(document).on('click.collapse.data-api', '[data-toggle=collapse]', function (e) { - var $this = $(this), href - , target = $this.attr('data-target') - || e.preventDefault() - || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '') //strip for ie7 - , option = $(target).data('collapse') ? 'toggle' : $this.data() - $this[$(target).hasClass('in') ? 
'addClass' : 'removeClass']('collapsed') - $(target).collapse(option) - }) - -}(window.jQuery);/* ============================================================ - * bootstrap-dropdown.js v2.2.2 - * http://twitter.github.com/bootstrap/javascript.html#dropdowns - * ============================================================ - * Copyright 2012 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ============================================================ */ - - -!function ($) { - - "use strict"; // jshint ;_; - - - /* DROPDOWN CLASS DEFINITION - * ========================= */ - - var toggle = '[data-toggle=dropdown]' - , Dropdown = function (element) { - var $el = $(element).on('click.dropdown.data-api', this.toggle) - $('html').on('click.dropdown.data-api', function () { - $el.parent().removeClass('open') - }) - } - - Dropdown.prototype = { - - constructor: Dropdown - - , toggle: function (e) { - var $this = $(this) - , $parent - , isActive - - if ($this.is('.disabled, :disabled')) return - - $parent = getParent($this) - - isActive = $parent.hasClass('open') - - clearMenus() - - if (!isActive) { - $parent.toggleClass('open') - } - - $this.focus() - - return false - } - - , keydown: function (e) { - var $this - , $items - , $active - , $parent - , isActive - , index - - if (!/(38|40|27)/.test(e.keyCode)) return - - $this = $(this) - - e.preventDefault() - e.stopPropagation() - - if ($this.is('.disabled, :disabled')) return - - $parent = getParent($this) - - isActive = $parent.hasClass('open') - - if (!isActive || (isActive && e.keyCode == 27)) return $this.click() - - $items = $('[role=menu] li:not(.divider):visible a', $parent) - - if (!$items.length) return - - index = $items.index($items.filter(':focus')) - - if (e.keyCode == 38 && index > 0) index-- // up - if (e.keyCode == 40 && index < $items.length - 1) index++ // down - if (!~index) index = 0 - - $items - .eq(index) - .focus() - } - - } - - function clearMenus() { - $(toggle).each(function () { - getParent($(this)).removeClass('open') - }) - } - - function getParent($this) { - var selector = $this.attr('data-target') - , $parent - - if (!selector) { - selector = $this.attr('href') - selector = selector && /#/.test(selector) && selector.replace(/.*(?=#[^\s]*$)/, '') //strip for ie7 - } - - $parent = $(selector) - $parent.length || ($parent = $this.parent()) - - return $parent - } - - - /* DROPDOWN PLUGIN DEFINITION - * ========================== */ - - var old = $.fn.dropdown - - $.fn.dropdown = function (option) { - return this.each(function () { - var $this = $(this) - , data = $this.data('dropdown') - if (!data) $this.data('dropdown', (data = new Dropdown(this))) - if (typeof option == 'string') data[option].call($this) - }) - } - - $.fn.dropdown.Constructor = Dropdown - - - /* DROPDOWN NO CONFLICT - * ==================== */ - - $.fn.dropdown.noConflict = function () { - $.fn.dropdown = old - return this - } - - - /* APPLY TO STANDARD DROPDOWN ELEMENTS - * =================================== */ - - $(document) 
- .on('click.dropdown.data-api touchstart.dropdown.data-api', clearMenus) - .on('click.dropdown touchstart.dropdown.data-api', '.dropdown form', function (e) { e.stopPropagation() }) - .on('touchstart.dropdown.data-api', '.dropdown-menu', function (e) { e.stopPropagation() }) - .on('click.dropdown.data-api touchstart.dropdown.data-api' , toggle, Dropdown.prototype.toggle) - .on('keydown.dropdown.data-api touchstart.dropdown.data-api', toggle + ', [role=menu]' , Dropdown.prototype.keydown) - -}(window.jQuery);/* ========================================================= - * bootstrap-modal.js v2.2.2 - * http://twitter.github.com/bootstrap/javascript.html#modals - * ========================================================= - * Copyright 2012 Twitter, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ========================================================= */ - - -!function ($) { - - "use strict"; // jshint ;_; - - - /* MODAL CLASS DEFINITION - * ====================== */ - - var Modal = function (element, options) { - this.options = options - this.$element = $(element) - .delegate('[data-dismiss="modal"]', 'click.dismiss.modal', $.proxy(this.hide, this)) - this.options.remote && this.$element.find('.modal-body').load(this.options.remote) - } - - Modal.prototype = { - - constructor: Modal - - , toggle: function () { - return this[!this.isShown ? 'show' : 'hide']() - } - - , show: function () { - var that = this - , e = $.Event('show') - - this.$element.trigger(e) - - if (this.isShown || e.isDefaultPrevented()) return - - this.isShown = true - - this.escape() - - this.backdrop(function () { - var transition = $.support.transition && that.$element.hasClass('fade') - - if (!that.$element.parent().length) { - that.$element.appendTo(document.body) //don't move modals dom position - } - - that.$element - .show() - - if (transition) { - that.$element[0].offsetWidth // force reflow - } - - that.$element - .addClass('in') - .attr('aria-hidden', false) - - that.enforceFocus() - - transition ? - that.$element.one($.support.transition.end, function () { that.$element.focus().trigger('shown') }) : - that.$element.focus().trigger('shown') - - }) - } - - , hide: function (e) { - e && e.preventDefault() - - var that = this - - e = $.Event('hide') - - this.$element.trigger(e) - - if (!this.isShown || e.isDefaultPrevented()) return - - this.isShown = false - - this.escape() - - $(document).off('focusin.modal') - - this.$element - .removeClass('in') - .attr('aria-hidden', true) - - $.support.transition && this.$element.hasClass('fade') ? 
- this.hideWithTransition() : - this.hideModal() - } - - , enforceFocus: function () { - var that = this - $(document).on('focusin.modal', function (e) { - if (that.$element[0] !== e.target && !that.$element.has(e.target).length) { - that.$element.focus() - } - }) - } - - , escape: function () { - var that = this - if (this.isShown && this.options.keyboard) { - this.$element.on('keyup.dismiss.modal', function ( e ) { - e.which == 27 && that.hide() - }) - } else if (!this.isShown) { - this.$element.off('keyup.dismiss.modal') - } - } - - , hideWithTransition: function () { - var that = this - , timeout = setTimeout(function () { - that.$element.off($.support.transition.end) - that.hideModal() - }, 500) - - this.$element.one($.support.transition.end, function () { - clearTimeout(timeout) - that.hideModal() - }) - } - - , hideModal: function (that) { - this.$element - .hide() - .trigger('hidden') - - this.backdrop() - } - - , removeBackdrop: function () { - this.$backdrop.remove() - this.$backdrop = null - } - - , backdrop: function (callback) { - var that = this - , animate = this.$element.hasClass('fade') ? 'fade' : '' - - if (this.isShown && this.options.backdrop) { - var doAnimate = $.support.transition && animate - - this.$backdrop = $('