diff --git a/.travis.yml b/.travis.yml index c6f4a57e9..2cdcd85bf 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,23 +6,33 @@ before_install: - sudo apt-get update -qq - sudo apt-get install -qq libhdf5-dev install: - - pip install -q tensorflow + - pip install -q .[tensorflow] - pip install -q .[tests] + # Make sure we have the latest version of numpy - avoid problems we were + # seeing with Python 3 + - pip install -q -U numpy env: global: - T2T_PROBLEM=algorithmic_reverse_binary40_test - T2T_DATA_DIR=/tmp/t2t-data - T2T_TRAIN_DIR=/tmp/t2t-train script: + # Check import + - python -c "from tensor2tensor.models import transformer; print(transformer.Transformer.__name__)" + + # Run tests - pytest --ignore=tensor2tensor/utils/registry_test.py --ignore=tensor2tensor/problems_test.py --ignore=tensor2tensor/utils/trainer_lib_test.py --ignore=tensor2tensor/data_generators/algorithmic_math_test.py - pytest tensor2tensor/utils/registry_test.py - pytest tensor2tensor/utils/trainer_lib_test.py + + # Run installed scripts - t2t-datagen 2>&1 | grep translate && echo passed - - t2t-trainer --registry_help --t2t_usr_dir=./tensor2tensor/test_data/example_usr_dir 2>&1 | grep my_very_own_hparams && echo passed - - python -c "from tensor2tensor.models import transformer; print(transformer.Transformer.__name__)" - t2t-trainer --registry_help - - mkdir $T2T_DATA_DIR - - mkdir $T2T_TRAIN_DIR + + # Test --t2t_usr_dir + - t2t-trainer --registry_help --t2t_usr_dir=./tensor2tensor/test_data/example_usr_dir 2>&1 | grep my_very_own_hparams && echo passed + + # Run data generation, training, and decoding on a dummy problem - t2t-datagen --problem=$T2T_PROBLEM --data_dir=$T2T_DATA_DIR - t2t-trainer --problems=$T2T_PROBLEM --data_dir=$T2T_DATA_DIR --model=transformer --hparams_set=transformer_tiny --train_steps=5 --eval_steps=5 --output_dir=$T2T_TRAIN_DIR - t2t-decoder --problems=$T2T_PROBLEM --data_dir=$T2T_DATA_DIR --model=transformer --hparams_set=transformer_tiny --output_dir=$T2T_TRAIN_DIR --decode_hparams='num_samples=10' diff --git a/README.md b/README.md index ed0f89b5f..de8de0bfe 100644 --- a/README.md +++ b/README.md @@ -148,8 +148,11 @@ t2t-decoder \ --decode_from_file=$DECODE_FILE \ --decode_to_file=translation.en -# Eval BLEU -# (Always report proper BLEU in papers, not the internal approx_bleu.) +# See the translations +cat translation.en + +# Evaluate the BLEU score +# Note: Report this BLEU score in papers, not the internal approx_bleu metric. t2t-bleu --translation=translation.en --reference=ref-translation.de ``` diff --git a/docs/cloud_tpu.md b/docs/cloud_tpu.md index 55144e69c..d375c6741 100644 --- a/docs/cloud_tpu.md +++ b/docs/cloud_tpu.md @@ -10,54 +10,22 @@ Models and hparams that are known to work on TPU: * `resnet50` with `resnet_base` * `revnet104` with `revnet_base` -To run on TPUs, you need to be part of the alpha program; if you're not, these -commands won't work for you currently, but access will expand soon, so get -excited for your future ML supercomputers in the cloud. +TPU access is currently limited, but access will expand soon, so get excited for +your future ML supercomputers in the cloud. ## Tutorial: Transformer En-De translation on TPU -Update `gcloud`: `gcloud components update` +**Note**: You'll need TensorFlow 1.5+. -Set your default zone to a TPU-enabled zone. TPU machines are only available in -certain zones for now. +Configure the `gcloud` CLI: ``` +gcloud components update +gcloud auth application-default login +# Set your default zone to a TPU-enabled zone. 
gcloud config set compute/zone us-central1-f ``` -Launch a GCE instance; this will run the Python trainer. -``` -gcloud compute instances create $USER-vm \ - --machine-type=n1-standard-8 \ - --image-family=tf-nightly \ - --image-project=ml-images \ - --scopes=https://www.googleapis.com/auth/cloud-platform -``` - -Launch the TPU instance; the Python program will connect to this to train on the -TPU device. -``` -gcloud alpha compute tpus list -# Make an IP with structure 10.240.X.2 that’s unique in the list -TPU_IP=10.240.0.2 -gcloud alpha compute tpus create \ - $USER-tpu \ - --range=${TPU_IP/%2/0}/29 \ - --version=nightly -``` - -SSH in with port forwarding for TensorBoard -``` -gcloud compute ssh $USER-vm -- -L 6006:localhost:6006 -``` - -Now that you're on the cloud instance, install T2T: -``` -pip install tensor2tensor --user -# Add the python bin dir to your path -export PATH=$HOME/.local/bin:$PATH -``` - -Generate data to GCS +Generate data to GCS. If you already have the data, use `gsutil cp` to copy to GCS. ``` GCS_BUCKET=gs://my-bucket @@ -65,21 +33,16 @@ DATA_DIR=$GCS_BUCKET/t2t/data/ t2t-datagen --problem=translate_ende_wmt8k --data_dir=$DATA_DIR ``` -Setup some vars used below. `TPU_IP` and `DATA_DIR` should be the same as what -was used above. Note that the `DATA_DIR` and `OUT_DIR` must be GCS buckets. +Specify an output directory and launch TensorBoard to monitor training: ``` -TPU_IP=10.240.0.2 -DATA_DIR=$GCS_BUCKET/t2t/data/ -OUT_DIR=$GCS_BUCKET/t2t/training/transformer_ende_1 -TPU_MASTER=grpc://$TPU_IP:8470 +OUT_DIR=$GCS_BUCKET/t2t/training/transformer_v1 +tensorboard --logdir=$OUT_DIR ``` -Launch TensorBoard in the background so you can monitor training: -``` -tensorboard --logdir=$OUT_DIR > /tmp/tensorboard_logs.txt 2>&1 & -``` +Note that both the data and output directories must be Google Cloud Storage +buckets (i.e. start with `gs://`). -Train and evaluate. +Launch! It's as simple as adding the `--cloud_tpu` flag. ``` t2t-trainer \ --model=transformer \ @@ -88,21 +51,27 @@ t2t-trainer \ --train_steps=10 \ --eval_steps=10 \ --local_eval_frequency=10 \ - --iterations_per_loop=10 \ - --master=$TPU_MASTER \ - --use_tpu=True \ --data_dir=$DATA_DIR \ - --output_dir=$OUT_DIR + --output_dir=$OUT_DIR \ + --cloud_tpu \ + --cloud_delete_on_done ``` The above command will train for 10 steps, then evaluate for 10 steps. You can (and should) increase the number of total training steps with the `--train_steps` flag. Evaluation will happen every `--local_eval_frequency` -steps, each time for `--eval_steps`. When you increase then number of training -steps, also increase `--iterations_per_loop`, which controls how frequently the -TPU machine returns control to the host machine (1000 seems like a fine number). - -Back on your local machine, open your browser and navigate to `localhost:6006` -for TensorBoard. +steps, each time for `--eval_steps`. The `--cloud_delete_on_done` flag has the +trainer delete the VMs on completion. Voila. Enjoy your new supercomputer. + +Note that checkpoints are compatible between CPU, GPU, and TPU models so you can +switch between hardware at will. + +## Additional flags + +* `--cloud_vm_name`: The name of the VM to use or create. This can be reused + across multiple concurrent runs. +* `--cloud_tpu_name`: The name of the TPU instance to use or create. If you want + to launch multiple jobs on TPU, provide different names here for each one. + Each TPU instance can only be training one model at a time. 
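For reference, the plumbing behind the `--cloud_tpu` flag documented above is the new `maybe_cloud_tpu()` context manager added to `tensor2tensor/bin/t2t_trainer.py` later in this diff. Below is a condensed, illustrative sketch of that flow; the wrapper name `cloud_tpu_session` is made up for illustration, and the internals of the new `tensor2tensor.utils.cloud` module are not shown in this diff — only its `cloud.cloud_tpu(vm_name, tpu_name, delete_on_done=...)` entry point, which is what the trainer calls.

```python
# Sketch of the --cloud_tpu flow, mirroring maybe_cloud_tpu() in
# tensor2tensor/bin/t2t_trainer.py below. Not the actual implementation.
import contextlib

from tensor2tensor.utils import cloud  # new module introduced in this change


@contextlib.contextmanager
def cloud_tpu_session(vm_name, tpu_name, data_dir, output_dir,
                      delete_on_done=False):
  """Provision (or reuse) a Cloud VM + TPU and yield the TPU master address."""
  # Cloud TPUs read training data from and write checkpoints to GCS, so both
  # directories must be gs:// paths.
  if not (data_dir.startswith("gs://") and output_dir.startswith("gs://")):
    raise ValueError("data_dir and output_dir must be gs:// paths on TPU.")
  with cloud.cloud_tpu(vm_name, tpu_name,
                       delete_on_done=delete_on_done) as tpu_master:
    # The trainer points FLAGS.master at this grpc:// address and sets
    # FLAGS.use_tpu = True before building the experiment.
    yield tpu_master
```

This is why the command above no longer needs explicit `--master` or `--use_tpu=True` flags: the trainer fills them in itself once the VM and TPU instance are up.
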
diff --git a/docs/walkthrough.md b/docs/walkthrough.md index 06a15d1c8..de8de0bfe 100644 --- a/docs/walkthrough.md +++ b/docs/walkthrough.md @@ -133,6 +133,7 @@ t2t-trainer \ DECODE_FILE=$DATA_DIR/decode_this.txt echo "Hello world" >> $DECODE_FILE echo "Goodbye world" >> $DECODE_FILE +echo -e 'Hallo Welt\nAuf Wiedersehen Welt' > ref-translation.de BEAM_SIZE=4 ALPHA=0.6 @@ -144,9 +145,15 @@ t2t-decoder \ --hparams_set=$HPARAMS \ --output_dir=$TRAIN_DIR \ --decode_hparams="beam_size=$BEAM_SIZE,alpha=$ALPHA" \ - --decode_from_file=$DECODE_FILE + --decode_from_file=$DECODE_FILE \ + --decode_to_file=translation.en -cat $DECODE_FILE.$MODEL.$HPARAMS.beam$BEAM_SIZE.alpha$ALPHA.decodes +# See the translations +cat translation.en + +# Evaluate the BLEU score +# Note: Report this BLEU score in papers, not the internal approx_bleu metric. +t2t-bleu --translation=translation.en --reference=ref-translation.de ``` --- diff --git a/setup.py b/setup.py index ede08f6ae..1d3f14a94 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ setup( name='tensor2tensor', - version='1.4.2', + version='1.4.3', description='Tensor2Tensor', author='Google Inc.', author_email='no-reply@google.com', @@ -36,7 +36,6 @@ 'future', 'gevent', 'gunicorn', - 'gym', 'numpy', 'requests', 'scipy', @@ -44,8 +43,8 @@ 'six', ], extras_require={ - 'tensorflow': ['tensorflow>=1.4.1'], - 'tensorflow_gpu': ['tensorflow-gpu>=1.4.1'], + 'tensorflow': ['tensorflow>=1.5.0'], + 'tensorflow_gpu': ['tensorflow-gpu>=1.5.0'], 'tests': ['pytest', 'h5py', 'mock'], }, classifiers=[ diff --git a/tensor2tensor/bin/t2t_datagen.py b/tensor2tensor/bin/t2t_datagen.py index 451b99a3a..8c91b7ee0 100644 --- a/tensor2tensor/bin/t2t_datagen.py +++ b/tensor2tensor/bin/t2t_datagen.py @@ -168,6 +168,8 @@ def main(_): tf.logging.warning("It is strongly recommended to specify --data_dir. 
" "Data will be written to default data_dir=%s.", FLAGS.data_dir) + FLAGS.data_dir = os.path.expanduser(FLAGS.data_dir) + tf.gfile.MakeDirs(FLAGS.data_dir) tf.logging.info("Generating problems:\n%s" % registry.display_list_by_prefix(problems, diff --git a/tensor2tensor/bin/t2t_decoder.py b/tensor2tensor/bin/t2t_decoder.py index bf87949f5..e7ae6b74a 100644 --- a/tensor2tensor/bin/t2t_decoder.py +++ b/tensor2tensor/bin/t2t_decoder.py @@ -53,8 +53,9 @@ "Path to the source file for decoding") flags.DEFINE_string("decode_to_file", None, "Path to the decoded (output) file") -flags.DEFINE_bool("keep_timestamp", True, - "Set the mtime of the decoded file to the checkpoint_path+'.index' mtime.") +flags.DEFINE_bool("keep_timestamp", False, + "Set the mtime of the decoded file to the " + "checkpoint_path+'.index' mtime.") flags.DEFINE_bool("decode_interactive", False, "Interactive local inference mode.") flags.DEFINE_integer("decode_shards", 1, "Number of decoding replicas.") @@ -83,7 +84,7 @@ def decode(estimator, hparams, decode_hp): decode_hp, FLAGS.decode_to_file, checkpoint_path=FLAGS.checkpoint_path) if FLAGS.checkpoint_path and FLAGS.keep_timestamp: - ckpt_time = os.path.getmtime(FLAGS.checkpoint_path + '.index') + ckpt_time = os.path.getmtime(FLAGS.checkpoint_path + ".index") os.utime(FLAGS.decode_to_file, (ckpt_time, ckpt_time)) else: decoding.decode_from_dataset( diff --git a/tensor2tensor/bin/t2t_trainer.py b/tensor2tensor/bin/t2t_trainer.py index 77cfa98d5..8f1f0dfdc 100644 --- a/tensor2tensor/bin/t2t_trainer.py +++ b/tensor2tensor/bin/t2t_trainer.py @@ -26,6 +26,7 @@ from tensor2tensor import models # pylint: disable=unused-import from tensor2tensor import problems as problems_lib # pylint: disable=unused-import +from tensor2tensor.utils import cloud from tensor2tensor.utils import decoding from tensor2tensor.utils import flags as t2t_flags # pylint: disable=unused-import from tensor2tensor.utils import registry @@ -46,7 +47,7 @@ "available to the t2t-trainer.") flags.DEFINE_integer("random_seed", 1234, "Random seed.") flags.DEFINE_integer("tpu_num_shards", 8, "Number of tpu shards.") -flags.DEFINE_integer("iterations_per_loop", 1000, +flags.DEFINE_integer("iterations_per_loop", 100, "Number of iterations in a TPU training loop.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU.") flags.DEFINE_integer("tpu_infeed_sleep_secs", None, @@ -71,6 +72,15 @@ except: # pylint: disable=bare-except pass +# Google Cloud TPUs +flags.DEFINE_bool("cloud_tpu", False, "Whether to launch on Cloud TPUs.") +flags.DEFINE_string("cloud_vm_name", "%s-vm" % os.getenv("USER"), + "Name of Cloud VM to use or create.") +flags.DEFINE_string("cloud_tpu_name", "%s-tpu" % os.getenv("USER"), + "Name of Cloud TPU instance to use or create.") +flags.DEFINE_bool("cloud_delete_on_done", False, + "Whether to delete the VM and TPU instance when done.") + def get_problem_name(): problems = FLAGS.problems.split("-") @@ -79,10 +89,10 @@ def get_problem_name(): def create_hparams(): - if FLAGS.use_tpu and "tpu" not in FLAGS.hparams_set: - tf.logging.warn("Not all hyperparameter sets work on TPU. When available " - "for a given model, prefer hparams_sets with a '_tpu' " - "suffix, e.g. transformer_tpu.") + if (FLAGS.cloud_tpu or FLAGS.use_tpu) and "tpu" not in FLAGS.hparams_set: + tf.logging.warn("Not all hyperparameter sets work on TPU. " + "Prefer hparams_sets with a '_tpu' suffix, " + "e.g. 
transformer_tpu, if available for your model.") return trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams) @@ -109,7 +119,8 @@ def create_experiment_fn(): def create_run_config(hp): save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency) - if FLAGS.save_checkpoints_secs: + save_ckpt_secs = FLAGS.save_checkpoints_secs or None + if save_ckpt_secs: save_ckpt_steps = None return trainer_lib.create_run_config( model_dir=os.path.expanduser(FLAGS.output_dir), @@ -118,7 +129,7 @@ def create_run_config(hp): num_shards=FLAGS.tpu_num_shards, log_device_placement=FLAGS.log_device_placement, save_checkpoints_steps=save_ckpt_steps, - save_checkpoints_secs=FLAGS.save_checkpoints_secs, + save_checkpoints_secs=save_ckpt_secs, keep_checkpoint_max=FLAGS.keep_checkpoint_max, keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours, num_gpus=FLAGS.worker_gpu, @@ -126,7 +137,7 @@ def create_run_config(hp): shard_to_cpu=FLAGS.locally_shard_to_cpu, num_async_replicas=FLAGS.worker_replicas, gpu_mem_fraction=FLAGS.worker_gpu_memory_fraction, - enable_graph_rewriter=FLAGS.experimental_optimize_placement, + enable_graph_rewriter=FLAGS.enable_graph_rewriter, use_tpu=FLAGS.use_tpu, schedule=FLAGS.schedule, no_data_parallelism=hp.no_data_parallelism, @@ -156,9 +167,8 @@ def generate_data(): @contextlib.contextmanager def profile_context(): if FLAGS.profile: - with tf.contrib.tfprof.ProfileContext("t2tprof", - trace_steps=range(100), - dump_steps=range(100)) as pctx: + with tf.contrib.tfprof.ProfileContext( + "t2tprof", trace_steps=range(100), dump_steps=range(100)) as pctx: opts = tf.profiler.ProfileOptionBuilder.time_and_memory() pctx.add_auto_profiling("op", opts, range(100)) yield @@ -188,8 +198,7 @@ def save_metadata(hparams): flags_str = FLAGS.flags_into_string() t2t_flags_str = "\n".join([ "--%s=%s" % (f.name, f.value) - for f in FLAGS.flags_by_module_dict()[ - "tensor2tensor.utils.flags"] + for f in FLAGS.flags_by_module_dict()["tensor2tensor.utils.flags"] ]) else: flags_dict = FLAGS.__dict__["__flags"] @@ -220,6 +229,29 @@ def execute_schedule(exp): getattr(exp, FLAGS.schedule)() +@contextlib.contextmanager +def maybe_cloud_tpu(): + """If FLAGS.cloud_tpu is set, setup Cloud instances.""" + if not FLAGS.cloud_tpu: + yield + return + + tf.logging.info("Running on Cloud TPU") + + if (not FLAGS.data_dir.startswith("gs://") or + not FLAGS.output_dir.startswith("gs://")): + raise ValueError("To run on Cloud TPUs, data_dir and output_dir need to " + "be gs:// paths, i.e. 
on Google Cloud Storage.") + + FLAGS.use_tpu = True + with cloud.cloud_tpu( + FLAGS.cloud_vm_name, + FLAGS.cloud_tpu_name, + delete_on_done=FLAGS.cloud_delete_on_done) as tpu_master: + FLAGS.master = tpu_master + yield + + def main(_): tf.logging.set_verbosity(tf.logging.INFO) trainer_lib.set_random_seed(FLAGS.random_seed) @@ -230,14 +262,13 @@ def main(_): generate_data() hparams = create_hparams() - run_config = create_run_config(hparams) - if is_chief(): save_metadata(hparams) - exp_fn = create_experiment_fn() - exp = exp_fn(run_config, hparams) - execute_schedule(exp) + with maybe_cloud_tpu(): + exp_fn = create_experiment_fn() + exp = exp_fn(create_run_config(hparams), hparams) + execute_schedule(exp) if __name__ == "__main__": diff --git a/tensor2tensor/bin/t2t_translate_all.py b/tensor2tensor/bin/t2t_translate_all.py index 90d078575..40046e883 100644 --- a/tensor2tensor/bin/t2t_translate_all.py +++ b/tensor2tensor/bin/t2t_translate_all.py @@ -81,7 +81,7 @@ def main(_): if not os.path.exists(flags_path): shutil.copy2(os.path.join(model_dir, "flags.txt"), flags_path) - locals_and_flags = {'FLAGS': FLAGS} + locals_and_flags = {"FLAGS": FLAGS} for model in bleu_hook.stepfiles_iterator(model_dir, FLAGS.wait_minutes, FLAGS.min_steps): tf.logging.info("Translating " + model.filename) diff --git a/tensor2tensor/data_generators/all_problems.py b/tensor2tensor/data_generators/all_problems.py index ba91965af..80734e06d 100644 --- a/tensor2tensor/data_generators/all_problems.py +++ b/tensor2tensor/data_generators/all_problems.py @@ -22,16 +22,22 @@ from tensor2tensor.data_generators import algorithmic from tensor2tensor.data_generators import algorithmic_math from tensor2tensor.data_generators import audio +from tensor2tensor.data_generators import celeba +from tensor2tensor.data_generators import cifar from tensor2tensor.data_generators import cipher from tensor2tensor.data_generators import cnn_dailymail from tensor2tensor.data_generators import desc2code +from tensor2tensor.data_generators import fsns from tensor2tensor.data_generators import gym from tensor2tensor.data_generators import ice_parsing -from tensor2tensor.data_generators import image +from tensor2tensor.data_generators import imagenet from tensor2tensor.data_generators import imdb from tensor2tensor.data_generators import librispeech from tensor2tensor.data_generators import lm1b +from tensor2tensor.data_generators import mnist +from tensor2tensor.data_generators import mscoco from tensor2tensor.data_generators import multinli +from tensor2tensor.data_generators import ocr from tensor2tensor.data_generators import problem_hparams from tensor2tensor.data_generators import ptb from tensor2tensor.data_generators import snli @@ -40,6 +46,7 @@ from tensor2tensor.data_generators import translate_enfr from tensor2tensor.data_generators import translate_enmk from tensor2tensor.data_generators import translate_enzh +from tensor2tensor.data_generators import twentybn from tensor2tensor.data_generators import wiki from tensor2tensor.data_generators import wsj_parsing diff --git a/tensor2tensor/data_generators/celeba.py b/tensor2tensor/data_generators/celeba.py new file mode 100644 index 000000000..5b85c2590 --- /dev/null +++ b/tensor2tensor/data_generators/celeba.py @@ -0,0 +1,170 @@ +# coding=utf-8 +# Copyright 2017 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""CelebA.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import zipfile + +# Dependency imports + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import image_utils +from tensor2tensor.utils import registry + +import tensorflow as tf + + +@registry.register_problem +class ImageCeleba(image_utils.ImageProblem): + """CelebA dataset, aligned and cropped images.""" + IMG_DATA = ("img_align_celeba.zip", + "https://drive.google.com/uc?export=download&" + "id=0B7EVK8r0v71pZjFTYXZWM3FlRnM") + LANDMARKS_DATA = ("celeba_landmarks_align", + "https://drive.google.com/uc?export=download&" + "id=0B7EVK8r0v71pd0FJY3Blby1HUTQ") + ATTR_DATA = ("celeba_attr", "https://drive.google.com/uc?export=download&" + "id=0B7EVK8r0v71pblRyaVFSWGxPY0U") + + LANDMARK_HEADINGS = ("lefteye_x lefteye_y righteye_x righteye_y " + "nose_x nose_y leftmouth_x leftmouth_y rightmouth_x " + "rightmouth_y").split() + ATTR_HEADINGS = ( + "5_o_Clock_Shadow Arched_Eyebrows Attractive Bags_Under_Eyes Bald Bangs " + "Big_Lips Big_Nose Black_Hair Blond_Hair Blurry Brown_Hair " + "Bushy_Eyebrows Chubby Double_Chin Eyeglasses Goatee Gray_Hair " + "Heavy_Makeup High_Cheekbones Male Mouth_Slightly_Open Mustache " + "Narrow_Eyes No_Beard Oval_Face Pale_Skin Pointy_Nose Receding_Hairline " + "Rosy_Cheeks Sideburns Smiling Straight_Hair Wavy_Hair Wearing_Earrings " + "Wearing_Hat Wearing_Lipstick Wearing_Necklace Wearing_Necktie Young" + ).split() + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.input_modality = {"inputs": ("image:identity", 256)} + p.target_modality = ("image:identity", 256) + p.batch_size_multiplier = 256 + p.max_expected_batch_size_per_shard = 4 + p.input_space_id = 1 + p.target_space_id = 1 + + def generator(self, tmp_dir, how_many, start_from=0): + """Image generator for CELEBA dataset. + + Args: + tmp_dir: path to temporary storage directory. + how_many: how many images and labels to generate. + start_from: from which image to start. 
+ + Yields: + A dictionary representing the images with the following fields: + * image/encoded: the string encoding the image as JPEG, + * image/format: the string "jpeg" representing image format, + """ + out_paths = [] + for fname, url in [self.IMG_DATA, self.LANDMARKS_DATA, self.ATTR_DATA]: + path = generator_utils.maybe_download_from_drive(tmp_dir, fname, url) + out_paths.append(path) + + img_path, landmarks_path, attr_path = out_paths # pylint: disable=unbalanced-tuple-unpacking + unzipped_folder = img_path[:-4] + if not tf.gfile.Exists(unzipped_folder): + zipfile.ZipFile(img_path, "r").extractall(tmp_dir) + + with tf.gfile.Open(landmarks_path) as f: + landmarks_raw = f.read() + + with tf.gfile.Open(attr_path) as f: + attr_raw = f.read() + + def process_landmarks(raw_data): + landmarks = {} + lines = raw_data.split("\n") + headings = lines[1].strip().split() + for line in lines[2:-1]: + values = line.strip().split() + img_name = values[0] + landmark_values = [int(v) for v in values[1:]] + landmarks[img_name] = landmark_values + return landmarks, headings + + def process_attrs(raw_data): + attrs = {} + lines = raw_data.split("\n") + headings = lines[1].strip().split() + for line in lines[2:-1]: + values = line.strip().split() + img_name = values[0] + attr_values = [int(v) for v in values[1:]] + attrs[img_name] = attr_values + return attrs, headings + + img_landmarks, _ = process_landmarks(landmarks_raw) + img_attrs, _ = process_attrs(attr_raw) + + image_files = tf.gfile.Glob(unzipped_folder + "/*.jpg") + for filename in image_files[start_from:start_from + how_many]: + img_name = os.path.basename(filename) + landmarks = img_landmarks[img_name] + attrs = img_attrs[img_name] + + with tf.gfile.Open(filename, "r") as f: + encoded_image_data = f.read() + yield { + "image/encoded": [encoded_image_data], + "image/format": ["jpeg"], + "attributes": attrs, + "landmarks": landmarks, + } + + @property + def train_shards(self): + return 100 + + @property + def dev_shards(self): + return 10 + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + generator_utils.generate_dataset_and_shuffle( + self.generator(tmp_dir, 162770), # train + self.training_filepaths(data_dir, self.train_shards, shuffled=False), + self.generator(tmp_dir, 19867, 162770), # dev + self.dev_filepaths(data_dir, self.dev_shards, shuffled=False)) + + +@registry.register_problem +class Img2imgCeleba(ImageCeleba): + """8px to 32px problem.""" + + def dataset_filename(self): + return "image_celeba" + + def preprocess_example(self, example, unused_mode, unused_hparams): + image = example["inputs"] + # Remove boundaries in CelebA images. Remove 40 pixels each side + # vertically and 20 pixels each side horizontally. + image = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40) + image_8 = image_utils.resize_by_area(image, 8) + image_32 = image_utils.resize_by_area(image, 32) + + example["inputs"] = image_8 + example["targets"] = image_32 + return example diff --git a/tensor2tensor/data_generators/cifar.py b/tensor2tensor/data_generators/cifar.py new file mode 100644 index 000000000..0c03ebc30 --- /dev/null +++ b/tensor2tensor/data_generators/cifar.py @@ -0,0 +1,191 @@ +# coding=utf-8 +# Copyright 2017 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""CIFAR."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import tarfile
+
+# Dependency imports
+
+import numpy as np
+
+from six.moves import cPickle
+from six.moves import xrange  # pylint: disable=redefined-builtin
+
+from tensor2tensor.data_generators import generator_utils
+from tensor2tensor.data_generators import image_utils
+from tensor2tensor.data_generators import mnist
+from tensor2tensor.utils import registry
+
+import tensorflow as tf
+
+# URLs and filenames for CIFAR data.
+_CIFAR10_URL = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
+_CIFAR10_PREFIX = "cifar-10-batches-py/"
+_CIFAR10_TRAIN_FILES = [
+    "data_batch_1", "data_batch_2", "data_batch_3", "data_batch_4",
+    "data_batch_5"
+]
+_CIFAR10_TEST_FILES = ["test_batch"]
+_CIFAR10_IMAGE_SIZE = 32
+
+
+def _get_cifar10(directory):
+  """Download and extract CIFAR to directory unless it is there."""
+  filename = os.path.basename(_CIFAR10_URL)
+  path = generator_utils.maybe_download(directory, filename, _CIFAR10_URL)
+  tarfile.open(path, "r:gz").extractall(directory)
+
+
+def cifar10_generator(tmp_dir, training, how_many, start_from=0):
+  """Image generator for CIFAR-10.
+
+  Args:
+    tmp_dir: path to temporary storage directory.
+    training: a Boolean; if true, we use the train set, otherwise the test set.
+    how_many: how many images and labels to generate.
+    start_from: from which image to start.
+
+  Returns:
+    An instance of image_generator that produces CIFAR-10 images and labels.
+ """ + _get_cifar10(tmp_dir) + data_files = _CIFAR10_TRAIN_FILES if training else _CIFAR10_TEST_FILES + all_images, all_labels = [], [] + for filename in data_files: + path = os.path.join(tmp_dir, _CIFAR10_PREFIX, filename) + with tf.gfile.Open(path, "r") as f: + data = cPickle.load(f) + images = data["data"] + num_images = images.shape[0] + images = images.reshape((num_images, 3, _CIFAR10_IMAGE_SIZE, + _CIFAR10_IMAGE_SIZE)) + all_images.extend([ + np.squeeze(images[j]).transpose((1, 2, 0)) for j in xrange(num_images) + ]) + labels = data["labels"] + all_labels.extend([labels[j] for j in xrange(num_images)]) + return image_utils.image_generator( + all_images[start_from:start_from + how_many], + all_labels[start_from:start_from + how_many]) + + +@registry.register_problem +class ImageCifar10Tune(mnist.ImageMnistTune): + """Cifar-10 Tune.""" + + @property + def num_channels(self): + return 3 + + @property + def class_labels(self): + return [ + "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", + "ship", "truck" + ] + + def preprocess_example(self, example, mode, unused_hparams): + image = example["inputs"] + image.set_shape([_CIFAR10_IMAGE_SIZE, _CIFAR10_IMAGE_SIZE, 3]) + if mode == tf.estimator.ModeKeys.TRAIN: + image = image_utils.cifar_image_augmentation(image) + image = tf.image.per_image_standardization(image) + example["inputs"] = image + return example + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return cifar10_generator(tmp_dir, True, 48000) + else: + return cifar10_generator(tmp_dir, True, 2000, 48000) + + +@registry.register_problem +class ImageCifar10(ImageCifar10Tune): + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return cifar10_generator(tmp_dir, True, 50000) + else: + return cifar10_generator(tmp_dir, False, 10000) + + +@registry.register_problem +class ImageCifar10Plain(ImageCifar10): + + def preprocess_example(self, example, mode, unused_hparams): + image = example["inputs"] + image.set_shape([_CIFAR10_IMAGE_SIZE, _CIFAR10_IMAGE_SIZE, 3]) + image = tf.image.per_image_standardization(image) + example["inputs"] = image + return example + + +@registry.register_problem +class ImageCifar10PlainGen(ImageCifar10Plain): + """CIFAR-10 32x32 for image generation without standardization preprep.""" + + def dataset_filename(self): + return "image_cifar10_plain" # Reuse CIFAR-10 plain data. + + def preprocess_example(self, example, mode, unused_hparams): + example["inputs"].set_shape([_CIFAR10_IMAGE_SIZE, _CIFAR10_IMAGE_SIZE, 3]) + example["inputs"] = tf.to_int64(example["inputs"]) + return example + + +@registry.register_problem +class ImageCifar10Plain8(ImageCifar10): + """CIFAR-10 rescaled to 8x8 for output: Conditional image generation.""" + + def dataset_filename(self): + return "image_cifar10_plain" # Reuse CIFAR-10 plain data. + + def preprocess_example(self, example, mode, unused_hparams): + image = example["inputs"] + image = image_utils.resize_by_area(image, 8) + image = tf.image.per_image_standardization(image) + example["inputs"] = image + return example + + +@registry.register_problem +class Img2imgCifar10(ImageCifar10): + """CIFAR-10 rescaled to 8x8 for input and 32x32 for output.""" + + def dataset_filename(self): + return "image_cifar10_plain" # Reuse CIFAR-10 plain data. + + def preprocess_example(self, example, unused_mode, unused_hparams): + + inputs = example["inputs"] + # For Img2Img resize input and output images as desired. 
+ example["inputs"] = image_utils.resize_by_area(inputs, 8) + example["targets"] = image_utils.resize_by_area(inputs, 32) + return example + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.input_modality = {"inputs": ("image:identity", 256)} + p.target_modality = ("image:identity", 256) + p.batch_size_multiplier = 256 + p.max_expected_batch_size_per_shard = 4 + p.input_space_id = 1 + p.target_space_id = 1 diff --git a/tensor2tensor/data_generators/fsns.py b/tensor2tensor/data_generators/fsns.py new file mode 100644 index 000000000..938931c4c --- /dev/null +++ b/tensor2tensor/data_generators/fsns.py @@ -0,0 +1,82 @@ +# coding=utf-8 +# Copyright 2017 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""FSNS.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +# Dependency imports + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import image_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.utils import registry + +import tensorflow as tf + + +@registry.register_problem +class ImageFSNS(image_utils.ImageProblem): + """Problem spec for French Street Name recognition.""" + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + list_url = ("https://raw.githubusercontent.com/tensorflow/models/master/" + "street/python/fsns_urls.txt") + fsns_urls = generator_utils.maybe_download(tmp_dir, "fsns_urls.txt", + list_url) + fsns_files = [ + f.strip() for f in open(fsns_urls, "r") if f.startswith("http://") + ] + for url in fsns_files: + if "/train/train" in url: + generator_utils.maybe_download( + data_dir, "image_fsns-train" + url[-len("-00100-of-00512"):], url) + elif "/validation/validation" in url: + generator_utils.maybe_download( + data_dir, "image_fsns-dev" + url[-len("-00100-of-00512"):], url) + elif "charset" in url: + generator_utils.maybe_download(data_dir, "charset_size134.txt", url) + + def feature_encoders(self, data_dir): + # This vocab file must be present within the data directory. 
+ vocab_filename = os.path.join(data_dir, "charset_size134.txt") + return { + "inputs": text_encoder.ImageEncoder(), + "targets": text_encoder.SubwordTextEncoder(vocab_filename) + } + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.input_modality = {"inputs": (registry.Modalities.IMAGE, 256)} + vocab_size = self._encoders["targets"].vocab_size + p.target_modality = (registry.Modalities.SYMBOL, vocab_size) + p.batch_size_multiplier = 256 + p.max_expected_batch_size_per_shard = 2 + p.input_space_id = problem.SpaceID.IMAGE + p.target_space_id = problem.SpaceID.EN_TOK + + def example_reading_spec(self): + label_key = "image/unpadded_label" + data_fields, data_items_to_decoders = ( + super(ImageFSNS, self).example_reading_spec()) + data_fields[label_key] = tf.VarLenFeature(tf.int64) + data_items_to_decoders[ + "targets"] = tf.contrib.slim.tfexample_decoder.Tensor(label_key) + return data_fields, data_items_to_decoders diff --git a/tensor2tensor/data_generators/gym.py b/tensor2tensor/data_generators/gym.py index 631c2b281..27be9a71b 100644 --- a/tensor2tensor/data_generators/gym.py +++ b/tensor2tensor/data_generators/gym.py @@ -23,8 +23,6 @@ # Dependency imports -import gym - from tensor2tensor.data_generators import generator_utils from tensor2tensor.data_generators import problem from tensor2tensor.utils import registry @@ -33,6 +31,15 @@ +def gym_lib(): + """Access to gym to allow for import of this file without a gym install.""" + try: + import gym # pylint: disable=g-import-not-at-top + except ImportError: + raise ImportError("pip install gym to use gym-based Problems") + return gym + + class GymDiscreteProblem(problem.Problem): """Gym environment with discrete actions and rewards.""" @@ -48,7 +55,7 @@ def env_name(self): @property def env(self): if self._env is None: - self._env = gym.make(self.env_name) + self._env = gym_lib().make(self.env_name) return self._env @property diff --git a/tensor2tensor/data_generators/image.py b/tensor2tensor/data_generators/image.py deleted file mode 100644 index 794d6615a..000000000 --- a/tensor2tensor/data_generators/image.py +++ /dev/null @@ -1,1148 +0,0 @@ -# coding=utf-8 -# Copyright 2017 The Tensor2Tensor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Data generators for image data-sets.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import gzip -import io -import json -import os -import random -import struct -import tarfile -import zipfile - -# Dependency imports - -import numpy as np -from six.moves import cPickle -from six.moves import xrange # pylint: disable=redefined-builtin -from six.moves import zip # pylint: disable=redefined-builtin -from tensor2tensor.data_generators import generator_utils -from tensor2tensor.data_generators import problem -from tensor2tensor.data_generators import text_encoder -from tensor2tensor.layers import common_layers -from tensor2tensor.utils import registry - -import tensorflow as tf - -from tensorflow.python.eager import context - - -def resize_by_area(img, size): - """image resize function used by quite a few image problems.""" - return tf.to_int64( - tf.image.resize_images(img, [size, size], tf.image.ResizeMethod.AREA)) - - -class ImageProblem(problem.Problem): - - def example_reading_spec(self, label_repr=None): - data_fields = { - "image/encoded": tf.FixedLenFeature((), tf.string), - "image/format": tf.FixedLenFeature((), tf.string), - } - - data_items_to_decoders = { - "inputs": - tf.contrib.slim.tfexample_decoder.Image( - image_key="image/encoded", - format_key="image/format", - channels=3), - } - - return data_fields, data_items_to_decoders - - -@registry.register_problem("image_celeba_tune") -class ImageCeleba(ImageProblem): - """CelebA dataset, aligned and cropped images.""" - IMG_DATA = ("img_align_celeba.zip", - "https://drive.google.com/uc?export=download&" - "id=0B7EVK8r0v71pZjFTYXZWM3FlRnM") - LANDMARKS_DATA = ("celeba_landmarks_align", - "https://drive.google.com/uc?export=download&" - "id=0B7EVK8r0v71pd0FJY3Blby1HUTQ") - ATTR_DATA = ("celeba_attr", "https://drive.google.com/uc?export=download&" - "id=0B7EVK8r0v71pblRyaVFSWGxPY0U") - - LANDMARK_HEADINGS = ("lefteye_x lefteye_y righteye_x righteye_y " - "nose_x nose_y leftmouth_x leftmouth_y rightmouth_x " - "rightmouth_y").split() - ATTR_HEADINGS = ( - "5_o_Clock_Shadow Arched_Eyebrows Attractive Bags_Under_Eyes Bald Bangs " - "Big_Lips Big_Nose Black_Hair Blond_Hair Blurry Brown_Hair " - "Bushy_Eyebrows Chubby Double_Chin Eyeglasses Goatee Gray_Hair " - "Heavy_Makeup High_Cheekbones Male Mouth_Slightly_Open Mustache " - "Narrow_Eyes No_Beard Oval_Face Pale_Skin Pointy_Nose Receding_Hairline " - "Rosy_Cheeks Sideburns Smiling Straight_Hair Wavy_Hair Wearing_Earrings " - "Wearing_Hat Wearing_Lipstick Wearing_Necklace Wearing_Necktie Young" - ).split() - - def preprocess_example(self, example, unused_mode, unused_hparams): - - inputs = example["inputs"] - # Remove boundaries in CelebA images. Remove 40 pixels each side - # vertically and 20 pixels each side horizontally. - inputs = tf.image.crop_to_bounding_box(inputs, 40, 20, 218 - 80, 178 - 40) - example["inputs"] = resize_by_area(inputs, 8) - example["targets"] = resize_by_area(inputs, 32) - return example - - def hparams(self, defaults, unused_model_hparams): - p = defaults - p.input_modality = {"inputs": ("image:identity", 256)} - p.target_modality = ("image:identity", 256) - p.batch_size_multiplier = 256 - p.max_expected_batch_size_per_shard = 4 - p.input_space_id = 1 - p.target_space_id = 1 - - def generator(self, tmp_dir, how_many, start_from=0): - """Image generator for CELEBA dataset. - - Args: - tmp_dir: path to temporary storage directory. - how_many: how many images and labels to generate. 
- start_from: from which image to start. - - Yields: - A dictionary representing the images with the following fields: - * image/encoded: the string encoding the image as JPEG, - * image/format: the string "jpeg" representing image format, - """ - out_paths = [] - for fname, url in [self.IMG_DATA, self.LANDMARKS_DATA, self.ATTR_DATA]: - path = generator_utils.maybe_download_from_drive(tmp_dir, fname, url) - out_paths.append(path) - - img_path, landmarks_path, attr_path = out_paths # pylint: disable=unbalanced-tuple-unpacking - unzipped_folder = img_path[:-4] - if not tf.gfile.Exists(unzipped_folder): - zipfile.ZipFile(img_path, "r").extractall(tmp_dir) - - with tf.gfile.Open(landmarks_path) as f: - landmarks_raw = f.read() - - with tf.gfile.Open(attr_path) as f: - attr_raw = f.read() - - def process_landmarks(raw_data): - landmarks = {} - lines = raw_data.split("\n") - headings = lines[1].strip().split() - for line in lines[2:-1]: - values = line.strip().split() - img_name = values[0] - landmark_values = [int(v) for v in values[1:]] - landmarks[img_name] = landmark_values - return landmarks, headings - - def process_attrs(raw_data): - attrs = {} - lines = raw_data.split("\n") - headings = lines[1].strip().split() - for line in lines[2:-1]: - values = line.strip().split() - img_name = values[0] - attr_values = [int(v) for v in values[1:]] - attrs[img_name] = attr_values - return attrs, headings - - img_landmarks, _ = process_landmarks(landmarks_raw) - img_attrs, _ = process_attrs(attr_raw) - - image_files = tf.gfile.Glob(unzipped_folder + "/*.jpg") - for filename in image_files[start_from:start_from + how_many]: - img_name = os.path.basename(filename) - landmarks = img_landmarks[img_name] - attrs = img_attrs[img_name] - - with tf.gfile.Open(filename, "r") as f: - encoded_image_data = f.read() - yield { - "image/encoded": [encoded_image_data], - "image/format": ["jpeg"], - "attributes": attrs, - "landmarks": landmarks, - } - - @property - def train_shards(self): - return 100 - - @property - def dev_shards(self): - return 10 - - def generate_data(self, data_dir, tmp_dir, task_id=-1): - generator_utils.generate_dataset_and_shuffle( - self.generator(tmp_dir, 162770), # train - self.training_filepaths(data_dir, self.train_shards, shuffled=False), - self.generator(tmp_dir, 19867, 162770), # dev - self.dev_filepaths(data_dir, self.dev_shards, shuffled=False)) - - -@registry.register_problem -class ImageFSNS(ImageProblem): - """Problem spec for French Street Name recognition.""" - - def generate_data(self, data_dir, tmp_dir, task_id=-1): - list_url = ("https://raw.githubusercontent.com/tensorflow/models/master/" - "street/python/fsns_urls.txt") - fsns_urls = generator_utils.maybe_download(tmp_dir, "fsns_urls.txt", - list_url) - fsns_files = [ - f.strip() for f in open(fsns_urls, "r") if f.startswith("http://") - ] - for url in fsns_files: - if "/train/train" in url: - generator_utils.maybe_download( - data_dir, "image_fsns-train" + url[-len("-00100-of-00512"):], url) - elif "/validation/validation" in url: - generator_utils.maybe_download( - data_dir, "image_fsns-dev" + url[-len("-00100-of-00512"):], url) - elif "charset" in url: - generator_utils.maybe_download(data_dir, "charset_size134.txt", url) - - def feature_encoders(self, data_dir): - # This vocab file must be present within the data directory. 
- vocab_filename = os.path.join(data_dir, "charset_size134.txt") - return { - "inputs": text_encoder.ImageEncoder(), - "targets": text_encoder.SubwordTextEncoder(vocab_filename) - } - - def hparams(self, defaults, unused_model_hparams): - p = defaults - p.input_modality = {"inputs": (registry.Modalities.IMAGE, 256)} - vocab_size = self._encoders["targets"].vocab_size - p.target_modality = (registry.Modalities.SYMBOL, vocab_size) - p.batch_size_multiplier = 256 - p.max_expected_batch_size_per_shard = 2 - p.input_space_id = problem.SpaceID.IMAGE - p.target_space_id = problem.SpaceID.EN_TOK - - def example_reading_spec(self): - label_key = "image/unpadded_label" - data_fields, data_items_to_decoders = ( - super(ImageFSNS, self).example_reading_spec()) - data_fields[label_key] = tf.VarLenFeature(tf.int64) - data_items_to_decoders[ - "targets"] = tf.contrib.slim.tfexample_decoder.Tensor(label_key) - return data_fields, data_items_to_decoders - - -class Image2ClassProblem(ImageProblem): - """Base class for image classification problems.""" - - @property - def is_small(self): - raise NotImplementedError() - - @property - def num_classes(self): - raise NotImplementedError() - - @property - def train_shards(self): - raise NotImplementedError() - - @property - def dev_shards(self): - return 1 - - @property - def class_labels(self): - return ["ID_%d" % i for i in range(self.num_classes)] - - def feature_encoders(self, data_dir): - del data_dir - return { - "inputs": text_encoder.ImageEncoder(), - "targets": text_encoder.ClassLabelEncoder(self.class_labels) - } - - def generator(self, data_dir, tmp_dir, is_training): - raise NotImplementedError() - - def example_reading_spec(self): - label_key = "image/class/label" - data_fields, data_items_to_decoders = ( - super(Image2ClassProblem, self).example_reading_spec()) - data_fields[label_key] = tf.FixedLenFeature((1,), tf.int64) - - data_items_to_decoders[ - "targets"] = tf.contrib.slim.tfexample_decoder.Tensor(label_key) - return data_fields, data_items_to_decoders - - def hparams(self, defaults, unused_model_hparams): - p = defaults - p.input_modality = {"inputs": (registry.Modalities.IMAGE, 256)} - p.target_modality = (registry.Modalities.CLASS_LABEL, self.num_classes) - p.batch_size_multiplier = 4 if self.is_small else 256 - p.max_expected_batch_size_per_shard = 8 if self.is_small else 2 - p.loss_multiplier = 3.0 if self.is_small else 1.0 - if self._was_reversed: - p.loss_multiplier = 1.0 - p.input_space_id = problem.SpaceID.IMAGE - p.target_space_id = problem.SpaceID.IMAGE_LABEL - - def generate_data(self, data_dir, tmp_dir, task_id=-1): - generator_utils.generate_dataset_and_shuffle( - self.generator(data_dir, tmp_dir, True), - self.training_filepaths(data_dir, self.train_shards, shuffled=False), - self.generator(data_dir, tmp_dir, False), - self.dev_filepaths(data_dir, self.dev_shards, shuffled=False)) - - -def imagenet_preprocess_example(example, mode, resize_size=None): - """Preprocessing used for Imagenet and similar problems.""" - if resize_size is None: - resize_size = [299, 299] - - def preprocess(img): - img = tf.image.resize_images(img, [360, 360]) - img = common_layers.image_augmentation( - tf.to_float(img) / 255., crop_size=resize_size) - return tf.to_int64(img * 255.) - - def resize(img): - return tf.to_int64(tf.image.resize_images(img, resize_size)) - - inputs = tf.cast(example["inputs"], tf.int64) - if mode == tf.estimator.ModeKeys.TRAIN: - example["inputs"] = tf.cond( # Preprocess 90% of the time. 
- tf.less(tf.random_uniform([]), 0.9), - lambda img=inputs: preprocess(img), - lambda img=inputs: resize(img)) - else: - example["inputs"] = resize(inputs) - return example - - -@registry.register_problem -class ImageImagenet(Image2ClassProblem): - """Imagenet.""" - - @property - def is_small(self): - return False - - @property - def num_classes(self): - return 1000 - - def generate_data(self, data_dir, tmp_dir, task_id=-1): - # TODO(lukaszkaiser): find a better way than printing this. - print("To generate the ImageNet dataset in the proper format, follow " - "instructions at https://github.com/tensorflow/models/blob/master" - "/inception/README.md#getting-started") - - def preprocess_example(self, example, mode, _): - return imagenet_preprocess_example(example, mode) - - -@registry.register_problem -class ImageImagenet224(ImageImagenet): - """Imagenet rescaled to 224x224.""" - - def dataset_filename(self): - return "image_imagenet" # Reuse Imagenet data. - - def generate_data(self, data_dir, tmp_dir, task_id=-1): - tf.logging.warning( - "Generate data for image_imagenet224 with image_imagenet") - - def preprocess_example(self, example, mode, _): - return imagenet_preprocess_example(example, mode, resize_size=[224, 224]) - - -@registry.register_problem -class ImageImagenet32(Image2ClassProblem): - """Imagenet rescaled to 32x32.""" - - def dataset_filename(self): - return "image_imagenet" # Reuse Imagenet data. - - @property - def is_small(self): - return True # Modalities like for CIFAR. - - @property - def num_classes(self): - return 1000 - - def generate_data(self, data_dir, tmp_dir, task_id=-1): - # TODO(lukaszkaiser): find a better way than printing this. - print("To generate the ImageNet dataset in the proper format, follow " - "instructions at https://github.com/tensorflow/models/blob/master" - "/inception/README.md#getting-started") - - def preprocess_example(self, example, mode, unused_hparams): - # Just resize with area. - if self._was_reversed: - example["inputs"] = tf.to_int64( - tf.image.resize_images(example["inputs"], [32, 32], - tf.image.ResizeMethod.AREA)) - else: - example = imagenet_preprocess_example(example, mode) - example["inputs"] = tf.to_int64( - tf.image.resize_images(example["inputs"], [32, 32])) - return example - - -@registry.register_problem -class ImageImagenet64(Image2ClassProblem): - """Imagenet rescaled to 64x64.""" - - def dataset_filename(self): - return "image_imagenet" # Reuse Imagenet data. - - @property - def is_small(self): - return True # Modalities like for CIFAR. - - @property - def num_classes(self): - return 1000 - - def generate_data(self, data_dir, tmp_dir, task_id=-1): - # TODO(lukaszkaiser): find a better way than printing this. - print("To generate the ImageNet dataset in the proper format, follow " - "instructions at https://github.com/tensorflow/models/blob/master" - "/inception/README.md#getting-started") - - def preprocess_example(self, example, mode, unused_hparams): - inputs = example["inputs"] - # Just resize with area. - if self._was_reversed: - example["inputs"] = resize_by_area(inputs, 64) - else: - example = imagenet_preprocess_example(example, mode) - example["inputs"] = example["inputs"] = resize_by_area(inputs, 64) - return example - - -@registry.register_problem -class Img2imgImagenet(ImageProblem): - """Imagenet rescaled to 8x8 for input and 32x32 for output.""" - - def dataset_filename(self): - return "image_imagenet" # Reuse Imagenet data. 
- - def preprocess_example(self, example, unused_mode, unused_hparams): - - inputs = example["inputs"] - # For Img2Img resize input and output images as desired. - example["inputs"] = resize_by_area(inputs, 8) - example["targets"] = resize_by_area(inputs, 32) - return example - - def hparams(self, defaults, unused_model_hparams): - p = defaults - p.input_modality = {"inputs": ("image:identity", 256)} - p.target_modality = ("image:identity", 256) - p.batch_size_multiplier = 256 - p.max_expected_batch_size_per_shard = 4 - p.input_space_id = 1 - p.target_space_id = 1 - - -def _encoded_images(images): - if context.in_eager_mode(): - for image in images: - yield tf.image.encode_png(image).numpy() - else: - (width, height, channels) = images[0].shape - with tf.Graph().as_default(): - image_t = tf.placeholder(dtype=tf.uint8, shape=(width, height, channels)) - encoded_image_t = tf.image.encode_png(image_t) - with tf.Session() as sess: - for image in images: - enc_string = sess.run(encoded_image_t, feed_dict={image_t: image}) - yield enc_string - - -def image_generator(images, labels): - """Generator for images that takes image and labels lists and creates pngs. - - Args: - images: list of images given as [width x height x channels] numpy arrays. - labels: list of ints, same length as images. - - Yields: - A dictionary representing the images with the following fields: - * image/encoded: the string encoding the image as PNG, - * image/format: the string "png" representing image format, - * image/class/label: an integer representing the label, - * image/height: an integer representing the height, - * image/width: an integer representing the width. - Every field is actually a singleton list of the corresponding type. - - Raises: - ValueError: if images is an empty list. - """ - if not images: - raise ValueError("Must provide some images for the generator.") - width, height, _ = images[0].shape - for (enc_image, label) in zip(_encoded_images(images), labels): - yield { - "image/encoded": [enc_image], - "image/format": ["png"], - "image/class/label": [int(label)], - "image/height": [height], - "image/width": [width] - } - - -# URLs and filenames for MNIST data. -_MNIST_URL = "http://yann.lecun.com/exdb/mnist/" -_MNIST_TRAIN_DATA_FILENAME = "train-images-idx3-ubyte.gz" -_MNIST_TRAIN_LABELS_FILENAME = "train-labels-idx1-ubyte.gz" -_MNIST_TEST_DATA_FILENAME = "t10k-images-idx3-ubyte.gz" -_MNIST_TEST_LABELS_FILENAME = "t10k-labels-idx1-ubyte.gz" -_MNIST_IMAGE_SIZE = 28 - - -def _get_mnist(directory): - """Download all MNIST files to directory unless they are there.""" - for filename in [ - _MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME, - _MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME - ]: - generator_utils.maybe_download(directory, filename, _MNIST_URL + filename) - - -def _extract_mnist_images(filename, num_images): - """Extract images from an MNIST file into a numpy array. - - Args: - filename: The path to an MNIST images file. - num_images: The number of images in the file. - - Returns: - A numpy array of shape [number_of_images, height, width, channels]. - """ - with gzip.open(filename) as bytestream: - bytestream.read(16) - buf = bytestream.read(_MNIST_IMAGE_SIZE * _MNIST_IMAGE_SIZE * num_images) - data = np.frombuffer(buf, dtype=np.uint8) - data = data.reshape(num_images, _MNIST_IMAGE_SIZE, _MNIST_IMAGE_SIZE, 1) - return data - - -def _extract_mnist_labels(filename, num_labels): - """Extract labels from an MNIST file into integers. 
- - Args: - filename: The path to an MNIST labels file. - num_labels: The number of labels in the file. - - Returns: - A int64 numpy array of shape [num_labels] - """ - with gzip.open(filename) as bytestream: - bytestream.read(8) - buf = bytestream.read(num_labels) - labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) - return labels - - -def mnist_common_generator(tmp_dir, training, how_many, data_filename, - label_filename, start_from=0): - """Image generator for MNIST. - - Args: - tmp_dir: path to temporary storage directory. - training: a Boolean; if true, we use the train set, otherwise the test set. - how_many: how many images and labels to generate. - data_filename: file that contains features data. - label_filename: file that contains labels. - start_from: from which image to start. - - Returns: - An instance of image_generator that produces MNIST images. - """ - data_path = os.path.join(tmp_dir, data_filename) - labels_path = os.path.join(tmp_dir, label_filename) - images = _extract_mnist_images(data_path, 60000 if training else 10000) - labels = _extract_mnist_labels(labels_path, 60000 if training else 10000) - # Shuffle the data to make sure classes are well distributed. - data = list(zip(images, labels)) - random.shuffle(data) - images, labels = list(zip(*data)) - return image_generator(images[start_from:start_from + how_many], - labels[start_from:start_from + how_many]) - - -def mnist_generator(tmp_dir, training, how_many, start_from=0): - """Image generator for MNIST. - - Args: - tmp_dir: path to temporary storage directory. - training: a Boolean; if true, we use the train set, otherwise the test set. - how_many: how many images and labels to generate. - start_from: from which image to start. - - Returns: - An instance of image_generator that produces MNIST images. - """ - _get_mnist(tmp_dir) - d = _MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME - l = _MNIST_TRAIN_LABELS_FILENAME if training else _MNIST_TEST_LABELS_FILENAME - return mnist_common_generator(tmp_dir, training, how_many, d, l, start_from) - - -@registry.register_problem -class ImageMnistTune(Image2ClassProblem): - """MNIST, tuning data.""" - - @property - def is_small(self): - return True - - @property - def num_classes(self): - return 10 - - @property - def class_labels(self): - return [str(c) for c in range(self.num_classes)] - - @property - def train_shards(self): - return 10 - - def generator(self, data_dir, tmp_dir, is_training): - if is_training: - return mnist_generator(tmp_dir, True, 55000) - else: - return mnist_generator(tmp_dir, True, 5000, 55000) - - -@registry.register_problem -class ImageMnist(ImageMnistTune): - - def generator(self, data_dir, tmp_dir, is_training): - if is_training: - return mnist_generator(tmp_dir, True, 60000) - else: - return mnist_generator(tmp_dir, False, 10000) - -# URLs and filenames for MNIST data. -_FASHION_MNIST_URL = ("http://fashion-mnist.s3-website.eu-central-1" - ".amazonaws.com/") -_FASHION_MNIST_LOCAL_FILE_PREFIX = "fashion-" -_FASHION_MNIST_IMAGE_SIZE = 28 - - -def _get_fashion_mnist(directory): - """Download all FashionMNIST files to directory unless they are there.""" - # Fashion mnist files have the same names as MNIST. - # We must choose a separate name (by adding 'fashion-' prefix) in the tmp_dir. 
- for filename in [ - _MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME, - _MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME - ]: - generator_utils.maybe_download(directory, - _FASHION_MNIST_LOCAL_FILE_PREFIX + filename, - _FASHION_MNIST_URL + filename) - - -def fashion_mnist_generator(tmp_dir, training, how_many, start_from=0): - """Image generator for FashionMNIST. - - Args: - tmp_dir: path to temporary storage directory. - training: a Boolean; if true, we use the train set, otherwise the test set. - how_many: how many images and labels to generate. - start_from: from which image to start. - - Returns: - An instance of image_generator that produces MNIST images. - """ - _get_fashion_mnist(tmp_dir) - d = _FASHION_MNIST_LOCAL_FILE_PREFIX + ( - _MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME) - l = _FASHION_MNIST_LOCAL_FILE_PREFIX + ( - _MNIST_TRAIN_LABELS_FILENAME if training else - _MNIST_TEST_LABELS_FILENAME) - return mnist_common_generator(tmp_dir, training, how_many, d, l, start_from) - - -@registry.register_problem -class ImageFashionMnist(Image2ClassProblem): - """Fashion MNIST.""" - - @property - def is_small(self): - return True - - @property - def num_classes(self): - return 10 - - @property - def class_labels(self): - return [str(c) for c in range(self.num_classes)] - - @property - def train_shards(self): - return 10 - - def generator(self, data_dir, tmp_dir, is_training): - if is_training: - return fashion_mnist_generator(tmp_dir, True, 60000) - else: - return fashion_mnist_generator(tmp_dir, False, 10000) - - -# URLs and filenames for CIFAR data. -_CIFAR10_URL = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz" -_CIFAR10_PREFIX = "cifar-10-batches-py/" -_CIFAR10_TRAIN_FILES = [ - "data_batch_1", "data_batch_2", "data_batch_3", "data_batch_4", - "data_batch_5" -] -_CIFAR10_TEST_FILES = ["test_batch"] -_CIFAR10_IMAGE_SIZE = 32 - - -def _get_cifar10(directory): - """Download and extract CIFAR to directory unless it is there.""" - filename = os.path.basename(_CIFAR10_URL) - path = generator_utils.maybe_download(directory, filename, _CIFAR10_URL) - tarfile.open(path, "r:gz").extractall(directory) - - -def cifar10_generator(tmp_dir, training, how_many, start_from=0): - """Image generator for CIFAR-10. - - Args: - tmp_dir: path to temporary storage directory. - training: a Boolean; if true, we use the train set, otherwise the test set. - how_many: how many images and labels to generate. - start_from: from which image to start. - - Returns: - An instance of image_generator that produces CIFAR-10 images and labels. 
- """ - _get_cifar10(tmp_dir) - data_files = _CIFAR10_TRAIN_FILES if training else _CIFAR10_TEST_FILES - all_images, all_labels = [], [] - for filename in data_files: - path = os.path.join(tmp_dir, _CIFAR10_PREFIX, filename) - with tf.gfile.Open(path, "r") as f: - data = cPickle.load(f) - images = data["data"] - num_images = images.shape[0] - images = images.reshape((num_images, 3, _CIFAR10_IMAGE_SIZE, - _CIFAR10_IMAGE_SIZE)) - all_images.extend([ - np.squeeze(images[j]).transpose((1, 2, 0)) for j in xrange(num_images) - ]) - labels = data["labels"] - all_labels.extend([labels[j] for j in xrange(num_images)]) - return image_generator(all_images[start_from:start_from + how_many], - all_labels[start_from:start_from + how_many]) - - -@registry.register_problem -class ImageCifar10Tune(ImageMnistTune): - """Cifar-10 Tune.""" - - @property - def class_labels(self): - return [ - "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", - "ship", "truck" - ] - - def preprocess_example(self, example, mode, unused_hparams): - example["inputs"].set_shape([_CIFAR10_IMAGE_SIZE, _CIFAR10_IMAGE_SIZE, 3]) - if mode == tf.estimator.ModeKeys.TRAIN: - example["inputs"] = common_layers.cifar_image_augmentation( - example["inputs"]) - example["inputs"] = tf.to_int64(example["inputs"]) - return example - - def generator(self, data_dir, tmp_dir, is_training): - if is_training: - return cifar10_generator(tmp_dir, True, 48000) - else: - return cifar10_generator(tmp_dir, True, 2000, 48000) - - -@registry.register_problem -class ImageCifar10(ImageCifar10Tune): - - def generator(self, data_dir, tmp_dir, is_training): - if is_training: - return cifar10_generator(tmp_dir, True, 50000) - else: - return cifar10_generator(tmp_dir, False, 10000) - - -@registry.register_problem -class ImageCifar10Plain(ImageCifar10): - - def preprocess_example(self, example, mode, unused_hparams): - example["inputs"].set_shape([_CIFAR10_IMAGE_SIZE, _CIFAR10_IMAGE_SIZE, 3]) - example["inputs"] = tf.to_int64(example["inputs"]) - return example - - -@registry.register_problem -class ImageCifar10Plain8(ImageCifar10): - """CIFAR-10 rescaled to 8x8 for output: Conditional image generation.""" - - def dataset_filename(self): - return "image_cifar10_plain" # Reuse CIFAR-10 plain data. - - def preprocess_example(self, example, mode, unused_hparams): - example["inputs"] = resize_by_area(example["inputs"], 8) - return example - - -@registry.register_problem -class Img2imgCifar10(ImageCifar10): - """CIFAR-10 rescaled to 8x8 for input and 32x32 for output.""" - - def dataset_filename(self): - return "image_cifar10_plain" # Reuse CIFAR-10 plain data. - - def preprocess_example(self, example, unused_mode, unused_hparams): - - inputs = example["inputs"] - # For Img2Img resize input and output images as desired. - example["inputs"] = resize_by_area(inputs, 8) - example["targets"] = resize_by_area(inputs, 32) - return example - - def hparams(self, defaults, unused_model_hparams): - p = defaults - p.input_modality = {"inputs": ("image:identity", 256)} - p.target_modality = ("image:identity", 256) - p.batch_size_multiplier = 256 - p.max_expected_batch_size_per_shard = 4 - p.input_space_id = 1 - p.target_space_id = 1 - - -# URLs and filenames for MSCOCO data. 
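# A self-contained sketch of the batch-decoding step the removed
# cifar10_generator above performed: each file in the CIFAR-10 "python
# version" archive is a pickled dict whose "data" rows are 3072 channel-major
# bytes, reshaped to (N, 3, 32, 32) and transposed to HWC images. The tmp_dir
# and batch filename below are placeholders and assume the archive has already
# been downloaded and extracted, as _get_cifar10 does.
import os
import pickle

import numpy as np


def load_cifar10_batch(tmp_dir, filename="data_batch_1"):
  path = os.path.join(tmp_dir, "cifar-10-batches-py", filename)
  with open(path, "rb") as f:
    # The archive was pickled by Python 2; on Python 3, decode keys as bytes.
    batch = pickle.load(f, encoding="bytes")
  data = np.asarray(batch[b"data"], dtype=np.uint8)   # shape (N, 3072)
  labels = list(batch[b"labels"])                     # N ints in [0, 10)
  # Channel-major rows -> (N, 3, 32, 32) -> HWC images (N, 32, 32, 3).
  images = data.reshape((-1, 3, 32, 32)).transpose((0, 2, 3, 1))
  return images, labels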
-_MSCOCO_ROOT_URL = "http://msvocds.blob.core.windows.net/" -_MSCOCO_URLS = [ - "coco2014/train2014.zip", "coco2014/val2014.zip", "coco2014/test2014.zip", - "annotations-1-0-3/captions_train-val2014.zip" -] -_MSCOCO_TRAIN_PREFIX = "train2014" -_MSCOCO_EVAL_PREFIX = "val2014" -_MSCOCO_TRAIN_CAPTION_FILE = "annotations/captions_train2014.json" -_MSCOCO_EVAL_CAPTION_FILE = "annotations/captions_val2014.json" - - -def _get_mscoco(directory): - """Download and extract MSCOCO datasets to directory unless it is there.""" - for url in _MSCOCO_URLS: - filename = os.path.basename(url) - download_url = os.path.join(_MSCOCO_ROOT_URL, url) - path = generator_utils.maybe_download(directory, filename, download_url) - unzip_dir = os.path.join(directory, filename.strip(".zip")) - if not tf.gfile.Exists(unzip_dir): - zipfile.ZipFile(path, "r").extractall(directory) - - -def mscoco_generator(data_dir, - tmp_dir, - training, - how_many, - start_from=0, - eos_list=None, - vocab_filename=None, - vocab_size=0): - """Image generator for MSCOCO captioning problem with token-wise captions. - - Args: - data_dir: path to the data directory. - tmp_dir: path to temporary storage directory. - training: a Boolean; if true, we use the train set, otherwise the test set. - how_many: how many images and labels to generate. - start_from: from which image to start. - eos_list: optional list of end of sentence tokens, otherwise use default - value `1`. - vocab_filename: file within `tmp_dir` to read vocabulary from. - vocab_size: integer target to generate vocabulary size to. - - Yields: - A dictionary representing the images with the following fields: - * image/encoded: the string encoding the image as JPEG, - * image/format: the string "jpeg" representing image format, - * image/class/label: a list of integers representing the caption, - * image/height: an integer representing the height, - * image/width: an integer representing the width. - Every field is actually a list of the corresponding type. - """ - eos_list = [1] if eos_list is None else eos_list - if vocab_filename is not None: - vocab_symbolizer = generator_utils.get_or_generate_vocab( - data_dir, tmp_dir, vocab_filename, vocab_size) - _get_mscoco(tmp_dir) - caption_filepath = ( - _MSCOCO_TRAIN_CAPTION_FILE if training else _MSCOCO_EVAL_CAPTION_FILE) - caption_filepath = os.path.join(tmp_dir, caption_filepath) - prefix = _MSCOCO_TRAIN_PREFIX if training else _MSCOCO_EVAL_PREFIX - caption_file = io.open(caption_filepath) - caption_json = json.load(caption_file) - # Dictionary from image_id to ((filename, height, width), captions). 
- image_dict = dict() - for image in caption_json["images"]: - image_dict[image["id"]] = [(image["file_name"], image["height"], - image["width"]), []] - annotations = caption_json["annotations"] - annotation_count = len(annotations) - image_count = len(image_dict) - tf.logging.info("Processing %d images and %d labels\n" % (image_count, - annotation_count)) - for annotation in annotations: - image_id = annotation["image_id"] - image_dict[image_id][1].append(annotation["caption"]) - - data = list(image_dict.values())[start_from:start_from + how_many] - random.shuffle(data) - for image_info, labels in data: - image_filename = image_info[0] - image_filepath = os.path.join(tmp_dir, prefix, image_filename) - with tf.gfile.Open(image_filepath, "r") as f: - encoded_image_data = f.read() - height, width = image_info[1], image_info[2] - for label in labels: - if vocab_filename is None: - label = [ord(c) for c in label] + eos_list - else: - label = vocab_symbolizer.encode(label) + eos_list - yield { - "image/encoded": [encoded_image_data], - "image/format": ["jpeg"], - "image/class/label": label, - "image/height": [height], - "image/width": [width] - } - - -class Image2TextProblem(ImageProblem): - """Base class for image-to-text problems.""" - - @property - def is_character_level(self): - raise NotImplementedError() - - @property - def targeted_vocab_size(self): - raise NotImplementedError() # Not needed if self.is_character_level. - - @property - def target_space_id(self): - raise NotImplementedError() - - @property - def train_shards(self): - raise NotImplementedError() - - @property - def dev_shards(self): - raise NotImplementedError() - - def generator(self, data_dir, tmp_dir, is_training): - raise NotImplementedError() - - def example_reading_spec(self): - label_key = "image/class/label" - data_fields, data_items_to_decoders = ( - super(Image2TextProblem, self).example_reading_spec()) - data_fields[label_key] = tf.FixedLenFeature((1,), tf.int64) - data_items_to_decoders[ - "targets"] = tf.contrib.slim.tfexample_decoder.Tensor(label_key) - return data_fields, data_items_to_decoders - - def feature_encoders(self, data_dir): - if self.is_character_level: - encoder = text_encoder.ByteTextEncoder() - else: - vocab_filename = os.path.join( - data_dir, "vocab.endefr.%d" % self.targeted_vocab_size) - encoder = text_encoder.SubwordTextEncoder(vocab_filename) - return {"targets": encoder} - - def hparams(self, defaults, unused_model_hparams): - p = defaults - p.input_modality = {"inputs": (registry.Modalities.IMAGE, 256)} - encoder = self._encoders["targets"] - p.target_modality = (registry.Modalities.SYMBOL, encoder.vocab_size) - p.batch_size_multiplier = 256 - p.max_expected_batch_size_per_shard = 2 - p.loss_multiplier = 1.0 - p.input_space_id = problem.SpaceID.IMAGE - p.target_space_id = self.target_space_id - - def generate_data(self, data_dir, tmp_dir, task_id=-1): - generator_utils.generate_dataset_and_shuffle( - self.generator(data_dir, tmp_dir, True), - self.training_filepaths(data_dir, self.train_shards, shuffled=False), - self.generator(data_dir, tmp_dir, False), - self.dev_filepaths(data_dir, self.dev_shards, shuffled=False)) - - -@registry.register_problem -class ImageMsCocoCharacters(Image2TextProblem): - """MSCOCO, character level.""" - - @property - def is_character_level(self): - return True - - @property - def target_space_id(self): - return problem.SpaceID.EN_CHR - - @property - def train_shards(self): - return 100 - - @property - def dev_shards(self): - return 10 - - def 
preprocess_example(self, example, mode, _): - return imagenet_preprocess_example(example, mode) - - def generator(self, data_dir, tmp_dir, is_training): - if is_training: - return mscoco_generator(data_dir, tmp_dir, True, 80000) - else: - return mscoco_generator(data_dir, tmp_dir, False, 40000) - raise NotImplementedError() - - -@registry.register_problem -class ImageMsCocoTokens8k(ImageMsCocoCharacters): - """MSCOCO, 8k tokens vocab.""" - - @property - def is_character_level(self): - return False - - @property - def targeted_vocab_size(self): - return 2**13 # 8192 - - @property - def target_space_id(self): - return problem.SpaceID.EN_TOK - - @property - def train_shards(self): - return 100 - - @property - def dev_shards(self): - return 10 - - def generator(self, data_dir, tmp_dir, is_training): - vocab_filename = "vocab.endefr.%d" % self.targeted_vocab_size - if is_training: - return mscoco_generator( - data_dir, - tmp_dir, - True, - 80000, - vocab_filename=vocab_filename, - vocab_size=self.targeted_vocab_size) - else: - return mscoco_generator( - data_dir, - tmp_dir, - False, - 40000, - vocab_filename=vocab_filename, - vocab_size=self.targeted_vocab_size) - - -@registry.register_problem -class ImageMsCocoTokens32k(ImageMsCocoTokens8k): - """MSCOCO, 32k tokens vocab.""" - - @property - def targeted_vocab_size(self): - return 2**15 # 32768 - - -@registry.register_problem -class OcrTest(Image2TextProblem): - """OCR test problem.""" - - @property - def is_small(self): - return True - - @property - def is_character_level(self): - return True - - @property - def target_space_id(self): - return problem.SpaceID.EN_CHR - - @property - def train_shards(self): - return 1 - - @property - def dev_shards(self): - return 1 - - def preprocess_example(self, example, mode, _): - # Resize from usual size ~1350x60 to 90x4 in this test. - img = example["inputs"] - example["inputs"] = tf.to_int64( - tf.image.resize_images(img, [90, 4], tf.image.ResizeMethod.AREA)) - return example - - def generator(self, data_dir, tmp_dir, is_training): - # In this test problem, we assume that the data is in tmp_dir/ocr/ in - # files names 0.png, 0.txt, 1.png, 1.txt and so on until num_examples. - num_examples = 2 - ocr_dir = os.path.join(tmp_dir, "ocr/") - tf.logging.info("Looking for OCR data in %s." % ocr_dir) - for i in xrange(num_examples): - image_filepath = os.path.join(ocr_dir, "%d.png" % i) - text_filepath = os.path.join(ocr_dir, "%d.txt" % i) - with tf.gfile.Open(text_filepath, "rb") as f: - label = f.read() - with tf.gfile.Open(image_filepath, "rb") as f: - encoded_image_data = f.read() - # In PNG files width and height are stored in these bytes. - width, height = struct.unpack(">ii", encoded_image_data[16:24]) - yield { - "image/encoded": [encoded_image_data], - "image/format": ["png"], - "image/class/label": label.strip(), - "image/height": [height], - "image/width": [width] - } diff --git a/tensor2tensor/data_generators/image_utils.py b/tensor2tensor/data_generators/image_utils.py new file mode 100644 index 000000000..78fdde262 --- /dev/null +++ b/tensor2tensor/data_generators/image_utils.py @@ -0,0 +1,272 @@ +# coding=utf-8 +# Copyright 2017 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Base classes and utilities for image datasets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +# Dependency imports + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.utils import registry + +import tensorflow as tf + +from tensorflow.python.eager import context + + +def resize_by_area(img, size): + """image resize function used by quite a few image problems.""" + return tf.to_int64( + tf.image.resize_images(img, [size, size], tf.image.ResizeMethod.AREA)) + + +class ImageProblem(problem.Problem): + """Base class for problems with images.""" + + @property + def num_channels(self): + """Number of color channels.""" + return 3 + + def example_reading_spec(self, label_repr=None): + data_fields = { + "image/encoded": tf.FixedLenFeature((), tf.string), + "image/format": tf.FixedLenFeature((), tf.string), + } + + data_items_to_decoders = { + "inputs": + tf.contrib.slim.tfexample_decoder.Image( + image_key="image/encoded", + format_key="image/format", + channels=self.num_channels), + } + + return data_fields, data_items_to_decoders + + def preprocess_example(self, example, mode, hparams): + example["inputs"] = tf.image.per_image_standardization(example["inputs"]) + return example + + +class Image2ClassProblem(ImageProblem): + """Base class for image classification problems.""" + + @property + def is_small(self): + raise NotImplementedError() + + @property + def num_classes(self): + raise NotImplementedError() + + @property + def train_shards(self): + raise NotImplementedError() + + @property + def dev_shards(self): + return 1 + + @property + def class_labels(self): + return ["ID_%d" % i for i in range(self.num_classes)] + + def feature_encoders(self, data_dir): + del data_dir + return { + "inputs": text_encoder.ImageEncoder(), + "targets": text_encoder.ClassLabelEncoder(self.class_labels) + } + + def generator(self, data_dir, tmp_dir, is_training): + raise NotImplementedError() + + def example_reading_spec(self): + label_key = "image/class/label" + data_fields, data_items_to_decoders = ( + super(Image2ClassProblem, self).example_reading_spec()) + data_fields[label_key] = tf.FixedLenFeature((1,), tf.int64) + + data_items_to_decoders[ + "targets"] = tf.contrib.slim.tfexample_decoder.Tensor(label_key) + return data_fields, data_items_to_decoders + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.input_modality = {"inputs": (registry.Modalities.IMAGE, 256)} + p.target_modality = (registry.Modalities.CLASS_LABEL, self.num_classes) + p.batch_size_multiplier = 4 if self.is_small else 256 + p.max_expected_batch_size_per_shard = 8 if self.is_small else 2 + p.loss_multiplier = 3.0 if self.is_small else 1.0 + if self._was_reversed: + p.loss_multiplier = 1.0 + p.input_space_id = problem.SpaceID.IMAGE + p.target_space_id = problem.SpaceID.IMAGE_LABEL + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + generator_utils.generate_dataset_and_shuffle( 
+ self.generator(data_dir, tmp_dir, True), + self.training_filepaths(data_dir, self.train_shards, shuffled=False), + self.generator(data_dir, tmp_dir, False), + self.dev_filepaths(data_dir, self.dev_shards, shuffled=False)) + + +def _encoded_images(images): + if context.in_eager_mode(): + for image in images: + yield tf.image.encode_png(image).numpy() + else: + (width, height, channels) = images[0].shape + with tf.Graph().as_default(): + image_t = tf.placeholder(dtype=tf.uint8, shape=(width, height, channels)) + encoded_image_t = tf.image.encode_png(image_t) + with tf.Session() as sess: + for image in images: + enc_string = sess.run(encoded_image_t, feed_dict={image_t: image}) + yield enc_string + + +def image_generator(images, labels): + """Generator for images that takes image and labels lists and creates pngs. + + Args: + images: list of images given as [width x height x channels] numpy arrays. + labels: list of ints, same length as images. + + Yields: + A dictionary representing the images with the following fields: + * image/encoded: the string encoding the image as PNG, + * image/format: the string "png" representing image format, + * image/class/label: an integer representing the label, + * image/height: an integer representing the height, + * image/width: an integer representing the width. + Every field is actually a singleton list of the corresponding type. + + Raises: + ValueError: if images is an empty list. + """ + if not images: + raise ValueError("Must provide some images for the generator.") + width, height, _ = images[0].shape + for (enc_image, label) in zip(_encoded_images(images), labels): + yield { + "image/encoded": [enc_image], + "image/format": ["png"], + "image/class/label": [int(label)], + "image/height": [height], + "image/width": [width] + } + + +class Image2TextProblem(ImageProblem): + """Base class for image-to-text problems.""" + + @property + def is_character_level(self): + raise NotImplementedError() + + @property + def targeted_vocab_size(self): + raise NotImplementedError() # Not needed if self.is_character_level. 
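# A minimal sketch of driving image_generator() above directly, in the spirit
# of this module's unit test: random uint8 images plus integer labels go in,
# and dicts with PNG-encoded bytes and singleton feature lists come out. No
# session setup is needed because _encoded_images() opens its own graph and
# session when not running eagerly. The image sizes and labels here are
# arbitrary placeholders.
import numpy as np

from tensor2tensor.data_generators import image_utils

images = [np.random.randint(0, 255, size=(10, 12, 3), dtype=np.uint8)
          for _ in range(2)]
for i, example in enumerate(image_utils.image_generator(images, [1, 2])):
  assert example["image/format"] == ["png"]
  assert example["image/class/label"] == [i + 1]
  assert example["image/encoded"][0]  # non-empty PNG byte string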
+ + @property + def target_space_id(self): + raise NotImplementedError() + + @property + def train_shards(self): + raise NotImplementedError() + + @property + def dev_shards(self): + raise NotImplementedError() + + def generator(self, data_dir, tmp_dir, is_training): + raise NotImplementedError() + + def example_reading_spec(self): + label_key = "image/class/label" + data_fields, data_items_to_decoders = ( + super(Image2TextProblem, self).example_reading_spec()) + data_fields[label_key] = tf.VarLenFeature(tf.int64) + data_items_to_decoders[ + "targets"] = tf.contrib.slim.tfexample_decoder.Tensor(label_key) + return data_fields, data_items_to_decoders + + def feature_encoders(self, data_dir): + if self.is_character_level: + encoder = text_encoder.ByteTextEncoder() + else: + vocab_filename = os.path.join( + data_dir, "vocab.ende.%d" % self.targeted_vocab_size) + encoder = text_encoder.SubwordTextEncoder(vocab_filename) + input_encoder = text_encoder.ImageEncoder() + return {"inputs": input_encoder, "targets": encoder} + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.input_modality = {"inputs": (registry.Modalities.IMAGE, 256)} + encoder = self._encoders["targets"] + p.target_modality = (registry.Modalities.SYMBOL, encoder.vocab_size) + p.batch_size_multiplier = 256 + p.max_expected_batch_size_per_shard = 4 + p.loss_multiplier = 1.0 + p.input_space_id = problem.SpaceID.IMAGE + p.target_space_id = self.target_space_id + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + generator_utils.generate_dataset_and_shuffle( + self.generator(data_dir, tmp_dir, True), + self.training_filepaths(data_dir, self.train_shards, shuffled=False), + self.generator(data_dir, tmp_dir, False), + self.dev_filepaths(data_dir, self.dev_shards, shuffled=False)) + + +def image_augmentation(images, do_colors=False, crop_size=None): + """Image augmentation: cropping, flipping, and color transforms.""" + if crop_size is None: + crop_size = [299, 299] + images = tf.random_crop(images, crop_size + [3]) + images = tf.image.random_flip_left_right(images) + if do_colors: # More augmentation, but might be slow. + images = tf.image.random_brightness(images, max_delta=32. / 255.) + images = tf.image.random_saturation(images, lower=0.5, upper=1.5) + images = tf.image.random_hue(images, max_delta=0.2) + images = tf.image.random_contrast(images, lower=0.5, upper=1.5) + return images + + +def cifar_image_augmentation(images): + """Image augmentation suitable for CIFAR-10/100. + + As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5). + + Args: + images: a Tensor. + Returns: + Tensor of the same shape as images. + """ + images = tf.image.resize_image_with_crop_or_pad(images, 40, 40) + images = tf.random_crop(images, [32, 32, 3]) + images = tf.image.random_flip_left_right(images) + return images diff --git a/tensor2tensor/data_generators/image_test.py b/tensor2tensor/data_generators/image_utils_test.py similarity index 86% rename from tensor2tensor/data_generators/image_test.py rename to tensor2tensor/data_generators/image_utils_test.py index 59cad4226..5a7afc3b7 100644 --- a/tensor2tensor/data_generators/image_test.py +++ b/tensor2tensor/data_generators/image_utils_test.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
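# The augmentation helpers defined above are plain tensor-in/tensor-out
# functions, so they can be exercised in a short graph-mode snippet, much like
# the new test below does for image_augmentation(). A sketch for
# cifar_image_augmentation(): pad to 40x40, take a random 32x32 crop, and
# randomly flip, leaving the shape unchanged. The random input stands in for a
# real CIFAR image.
import numpy as np
import tensorflow as tf

from tensor2tensor.data_generators import image_utils

image = np.random.randint(0, 256, size=(32, 32, 3)).astype(np.uint8)
with tf.Session() as sess:
  augmented = image_utils.cifar_image_augmentation(tf.constant(image))
  out = sess.run(augmented)
assert out.shape == (32, 32, 3)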
-"""Image generators test.""" +"""image_utils test.""" from __future__ import absolute_import from __future__ import division @@ -22,13 +22,20 @@ # Dependency imports import numpy as np -from tensor2tensor.data_generators import image +from tensor2tensor.data_generators import image_utils import tensorflow as tf class ImageTest(tf.test.TestCase): + def testImageAugmentation(self): + x = np.random.rand(500, 500, 3) + with self.test_session() as session: + y = image_utils.image_augmentation(tf.constant(x)) + res = session.run(y) + self.assertEqual(res.shape, (299, 299, 3)) + def testImageGenerator(self): # 2 random images np.random.seed(1111) # To avoid any flakiness. @@ -36,7 +43,7 @@ def testImageGenerator(self): image2 = np.random.randint(0, 255, size=(10, 12, 3)) # Call image generator on the 2 images with labels [1, 2]. encoded_imgs, labels = [], [] - for dictionary in image.image_generator([image1, image2], [1, 2]): + for dictionary in image_utils.image_generator([image1, image2], [1, 2]): self.assertEqual( sorted(list(dictionary)), [ "image/class/label", "image/encoded", "image/format", diff --git a/tensor2tensor/data_generators/imagenet.py b/tensor2tensor/data_generators/imagenet.py new file mode 100644 index 000000000..1ca2ec3e2 --- /dev/null +++ b/tensor2tensor/data_generators/imagenet.py @@ -0,0 +1,363 @@ +# coding=utf-8 +# Copyright 2017 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""ImageNet.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Dependency imports + +from tensor2tensor.data_generators import image_utils +from tensor2tensor.utils import registry + +import tensorflow as tf + + +# Derived from ImageNet data +MEAN_RGB = [0.485, 0.456, 0.406] +STDDEV_RGB = [0.229, 0.224, 0.225] + + +def imagenet_preprocess_example(example, mode, resize_size=None): + """Preprocessing used for Imagenet and similar problems.""" + resize_size = resize_size or [299, 299] + assert resize_size[0] == resize_size[1] + + image = example["inputs"] + if mode == tf.estimator.ModeKeys.TRAIN: + image = preprocess_for_train(image, image_size=resize_size[0]) + else: + image = preprocess_for_eval(image, image_size=resize_size[0]) + + example["inputs"] = image + return example + + +@registry.register_problem +class ImageImagenet(image_utils.Image2ClassProblem): + """Imagenet.""" + + @property + def is_small(self): + return False + + @property + def num_classes(self): + return 1000 + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + # TODO(lukaszkaiser): find a better way than printing this. 
+ print("To generate the ImageNet dataset in the proper format, follow " + "instructions at https://github.com/tensorflow/models/blob/master" + "/inception/README.md#getting-started") + + def preprocess_example(self, example, mode, _): + return imagenet_preprocess_example(example, mode) + + +class ImageImagenetRescaled(ImageImagenet): + """Imagenet rescaled to rescale_size.""" + + @property + def rescale_size(self): + # return [224, 224] + raise NotImplementedError() + + def dataset_filename(self): + return "image_imagenet" # Reuse Imagenet data. + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + tf.logging.warning( + "Generate data for rescaled ImageNet problems with image_imagenet") + + def preprocess_example(self, example, mode, _): + return imagenet_preprocess_example( + example, mode, resize_size=self.rescale_size) + + +@registry.register_problem +class ImageImagenet224(ImageImagenetRescaled): + """Imagenet rescaled to 224x224.""" + + @property + def rescale_size(self): + return [224, 224] + + +@registry.register_problem +class ImageImagenet32(ImageImagenetRescaled): + """Imagenet rescaled to 32x32.""" + + @property + def rescale_size(self): + return [32, 32] + + @property + def is_small(self): + return True # Modalities like for CIFAR. + + def preprocess_example(self, example, mode, _): + # Just resize with area. + if self._was_reversed: + example["inputs"] = tf.to_int64( + tf.image.resize_images(example["inputs"], self.rescale_size, + tf.image.ResizeMethod.AREA)) + else: + example = imagenet_preprocess_example(example, mode) + example["inputs"] = tf.to_int64( + tf.image.resize_images(example["inputs"], self.rescale_size)) + return example + + +@registry.register_problem +class ImageImagenet64(ImageImagenet32): + """Imagenet rescaled to 64x64.""" + + @property + def rescale_size(self): + return [64, 64] + + +@registry.register_problem +class Img2imgImagenet(image_utils.ImageProblem): + """Imagenet rescaled to 8x8 for input and 32x32 for output.""" + + def dataset_filename(self): + return "image_imagenet" # Reuse Imagenet data. + + def preprocess_example(self, example, unused_mode, unused_hparams): + + inputs = example["inputs"] + # For Img2Img resize input and output images as desired. + example["inputs"] = image_utils.resize_by_area(inputs, 8) + example["targets"] = image_utils.resize_by_area(inputs, 32) + return example + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + tf.logging.warning("Generate data for img2img_imagenet with image_imagenet") + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.input_modality = {"inputs": ("image:identity", 256)} + p.target_modality = ("image:identity", 256) + p.batch_size_multiplier = 256 + p.max_expected_batch_size_per_shard = 4 + p.input_space_id = 1 + p.target_space_id = 1 + + +# The following preprocessing functions were taken from +# cloud_tpu/models/resnet/resnet_preprocessing.py +# ============================================================================== +def _crop(image, offset_height, offset_width, crop_height, crop_width): + """Crops the given image using the provided offsets and sizes. + + Note that the method doesn't assume we know the input image size but it does + assume we know the input image rank. + + Args: + image: `Tensor` image of shape [height, width, channels]. + offset_height: `Tensor` indicating the height offset. + offset_width: `Tensor` indicating the width offset. + crop_height: the height of the cropped image. + crop_width: the width of the cropped image. 
+ + Returns: + the cropped (and resized) image. + + Raises: + InvalidArgumentError: if the rank is not 3 or if the image dimensions are + less than the crop size. + """ + original_shape = tf.shape(image) + + rank_assertion = tf.Assert( + tf.equal(tf.rank(image), 3), ["Rank of image must be equal to 3."]) + with tf.control_dependencies([rank_assertion]): + cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]]) + + size_assertion = tf.Assert( + tf.logical_and( + tf.greater_equal(original_shape[0], crop_height), + tf.greater_equal(original_shape[1], crop_width)), + ["Crop size greater than the image size."]) + + offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0])) + + # Use tf.slice instead of crop_to_bounding box as it accepts tensors to + # define the crop size. + with tf.control_dependencies([size_assertion]): + image = tf.slice(image, offsets, cropped_shape) + return tf.reshape(image, cropped_shape) + + +def distorted_bounding_box_crop(image, + bbox, + min_object_covered=0.1, + aspect_ratio_range=(0.75, 1.33), + area_range=(0.05, 1.0), + max_attempts=100, + scope=None): + """Generates cropped_image using a one of the bboxes randomly distorted. + + See `tf.image.sample_distorted_bounding_box` for more documentation. + + Args: + image: `Tensor` of image (it will be converted to floats in [0, 1]). + bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` + where each coordinate is [0, 1) and the coordinates are arranged + as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole + image. + min_object_covered: An optional `float`. Defaults to `0.1`. The cropped + area of the image must contain at least this fraction of any bounding + box supplied. + aspect_ratio_range: An optional list of `float`s. The cropped area of the + image must have an aspect ratio = width / height within this range. + area_range: An optional list of `float`s. The cropped area of the image + must contain a fraction of the supplied image within in this range. + max_attempts: An optional `int`. Number of attempts at generating a cropped + region of the image of the specified constraints. After `max_attempts` + failures, return the entire image. + scope: Optional `str` for name scope. + Returns: + (cropped image `Tensor`, distorted bbox `Tensor`). + """ + with tf.name_scope(scope, "distorted_bounding_box_crop", [image, bbox]): + # Each bounding box has shape [1, num_boxes, box coords] and + # the coordinates are ordered [ymin, xmin, ymax, xmax]. + + # A large fraction of image datasets contain a human-annotated bounding + # box delineating the region of the image containing the object of interest. + # We choose to create a new bounding box for the object which is a randomly + # distorted version of the human-annotated bounding box that obeys an + # allowed range of aspect ratios, sizes and overlap with the human-annotated + # bounding box. If no box is supplied, then we assume the bounding box is + # the entire image. + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + tf.shape(image), + bounding_boxes=bbox, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + max_attempts=max_attempts, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box + + # Crop the image to the specified bounding box. 
+ cropped_image = tf.slice(image, bbox_begin, bbox_size) + return cropped_image, distort_bbox + + +def _random_crop(image, size): + """Make a random crop of (`size` x `size`).""" + bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) + random_image, bbox = distorted_bounding_box_crop( + image, + bbox, + min_object_covered=0.1, + aspect_ratio_range=(3. / 4, 4. / 3.), + area_range=(0.08, 1.0), + max_attempts=1, + scope=None) + bad = _at_least_x_are_true(tf.shape(image), tf.shape(random_image), 3) + + image = tf.cond( + bad, lambda: _center_crop(_do_scale(image, size), size), + lambda: tf.image.resize_bicubic([random_image], [size, size])[0]) + return image + + +def _flip(image): + """Random horizontal image flip.""" + image = tf.image.random_flip_left_right(image) + return image + + +def _at_least_x_are_true(a, b, x): + """At least `x` of `a` and `b` `Tensors` are true.""" + match = tf.equal(a, b) + match = tf.cast(match, tf.int32) + return tf.greater_equal(tf.reduce_sum(match), x) + + +def _do_scale(image, size): + """Rescale the image by scaling the smaller spatial dimension to `size`.""" + shape = tf.cast(tf.shape(image), tf.float32) + w_greater = tf.greater(shape[0], shape[1]) + shape = tf.cond(w_greater, + lambda: tf.cast([shape[0] / shape[1] * size, size], tf.int32), + lambda: tf.cast([size, shape[1] / shape[0] * size], tf.int32)) + + return tf.image.resize_bicubic([image], shape)[0] + + +def _center_crop(image, size): + """Crops to center of image with specified `size`.""" + image_height = tf.shape(image)[0] + image_width = tf.shape(image)[1] + + offset_height = ((image_height - size) + 1) / 2 + offset_width = ((image_width - size) + 1) / 2 + image = _crop(image, offset_height, offset_width, size, size) + return image + + +def _normalize(image): + """Normalize the image to zero mean and unit variance.""" + offset = tf.constant(MEAN_RGB, shape=[1, 1, 3]) + image -= offset + + scale = tf.constant(STDDEV_RGB, shape=[1, 1, 3]) + image /= scale + return image + + +def preprocess_for_train(image, image_size=224): + """Preprocesses the given image for evaluation. + + Args: + image: `Tensor` representing an image of arbitrary size. + image_size: int, how large the output image should be. + + Returns: + A preprocessed image `Tensor`. + """ + image = _random_crop(image, image_size) + image = _normalize(image) + image = _flip(image) + image = tf.reshape(image, [image_size, image_size, 3]) + return image + + +def preprocess_for_eval(image, image_size=224): + """Preprocesses the given image for evaluation. + + Args: + image: `Tensor` representing an image of arbitrary size. + image_size: int, how large the output image should be. + + Returns: + A preprocessed image `Tensor`. + """ + image = _do_scale(image, image_size + 32) + image = _normalize(image) + image = _center_crop(image, image_size) + image = tf.reshape(image, [image_size, image_size, 3]) + return image + + +# ============================================================================== diff --git a/tensor2tensor/data_generators/imdb.py b/tensor2tensor/data_generators/imdb.py index 95d728b1e..22e674098 100644 --- a/tensor2tensor/data_generators/imdb.py +++ b/tensor2tensor/data_generators/imdb.py @@ -48,6 +48,10 @@ def num_shards(self): def vocab_file(self): return "sentiment_imdb.vocab" + @property + def batch_size_means_tokens(self): + return True + @property def targeted_vocab_size(self): return 2**13 # 8k vocab suffices for this small dataset. 
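# The batch_size_means_tokens property added for IMDB above ties into the new
# Problem.tpu_batch_size_per_shard() further down in this diff: when a
# problem's batch size is measured in tokens, the per-core TPU batch size in
# examples becomes hparams.batch_size // max_length. A small sketch of that
# arithmetic, with made-up numbers purely for illustration:
def tpu_examples_per_core(batch_size, max_length, batch_size_means_tokens):
  """Mirrors the logic of tpu_batch_size_per_shard below."""
  if batch_size_means_tokens:
    return batch_size // max_length
  return batch_size

assert tpu_examples_per_core(4096, 256, batch_size_means_tokens=True) == 16
assert tpu_examples_per_core(32, 256, batch_size_means_tokens=False) == 32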
diff --git a/tensor2tensor/data_generators/mnist.py b/tensor2tensor/data_generators/mnist.py new file mode 100644 index 000000000..9a6792b62 --- /dev/null +++ b/tensor2tensor/data_generators/mnist.py @@ -0,0 +1,250 @@ +# coding=utf-8 +# Copyright 2017 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""MNIST.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import gzip +import os +import random + +# Dependency imports + +import numpy as np + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import image_utils +from tensor2tensor.utils import registry + +import tensorflow as tf + +# URLs and filenames for MNIST data. +_MNIST_URL = "http://yann.lecun.com/exdb/mnist/" +_MNIST_TRAIN_DATA_FILENAME = "train-images-idx3-ubyte.gz" +_MNIST_TRAIN_LABELS_FILENAME = "train-labels-idx1-ubyte.gz" +_MNIST_TEST_DATA_FILENAME = "t10k-images-idx3-ubyte.gz" +_MNIST_TEST_LABELS_FILENAME = "t10k-labels-idx1-ubyte.gz" +_MNIST_IMAGE_SIZE = 28 + + +def _get_mnist(directory): + """Download all MNIST files to directory unless they are there.""" + for filename in [ + _MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME, + _MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME + ]: + generator_utils.maybe_download(directory, filename, _MNIST_URL + filename) + + +def _extract_mnist_images(filename, num_images): + """Extract images from an MNIST file into a numpy array. + + Args: + filename: The path to an MNIST images file. + num_images: The number of images in the file. + + Returns: + A numpy array of shape [number_of_images, height, width, channels]. + """ + with gzip.open(filename) as bytestream: + bytestream.read(16) + buf = bytestream.read(_MNIST_IMAGE_SIZE * _MNIST_IMAGE_SIZE * num_images) + data = np.frombuffer(buf, dtype=np.uint8) + data = data.reshape(num_images, _MNIST_IMAGE_SIZE, _MNIST_IMAGE_SIZE, 1) + return data + + +def _extract_mnist_labels(filename, num_labels): + """Extract labels from an MNIST file into integers. + + Args: + filename: The path to an MNIST labels file. + num_labels: The number of labels in the file. + + Returns: + A int64 numpy array of shape [num_labels] + """ + with gzip.open(filename) as bytestream: + bytestream.read(8) + buf = bytestream.read(num_labels) + labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) + return labels + + +def mnist_common_generator(tmp_dir, + training, + how_many, + data_filename, + label_filename, + start_from=0): + """Image generator for MNIST. + + Args: + tmp_dir: path to temporary storage directory. + training: a Boolean; if true, we use the train set, otherwise the test set. + how_many: how many images and labels to generate. + data_filename: file that contains features data. + label_filename: file that contains labels. + start_from: from which image to start. + + Returns: + An instance of image_generator that produces MNIST images. 
+ """ + data_path = os.path.join(tmp_dir, data_filename) + labels_path = os.path.join(tmp_dir, label_filename) + images = _extract_mnist_images(data_path, 60000 if training else 10000) + labels = _extract_mnist_labels(labels_path, 60000 if training else 10000) + # Shuffle the data to make sure classes are well distributed. + data = list(zip(images, labels)) + random.shuffle(data) + images, labels = list(zip(*data)) + return image_utils.image_generator(images[start_from:start_from + how_many], + labels[start_from:start_from + how_many]) + + +def mnist_generator(tmp_dir, training, how_many, start_from=0): + """Image generator for MNIST. + + Args: + tmp_dir: path to temporary storage directory. + training: a Boolean; if true, we use the train set, otherwise the test set. + how_many: how many images and labels to generate. + start_from: from which image to start. + + Returns: + An instance of image_generator that produces MNIST images. + """ + _get_mnist(tmp_dir) + d = _MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME + l = _MNIST_TRAIN_LABELS_FILENAME if training else _MNIST_TEST_LABELS_FILENAME + return mnist_common_generator(tmp_dir, training, how_many, d, l, start_from) + + +@registry.register_problem +class ImageMnistTune(image_utils.Image2ClassProblem): + """MNIST, tuning data.""" + + @property + def num_channels(self): + return 1 + + @property + def is_small(self): + return True + + @property + def num_classes(self): + return 10 + + @property + def class_labels(self): + return [str(c) for c in range(self.num_classes)] + + @property + def train_shards(self): + return 10 + + def preprocess_example(self, example, mode, unused_hparams): + image = example["inputs"] + image.set_shape([_MNIST_IMAGE_SIZE, _MNIST_IMAGE_SIZE, 1]) + image = tf.image.per_image_standardization(image) + example["inputs"] = image + return example + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return mnist_generator(tmp_dir, True, 55000) + else: + return mnist_generator(tmp_dir, True, 5000, 55000) + + +@registry.register_problem +class ImageMnist(ImageMnistTune): + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return mnist_generator(tmp_dir, True, 60000) + else: + return mnist_generator(tmp_dir, False, 10000) + + +# URLs and filenames for MNIST data. +_FASHION_MNIST_URL = ("http://fashion-mnist.s3-website.eu-central-1" + ".amazonaws.com/") +_FASHION_MNIST_LOCAL_FILE_PREFIX = "fashion-" +_FASHION_MNIST_IMAGE_SIZE = 28 + + +def _get_fashion_mnist(directory): + """Download all FashionMNIST files to directory unless they are there.""" + # Fashion mnist files have the same names as MNIST. + # We must choose a separate name (by adding 'fashion-' prefix) in the tmp_dir. + for filename in [ + _MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME, + _MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME + ]: + generator_utils.maybe_download(directory, + _FASHION_MNIST_LOCAL_FILE_PREFIX + filename, + _FASHION_MNIST_URL + filename) + + +def fashion_mnist_generator(tmp_dir, training, how_many, start_from=0): + """Image generator for FashionMNIST. + + Args: + tmp_dir: path to temporary storage directory. + training: a Boolean; if true, we use the train set, otherwise the test set. + how_many: how many images and labels to generate. + start_from: from which image to start. + + Returns: + An instance of image_generator that produces MNIST images. 
+ """ + _get_fashion_mnist(tmp_dir) + d = _FASHION_MNIST_LOCAL_FILE_PREFIX + ( + _MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME) + l = _FASHION_MNIST_LOCAL_FILE_PREFIX + ( + _MNIST_TRAIN_LABELS_FILENAME if training else _MNIST_TEST_LABELS_FILENAME) + return mnist_common_generator(tmp_dir, training, how_many, d, l, start_from) + + +@registry.register_problem +class ImageFashionMnist(image_utils.Image2ClassProblem): + """Fashion MNIST.""" + + @property + def is_small(self): + return True + + @property + def num_classes(self): + return 10 + + @property + def class_labels(self): + return [str(c) for c in range(self.num_classes)] + + @property + def train_shards(self): + return 10 + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return fashion_mnist_generator(tmp_dir, True, 60000) + else: + return fashion_mnist_generator(tmp_dir, False, 10000) diff --git a/tensor2tensor/data_generators/mscoco.py b/tensor2tensor/data_generators/mscoco.py new file mode 100644 index 000000000..8dc1e1bba --- /dev/null +++ b/tensor2tensor/data_generators/mscoco.py @@ -0,0 +1,234 @@ +# coding=utf-8 +# Copyright 2017 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""MS COCO.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import io +import json +import os +import random +import zipfile + +# Dependency imports + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import image_utils +from tensor2tensor.data_generators import imagenet +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.utils import registry + +import tensorflow as tf + +# URLs and filenames for MSCOCO data. +_MSCOCO_ROOT_URL = "http://msvocds.blob.core.windows.net/" +_MSCOCO_URLS = [ + "coco2014/train2014.zip", "coco2014/val2014.zip", "coco2014/test2014.zip", + "annotations-1-0-3/captions_train-val2014.zip" +] +_MSCOCO_TRAIN_PREFIX = "train2014" +_MSCOCO_EVAL_PREFIX = "val2014" +_MSCOCO_TRAIN_CAPTION_FILE = "annotations/captions_train2014.json" +_MSCOCO_EVAL_CAPTION_FILE = "annotations/captions_val2014.json" + + +def _get_mscoco(directory): + """Download and extract MSCOCO datasets to directory unless it is there.""" + for url in _MSCOCO_URLS: + filename = os.path.basename(url) + download_url = os.path.join(_MSCOCO_ROOT_URL, url) + path = generator_utils.maybe_download(directory, filename, download_url) + unzip_dir = os.path.join(directory, filename.strip(".zip")) + if not tf.gfile.Exists(unzip_dir): + zipfile.ZipFile(path, "r").extractall(directory) + + +def mscoco_generator(data_dir, + tmp_dir, + training, + how_many, + start_from=0, + eos_list=None, + vocab_filename=None): + """Image generator for MSCOCO captioning problem with token-wise captions. + + Args: + data_dir: path to the data directory. + tmp_dir: path to temporary storage directory. 
+ training: a Boolean; if true, we use the train set, otherwise the test set. + how_many: how many images and labels to generate. + start_from: from which image to start. + eos_list: optional list of end of sentence tokens, otherwise use default + value `1`. + vocab_filename: file within `tmp_dir` to read vocabulary from. + + Yields: + A dictionary representing the images with the following fields: + * image/encoded: the string encoding the image as JPEG, + * image/format: the string "jpeg" representing image format, + * image/class/label: a list of integers representing the caption, + * image/height: an integer representing the height, + * image/width: an integer representing the width. + Every field is actually a list of the corresponding type. + """ + eos_list = [1] if eos_list is None else eos_list + def get_vocab(): + """Get vocab for caption text encoder.""" + if data_dir is not None and vocab_filename is not None: + vocab_filepath = os.path.join(data_dir, vocab_filename) + if tf.gfile.Exists(vocab_filepath): + tf.logging.info("Found vocab file: %s", vocab_filepath) + vocab_symbolizer = text_encoder.SubwordTextEncoder(vocab_filepath) + return vocab_symbolizer + else: + raise ValueError("Vocab file does not exist: %s", vocab_filepath) + return None + + vocab_symbolizer = get_vocab() + _get_mscoco(tmp_dir) + caption_filepath = ( + _MSCOCO_TRAIN_CAPTION_FILE if training else _MSCOCO_EVAL_CAPTION_FILE) + caption_filepath = os.path.join(tmp_dir, caption_filepath) + prefix = _MSCOCO_TRAIN_PREFIX if training else _MSCOCO_EVAL_PREFIX + caption_file = io.open(caption_filepath) + caption_json = json.load(caption_file) + # Dictionary from image_id to ((filename, height, width), captions). + image_dict = dict() + for image in caption_json["images"]: + image_dict[image["id"]] = [(image["file_name"], image["height"], + image["width"]), []] + annotations = caption_json["annotations"] + annotation_count = len(annotations) + image_count = len(image_dict) + tf.logging.info("Processing %d images and %d labels\n" % (image_count, + annotation_count)) + for annotation in annotations: + image_id = annotation["image_id"] + image_dict[image_id][1].append(annotation["caption"]) + + data = list(image_dict.values())[start_from:start_from + how_many] + random.shuffle(data) + for image_info, labels in data: + image_filename = image_info[0] + image_filepath = os.path.join(tmp_dir, prefix, image_filename) + with tf.gfile.Open(image_filepath, "r") as f: + encoded_image_data = f.read() + height, width = image_info[1], image_info[2] + for label in labels: + if vocab_filename is None or vocab_symbolizer is None: + label = [ord(c) for c in label] + eos_list + else: + label = vocab_symbolizer.encode(label) + eos_list + yield { + "image/encoded": [encoded_image_data], + "image/format": ["jpeg"], + "image/class/label": label, + "image/height": [height], + "image/width": [width] + } + + +@registry.register_problem +class ImageMsCocoCharacters(image_utils.Image2TextProblem): + """MSCOCO, character level.""" + + @property + def is_character_level(self): + return True + + @property + def target_space_id(self): + return problem.SpaceID.EN_CHR + + @property + def train_shards(self): + return 100 + + @property + def dev_shards(self): + return 10 + + def preprocess_example(self, example, mode, _): + return imagenet.imagenet_preprocess_example(example, mode) + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return mscoco_generator(data_dir, tmp_dir, True, 80000) + else: + return mscoco_generator(data_dir, 
tmp_dir, False, 40000) + raise NotImplementedError() + + +@registry.register_problem +class ImageMsCocoTokens32k(ImageMsCocoCharacters): + """MSCOCO, 8k tokens vocab.""" + + @property + def is_character_level(self): + return False + + @property + def targeted_vocab_size(self): + return 2**15 # 32768 + + @property + def target_space_id(self): + return problem.SpaceID.EN_TOK + + @property + def train_shards(self): + return 100 + + @property + def dev_shards(self): + return 10 + + def generator(self, data_dir, tmp_dir, is_training): + # We use the translate vocab file as the vocabulary for captions. + # This requires having the vocab file present in the data_dir for the + # generation pipeline to succeed. + vocab_filename = "vocab.ende.%d" % self.targeted_vocab_size + if is_training: + return mscoco_generator( + data_dir, + tmp_dir, + True, + 80000, + vocab_filename=vocab_filename) + else: + return mscoco_generator( + data_dir, + tmp_dir, + False, + 40000, + vocab_filename=vocab_filename) + + +@registry.register_problem +class ImageTextMsCoco(ImageMsCocoTokens32k): + """Problem for using MsCoco for generating images from text.""" + _MSCOCO_IMAGE_SIZE = 32 + + def dataset_filename(self): + return "image_ms_coco_tokens32k" + + def preprocess_example(self, example, mode, unused_hparams): + example["inputs"] = image_utils.resize_by_area( + example["inputs"], self._MSCOCO_IMAGE_SIZE) + return example diff --git a/tensor2tensor/data_generators/ocr.py b/tensor2tensor/data_generators/ocr.py new file mode 100644 index 000000000..8e92165bd --- /dev/null +++ b/tensor2tensor/data_generators/ocr.py @@ -0,0 +1,87 @@ +# coding=utf-8 +# Copyright 2017 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""OCR.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import struct + +# Dependency imports + +from tensor2tensor.data_generators import image_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.utils import registry + +import tensorflow as tf + + +@registry.register_problem +class OcrTest(image_utils.Image2TextProblem): + """OCR test problem.""" + + @property + def is_small(self): + return True + + @property + def is_character_level(self): + return True + + @property + def target_space_id(self): + return problem.SpaceID.EN_CHR + + @property + def train_shards(self): + return 1 + + @property + def dev_shards(self): + return 1 + + def preprocess_example(self, example, mode, _): + # Resize from usual size ~1350x60 to 90x4 in this test. + img = example["inputs"] + img = tf.to_int64( + tf.image.resize_images(img, [90, 4], tf.image.ResizeMethod.AREA)) + img = tf.image.per_image_standardization(img) + example["inputs"] = img + return example + + def generator(self, data_dir, tmp_dir, is_training): + # In this test problem, we assume that the data is in tmp_dir/ocr/ in + # files names 0.png, 0.txt, 1.png, 1.txt and so on until num_examples. 
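# OcrTest reads its examples from files the user provides in tmp_dir/ocr/, as
# the comment above describes. Purely as a sketch (not part of this change),
# here is one way to fabricate the two expected PNG/TXT pairs so the problem
# has something to generate from; the image size and label text are arbitrary
# placeholders.
import os

import numpy as np
import tensorflow as tf


def make_dummy_ocr_data(tmp_dir, num_examples=2):
  ocr_dir = os.path.join(tmp_dir, "ocr")
  tf.gfile.MakeDirs(ocr_dir)
  with tf.Graph().as_default(), tf.Session() as sess:
    image_t = tf.placeholder(tf.uint8, shape=(60, 1350, 3))
    png_t = tf.image.encode_png(image_t)
    for i in range(num_examples):
      image = np.random.randint(0, 256, size=(60, 1350, 3)).astype(np.uint8)
      png_bytes = sess.run(png_t, feed_dict={image_t: image})
      with tf.gfile.Open(os.path.join(ocr_dir, "%d.png" % i), "wb") as f:
        f.write(png_bytes)
      with tf.gfile.Open(os.path.join(ocr_dir, "%d.txt" % i), "w") as f:
        f.write("dummy label %d" % i)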
+ num_examples = 2 + ocr_dir = os.path.join(tmp_dir, "ocr/") + tf.logging.info("Looking for OCR data in %s." % ocr_dir) + for i in xrange(num_examples): + image_filepath = os.path.join(ocr_dir, "%d.png" % i) + text_filepath = os.path.join(ocr_dir, "%d.txt" % i) + with tf.gfile.Open(text_filepath, "rb") as f: + label = f.read() + with tf.gfile.Open(image_filepath, "rb") as f: + encoded_image_data = f.read() + # In PNG files width and height are stored in these bytes. + width, height = struct.unpack(">ii", encoded_image_data[16:24]) + yield { + "image/encoded": [encoded_image_data], + "image/format": ["png"], + "image/class/label": label.strip(), + "image/height": [height], + "image/width": [width] + } diff --git a/tensor2tensor/data_generators/problem.py b/tensor2tensor/data_generators/problem.py index 53fa48740..890271dbe 100644 --- a/tensor2tensor/data_generators/problem.py +++ b/tensor2tensor/data_generators/problem.py @@ -119,8 +119,8 @@ def preprocess_example_common(example, hparams, mode): example["targets"] = tf.concat( [example["inputs"], [0], example["targets"]], 0) if hparams.split_to_length: - example["targets"] = tf.reshape( - example["targets"], [-1, hparams.split_to_length, 1, 1]) + example["targets"] = tf.reshape(example["targets"], + [-1, hparams.split_to_length, 1, 1]) if len(example) != 1: raise ValueError("split_to_length only works for LM problems") return tf.data.Dataset.from_tensor_slices(example) @@ -239,10 +239,21 @@ def max_length(self, model_hparams): Returns: an integer """ - return ( - model_hparams.split_to_length or - model_hparams.max_length or - model_hparams.batch_size) + return (model_hparams.split_to_length or model_hparams.max_length or + model_hparams.batch_size) + + def tpu_batch_size_per_shard(self, model_hparams): + """Batch size in examples per TPU core. + + Args: + model_hparams: model hyperparameters + Returns: + an integer + """ + if self.batch_size_means_tokens: + return model_hparams.batch_size // self.max_length(model_hparams) + else: + return model_hparams.batch_size @property def batch_size_means_tokens(self): @@ -425,7 +436,9 @@ def dataset(self, hparams=None, preprocess=True, dataset_split=None, - shard=None): + shard=None, + partition_id=0, + num_partitions=1): """Build a Dataset for this problem. Args: @@ -433,8 +446,7 @@ def dataset(self, data_dir: directory that contains data files. num_threads: int, number of threads to use for decode and preprocess Dataset.map calls. - output_buffer_size: int, how many elements to prefetch in Dataset.map - calls. + output_buffer_size: int, how many elements to prefetch at end of pipeline. shuffle_files: whether to shuffle input files. Default behavior (i.e. when shuffle_files=None) is to shuffle if mode == TRAIN. repeat: whether to repeat the Dataset. Default behavior is to repeat if @@ -447,9 +459,14 @@ def dataset(self, dataset_split: tf.estimator.ModeKeys + ["test"], which split to read data from (TRAIN:"-train", EVAL:"-dev", "test":"-test"). Defaults to mode. shard: int, if provided, will only read data from the specified shard. + partition_id: integer - which partition of the dataset to read from + num_partitions: how many partitions in the dataset Returns: Dataset containing dict. + + Raises: + ValueError: if num_partitions is greater than the number of data files. 
""" is_training = mode == tf.estimator.ModeKeys.TRAIN repeat = repeat or repeat is None and is_training @@ -470,39 +487,13 @@ def dataset(self, data_filepattern = self.filepattern(data_dir, dataset_split, shard=shard) tf.logging.info("Reading data files from %s", data_filepattern) - dataset = tf.data.Dataset.list_files(data_filepattern) - - if shuffle_files: - dataset = dataset.shuffle(buffer_size=1024) + data_files = tf.contrib.slim.parallel_reader.get_data_files( + data_filepattern) + # Functions used in dataset transforms below def _load_records(filename): - return tf.data.TFRecordDataset(filename, buffer_size=16 * 1000 * 1000) - - if hasattr(tf.contrib.data, "parallel_interleave"): - interleave = lambda ds, fn: ds.apply( # pylint: disable=g-long-lambda - tf.contrib.data.parallel_interleave( - fn, sloppy=is_training, cycle_length=16)) - else: - interleave = lambda ds, fn: ds.interleave(fn, cycle_length=16) - - dataset = interleave(dataset, _load_records) - - if repeat: - dataset = dataset.repeat() - - if shuffle_files: - # Skip a random fraction at the beginning of the stream. The skip is - # essential for synchronous highly-parallel training to avoid multiple - # replicas reading the same data in lock-step. - data_files = tf.contrib.slim.parallel_reader.get_data_files( - data_filepattern) - num_skip = random.randint(0, _file_num_records_cached(data_files[0])) - dataset = dataset.skip(num_skip) - - def _maybe_reverse_and_copy(example): - self.maybe_reverse_features(example) - self.maybe_copy_features(example) - return example + # Load records from file with an 8MiB read buffer. + return tf.data.TFRecordDataset(filename, buffer_size=8 * 1024 * 1024) def _preprocess(example): examples = self.preprocess_example(example, mode, hparams) @@ -510,11 +501,32 @@ def _preprocess(example): examples = tf.data.Dataset.from_tensors(examples) return examples - dataset = dataset.map(self.decode_example, num_parallel_calls=num_threads) + def _maybe_reverse_and_copy(example): + self.maybe_reverse_features(example) + self.maybe_copy_features(example) + return example + if len(data_files) < num_partitions: + raise ValueError( + "number of data files (%d) must be at least the number of hosts (%d)" + % (len(data_files), num_partitions)) + data_files = [f for (i, f) in enumerate(data_files) + if i % num_partitions == partition_id] + tf.logging.info( + "partition: %d num_data_files: %d" % (partition_id, len(data_files))) + if shuffle_files: + random.shuffle(data_files) + dataset = tf.data.Dataset.from_tensor_slices(tf.constant(data_files)) + dataset = dataset.apply( + tf.contrib.data.parallel_interleave( + _load_records, sloppy=is_training, cycle_length=8)) + if repeat: + dataset = dataset.repeat() + dataset = dataset.map(self.decode_example, num_parallel_calls=num_threads) if preprocess: - dataset = interleave(dataset, _preprocess) - + dataset = dataset.apply( + tf.contrib.data.parallel_interleave( + _preprocess, sloppy=is_training, cycle_length=8)) dataset = dataset.map( _maybe_reverse_and_copy, num_parallel_calls=num_threads) @@ -588,17 +600,57 @@ def feature_info(self): self._feature_info = features return features - def make_estimator_input_fn(self, mode, hparams, data_dir=None, + def make_estimator_input_fn(self, + mode, + hparams, + data_dir=None, dataset_kwargs=None): """Return input_fn wrapped for Estimator.""" def estimator_input_fn(params, config): - return self.input_fn(mode, hparams, data_dir=data_dir, params=params, - config=config, dataset_kwargs=dataset_kwargs) + return self.input_fn( + mode, + 
hparams, + data_dir=data_dir, + params=params, + config=config, + dataset_kwargs=dataset_kwargs) return estimator_input_fn - def input_fn(self, mode, hparams, data_dir=None, params=None, config=None, + def _dataset_partition(self, mode, config): + """Which part of the training data to read. + + If there are multiple parallel calls to input_fn (multiple TPU hosts), + then we want each one to read from a separate partition of the training + data. + + Args: + mode: tf.estimator.ModeKeys + config: RunConfig + Returns: + partition_id: an integer + num_partitions: an integer + """ + if mode != tf.estimator.ModeKeys.TRAIN or not hasattr(config, "tpu_config"): + return 0, 1 + if config.tpu_config.per_host_input_for_training: + num_partitions = max(config.tpu_config.num_shards // 8, 1) + else: + num_partitions = config.tpu_config.num_shards + partition_id = getattr(self, "_next_partition_id", 0) + self._next_partition_id = partition_id + 1 + tf.logging.info("num_partitions = %d partition_id = %d" % + (num_partitions, partition_id)) + assert partition_id < num_partitions + return partition_id, num_partitions + + def input_fn(self, + mode, + hparams, + data_dir=None, + params=None, + config=None, dataset_kwargs=None): """Builds input pipeline for problem. @@ -615,9 +667,11 @@ def input_fn(self, mode, hparams, data_dir=None, params=None, config=None, Returns: (features_dict, Tensor targets) """ + partition_id, num_partitions = self._dataset_partition(mode, config) + is_training = mode == tf.estimator.ModeKeys.TRAIN if config.use_tpu: - num_threads = 32 + num_threads = 64 else: num_threads = 4 if is_training else 1 @@ -629,10 +683,9 @@ def tpu_valid_size(example): def gpu_valid_size(example): drop_long_sequences = is_training or hparams.eval_drop_long_sequences - return data_reader.example_valid_size( - example, - hparams.min_length, - max_length if drop_long_sequences else 10**9) + return data_reader.example_valid_size(example, hparams.min_length, + max_length + if drop_long_sequences else 10**9) def define_shapes(example): batch_size = config and config.use_tpu and params["batch_size"] @@ -646,7 +699,10 @@ def define_shapes(example): "mode": mode, "data_dir": data_dir, "num_threads": num_threads, - "hparams": hparams}) + "hparams": hparams, + "partition_id": partition_id, + "num_partitions": num_partitions, + }) dataset = self.dataset(**dataset_kwargs) dataset = dataset.map( @@ -662,8 +718,8 @@ def define_shapes(example): else: tf.logging.warning( "Shapes are not fully defined. Assuming batch_size means tokens. 
" - "You should probably override batch_size_means_tokens() " - "in your problem subclass") + "Override batch_size_means_tokens() " + "in your problem subclass if this is undesired behavior.") batch_size_means_tokens = True # Batching @@ -672,12 +728,13 @@ def define_shapes(example): if config and config.use_tpu: # on TPU, we use params["batch_size"], which specifies the number of # examples across all datashards - tpu_batch_size = params["batch_size"] + batch_size = params["batch_size"] dataset = dataset.apply( - tf.contrib.data.batch_and_drop_remainder(tpu_batch_size)) + tf.contrib.data.batch_and_drop_remainder(batch_size)) else: num_shards = (config and config.data_parallelism.n) or 1 - dataset = dataset.batch(hparams.batch_size * num_shards) + batch_size = hparams.batch_size * num_shards + dataset = dataset.batch(batch_size) else: # batch_size means tokens per datashard if config and config.use_tpu: @@ -687,9 +744,10 @@ def define_shapes(example): dataset.output_shapes, none_filler=max_length) # on TPU, we use params["batch_size"], which specifies the number of # examples across all datashards + batch_size = params["batch_size"] dataset = dataset.apply( tf.contrib.data.padded_batch_and_drop_remainder( - params["batch_size"], padded_shapes)) + batch_size, padded_shapes)) else: # On GPU, bucket by length dataset = dataset.filter(gpu_valid_size) @@ -702,12 +760,11 @@ def define_shapes(example): batching_scheme["batch_sizes"] = [hparams.batch_size] batching_scheme["boundaries"] = [] dataset = data_reader.bucket_by_sequence_length( - dataset, - data_reader.example_length, - batching_scheme["boundaries"], + dataset, data_reader.example_length, batching_scheme["boundaries"], batching_scheme["batch_sizes"]) if not is_training: + def _pad_batch(features): if not config or config.data_parallelism.n <= 1: return features @@ -946,7 +1003,9 @@ def _maybe_pack_examples(self, generator): """Helper to generate_data().""" if self.packed_length: return generator_utils.pack_examples( - generator, self.has_inputs, self.packed_length, + generator, + self.has_inputs, + self.packed_length, chop_long_sequences=not self.has_inputs) else: return generator @@ -964,8 +1023,8 @@ def generate_data(self, data_dir, tmp_dir, task_id=-1): generator_utils.shuffle_dataset(all_paths) else: generator_utils.generate_dataset_and_shuffle( - self._maybe_pack_examples(self.generator(data_dir, tmp_dir, True)), - train_paths, + self._maybe_pack_examples(self.generator(data_dir, tmp_dir, + True)), train_paths, self._maybe_pack_examples(self.generator(data_dir, tmp_dir, False)), dev_paths) @@ -1007,9 +1066,7 @@ def hparams(self, defaults, unused_model_hparams): p.input_modality["targets_position"] = identity def example_reading_spec(self): - data_fields = { - "targets": tf.VarLenFeature(tf.int64) - } + data_fields = {"targets": tf.VarLenFeature(tf.int64)} if self.has_inputs: data_fields["inputs"] = tf.VarLenFeature(tf.int64) @@ -1090,11 +1147,15 @@ def text_filepaths_for_task(self, tmp_dir, task_id): assert task_id >= 0 assert task_id < self.num_train_shards + self.num_dev_shards if task_id < self.num_train_shards: - return [f for i, f in enumerate(self.train_text_filepaths(tmp_dir)) - if i % self.num_train_shards == task_id] + return [ + f for i, f in enumerate(self.train_text_filepaths(tmp_dir)) + if i % self.num_train_shards == task_id + ] else: - return [f for i, f in enumerate(self.dev_text_filepaths(tmp_dir)) - if i % self.num_dev_shards == task_id - self.num_train_shards] + return [ + f for i, f in 
enumerate(self.dev_text_filepaths(tmp_dir)) + if i % self.num_dev_shards == task_id - self.num_train_shards + ] def filepath_to_unicode_strings(self, filepath): """Read text out of an input file. @@ -1140,8 +1201,8 @@ def file_generator(self, chars_this_file = 0 tf.logging.info("reading file %s" % fname) for text in self.filepath_to_unicode_strings(fname): - if (max_chars_per_file and chars_this_file + len(text) - > max_chars_per_file): + if (max_chars_per_file and + chars_this_file + len(text) > max_chars_per_file): text = text[:max_chars_per_file - chars_this_file] if max_chars_total and chars_total + len(text) > max_chars_total: text = text[:max_chars_total - chars_total] @@ -1237,7 +1298,7 @@ def generate_data(self, data_dir, tmp_dir, task_id=-1): @property def max_chars_for_vocab(self): """Number of characters of training data to use for generating vocab.""" - return 10 ** 7 + return 10**7 @property def target_space_id(self): @@ -1254,7 +1315,7 @@ def num_dev_shards(self): @property def max_dev_chars(self): """Limit dev set to at most this many characters (default 10M).""" - return 10 ** 7 + return 10**7 @property def multiprocess_generate(self): @@ -1277,14 +1338,12 @@ def has_inputs(self): return False def eval_metrics(self): - return [ - metrics.Metrics.ACC, metrics.Metrics.NEG_LOG_PERPLEXITY - ] + return [metrics.Metrics.ACC, metrics.Metrics.NEG_LOG_PERPLEXITY] def to_unicode_ignore_erros(s): - return (unicode(s, "utf-8", errors="ignore") if six.PY2 else - s.decode("utf-8", "ignore")) + return (unicode(s, "utf-8", errors="ignore") + if six.PY2 else s.decode("utf-8", "ignore")) def _are_shapes_fully_defined(shapes_dict): @@ -1359,3 +1418,23 @@ def pad_batch(features, batch_multiple): padded_feature = tf.pad(feature, paddings) padded_features[k] = padded_feature return padded_features + + +def problem_hparams_to_features(problem_hparams): + input_space_id, target_space_id = 0, 0 + if problem_hparams: + input_space_id = problem_hparams.input_space_id + target_space_id = problem_hparams.target_space_id + return { + "problem_choice": 0, + "input_space_id": input_space_id, + "target_space_id": target_space_id, + } + + +def skip_random_fraction(dataset, data_file): + # Skip a random fraction at the beginning of the stream. The skip is + # essential for synchronous highly-parallel training to avoid multiple + # replicas reading the same data in lock-step. 
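+  # The skip is drawn uniformly from [0, N], N = cached record count of data_file.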
+ num_skip = random.randint(0, _file_num_records_cached(data_file)) + return dataset.skip(num_skip) diff --git a/tensor2tensor/data_generators/speech_recognition.py b/tensor2tensor/data_generators/speech_recognition.py index c54878045..01a3db564 100644 --- a/tensor2tensor/data_generators/speech_recognition.py +++ b/tensor2tensor/data_generators/speech_recognition.py @@ -33,6 +33,7 @@ from tensor2tensor.data_generators import problem from tensor2tensor.data_generators import text_encoder from tensor2tensor.layers import common_layers +from tensor2tensor.utils import metrics from tensor2tensor.utils import modality from tensor2tensor.utils import registry @@ -93,9 +94,7 @@ def compute_mel_filterbank_features( num_mel_bins: filterbank size log_noise_floor: clip small values to prevent numeric overflow in log Returns: - tuple of (filterbanks, filterbank_lens) where: - filterbanks are float32 tensor with shape [batch_size, len, num_bins, 1] - filterbank_lens are int64 tensor with shape [batch_size] + filterbanks: a float32 tensor with shape [batch_size, len, num_bins, 1] """ # `stfts` is a complex64 Tensor representing the short-time Fourier # Transform of each signal in `signals`. Its shape is @@ -214,6 +213,8 @@ class SpeechRecognitionProblem(problem.Problem): def hparams(self, defaults, model_hparams): p = model_hparams # Filterbank extraction + # The trainer seems to reserve memory for all members of the input dict + p.add_hparam("audio_keep_example_waveforms", False) p.add_hparam("audio_sample_rate", 16000) p.add_hparam("audio_preemphasis", 0.97) p.add_hparam("audio_dither", 1.0 / np.iinfo(np.int16).max) @@ -243,6 +244,9 @@ def target_space_id(self): def feature_encoders(self, _): return { + "inputs": None, # Put None to make sure that the logic in + # decoding.py doesn't try to convert the floats + # into text... "waveforms": AudioEncoder(), "targets": text_encoder.ByteTextEncoder(), } @@ -274,13 +278,19 @@ def preprocess_example(self, example, mode, hparams): mel_fbanks = add_delta_deltas(mel_fbanks) fbank_size = common_layers.shape_list(mel_fbanks) assert fbank_size[0] == 1 - # Later models like to flatten the two spatial dims. Instead, we add a unit - # spatial dim and flatten the frequencies and channels. + # Later models like to flatten the two spatial dims. Instead, we add a + # unit spatial dim and flatten the frequencies and channels. 
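+    # The reshape below gives shape [num_frames, 1, num_mel_bins * num_channels].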
example["inputs"] = tf.reshape( mel_fbanks, [fbank_size[1], 1, fbank_size[2] * fbank_size[3]]) + if not p.audio_keep_example_waveforms: + del example["waveforms"] return super(SpeechRecognitionProblem, self ).preprocess_example(example, mode, hparams) + def eval_metrics(self): + defaults = super(SpeechRecognitionProblem, self).eval_metrics() + return defaults + [metrics.Metrics.EDIT_DISTANCE] + @registry.register_audio_modality class SpeechRecognitionModality(modality.Modality): @@ -303,8 +313,8 @@ def bottom(self, inputs): num_mel_bins = p.audio_num_mel_bins num_channels = 3 if p.audio_add_delta_deltas else 1 # The convention is that the models are flattened along the spatial, - # dimensions, thus the speech preprocessor treats frequencies and channels - # as image colors (last axis) + # dimensions, thus the speech preprocessor treats frequencies and + # channels as image colors (last axis) x.set_shape([None, None, 1, num_mel_bins * num_channels]) # This replaces CMVN estimation on data diff --git a/tensor2tensor/data_generators/twentybn.py b/tensor2tensor/data_generators/twentybn.py new file mode 100644 index 000000000..c70313767 --- /dev/null +++ b/tensor2tensor/data_generators/twentybn.py @@ -0,0 +1,131 @@ +# coding=utf-8 +# Copyright 2017 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generator for twenty bn video data-set.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +# Dependency imports + +from tensor2tensor.data_generators import image_utils +from tensor2tensor.utils import registry + +import tensorflow as tf + + +_FILE_VIDEO_PATTERN = '20bn-something-something-v1' +_FILE_LABEL_PATTERN = 'something-something-v1-' + +_TWENTYBN_IMAGE_SIZE = 32 + + +def resize_video_frames(images, size): + resized_images = [] + for image in images: + resized_images.append( + tf.to_int64(tf.image.resize_images( + image, [size, size], tf.image.ResizeMethod.BILINEAR))) + return resized_images + + +def twentybn_generator(tmp_dir, training): + """Video generator for twenty-bn dataset. + + Args: + tmp_dir: path to temporary storage directory. + training: a Boolean; if true, we use the train set, otherwise the dev set. 
+ + + Yields: + A dictionary representing the images with the following fields: + * image/encoded: the string encoding the images of a video as JPG, + * image/format: the string "jpg" representing image format, + * image/class/label: an integer representing the label, + """ + data_suffix = 'train' if training else 'validation' + + def process_labels(): + all_labels = {} + with tf.gfile.Open(tmp_dir + _FILE_LABEL_PATTERN + 'labels.csv') as f: + for (i, label) in enumerate(f): + all_labels[label] = i+1 + return all_labels + + def read_id_to_labels(): + id_to_label = {} + with tf.gfile.Open(tmp_dir + _FILE_LABEL_PATTERN + + data_suffix + '.csv') as f: + for line in f: + values = line.split(';') + id_to_label[int(values[0])] = values[1] + return id_to_label + + # Get the label string to class id dictionary. + all_labels = process_labels() + # Get the video ids to label string dictionary. + id_to_labels = read_id_to_labels() + + # Read video frames as images. + for vname, label_id in id_to_labels.items(): + path = os.path.join(os.path.join(tmp_dir, _FILE_VIDEO_PATTERN), str(vname)) + label = all_labels[label_id] + images = [] + image_files = tf.gfile.Glob(os.path.join(path, '*.jpg')) + + for filename in image_files: + with tf.gfile.Open(filename, 'rb') as f: + encoded_image_data = f.read() + images.append(encoded_image_data) + yield { + 'image/encoded': images, + 'image/format': ['jpg'], + 'image/class/label': [int(label)] + } + + +@registry.register_problem +class VideoTwentybn(image_utils.Image2ClassProblem): + """Videonet.""" + + @property + def is_small(self): + return True + + @property + def num_classes(self): + return 174 + + @property + def train_shards(self): + return 100 + + @property + def dev_shards(self): + return 10 + + def preprocess_example(self, example, unused_mode, unused_hparams): + example['inputs'] = resize_video_frames(example['inputs'], + _TWENTYBN_IMAGE_SIZE) + return example + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return twentybn_generator(tmp_dir, True) + else: + return twentybn_generator(tmp_dir, False) diff --git a/tensor2tensor/layers/common_attention.py b/tensor2tensor/layers/common_attention.py index d7874ceff..e82e6d471 100644 --- a/tensor2tensor/layers/common_attention.py +++ b/tensor2tensor/layers/common_attention.py @@ -70,30 +70,40 @@ def partial(fct, *args, **kwargs): return functools.wraps(fct)(functools.partial(fct, *args, **kwargs)) def register_layer( - fct, + fct_in, default_args=None, default_kwargs=None, use_dp=True, + recompute_grad=False, ): """Turn a function into its standardized version. Args: - fct (fct): The function to register + fct_in (fct): The function to register default_args (list): The default parameters to add to the function. default_kwargs (dict): The default parameters to add to the function. Those arguments can be overwriten when calling the function. use_dp (bool): Wrap the function call within a dataparalellism object if dp is available. Some layers (like moe) must be called without dp. + recompute_grad (bool): If True, recompute the function during the + backward pass to save memory Returns: fct: the standardized layer function. """ # The kwargs given when calling the function overwrite the default ones - fct = partial(fct, *(default_args or []), **(default_kwargs or {})) + fct_in = partial(fct_in, *(default_args or []), **(default_kwargs or {})) - @functools.wraps(fct) + @functools.wraps(fct_in) def decorator(x, *args, **kwargs): """Call the layer function.""" + fct = fct_in # For closure. 
Could use nonlocal with Python 3 + # Eventually create the memory optimized version of the function + if recompute_grad: + fct = partial(fct, **kwargs) # recompute_grad only accept args + fct = common_layers.recompute_grad(fct) + kwargs = {} + # Eventually use dp (if given and not MoE) if use_dp and dp is not None: y = dp(fct, x, *args, **kwargs) @@ -135,10 +145,48 @@ def decorator(x, *args, **kwargs): dropout_rate=hparams.attention_dropout, )) - # === Local attention layer === + # === Memory efficient full-attention layer === + # Save memory by not storing the activations and + # recomputing them during the backward pass + memeff_attention_base_fn = register_layer( + multihead_attention, + default_kwargs=dict( + total_key_depth=total_key_depth, + total_value_depth=total_value_depth, + output_depth=hparams.hidden_size, + num_heads=hparams.num_heads, + dropout_rate=hparams.attention_dropout, + ), + recompute_grad=True, + ) + def memeff_attention_fn(*args, **kwargs): + """Modify args/kwargs for compatibility with recompute_grad.""" + kwargs = kwargs.copy() + assert len(args) == 1 + x = args[0] + memory_antecedent = kwargs.pop("memory_antecedent", x) # Same as x if None + if kwargs.get("bias", None) is not None: # Case where bias has been set + args = (x, memory_antecedent, kwargs.pop("bias")) + else: + # Otherwise, only 2 args. This is necessary as recompute_grad does not + # support None values. + args = (x, memory_antecedent) + return memeff_attention_base_fn(*args, **kwargs) + + # === Local attention (unmasked) layer === # Reuse same parameters as multihead_attention - # Only works for self attention. Always mask the future. + # Don't mask the future local_attention_fn = partial( + multihead_attention_fn, + block_length=hparams.attention_loc_block_length, + block_width=hparams.attention_loc_block_width, + attention_type="local_unmasked", + ) + + # === Local attention (masked) layer === + # Reuse same parameters as multihead_attention + # Only works for self attention. Always mask the future. + local_attention_masked_fn = partial( multihead_attention_fn, block_length=hparams.attention_loc_block_length, attention_type="local_mask_right", @@ -213,8 +261,9 @@ def decorator(x, *args, **kwargs): layers = dict( a=multihead_attention_fn, # Multihead full attention loc=local_attention_fn, # Local attention + locm=local_attention_masked_fn, # Local masked attention red=compressed_attention_fn, # Memory-compressed attention - mem=None, # Memory efficient + mem=memeff_attention_fn, # Memory efficient fc=conv_hidden_relu, sep=sep_conv_relu, # Fully connected sepm=sep_conv_relu_masked, # masked separable convolution @@ -252,6 +301,8 @@ def add_standard_attention_hparams(hparams): hparams.add_hparam("attention_dropout", 0.0) # Attention: Local hparams.add_hparam("attention_loc_block_length", 256) + # Attention: Local (unmasked only): How much to look left. 
+ hparams.add_hparam("attention_loc_block_width", 128) # Attention: Memory-compressed hparams.add_hparam("attention_red_factor", 3) hparams.add_hparam("attention_red_type", "conv") @@ -986,9 +1037,9 @@ def grouped_attention_multihead(query_antecedent, name, default_name="multihead_attention_sparse", values=[query_antecedent, memory_antecedent]): - q = tf.layers.dense( + q = common_layers.dense( query_antecedent, total_key_depth, use_bias=False, name="q_transform") - kv = tf.layers.dense( + kv = common_layers.dense( memory_antecedent, total_key_depth + total_value_depth, use_bias=False, @@ -1000,7 +1051,7 @@ def grouped_attention_multihead(query_antecedent, # We will train these by auxiliary losses. We use stop_gradient here # to keep these losses from back-propagating to the rest of the model. # We add biases that help balance the usage of the experts. - q_pred = tf.layers.dense( + q_pred = common_layers.dense( tf.stop_gradient(query_antecedent), num_heads * num_groups, use_bias=False, @@ -1008,7 +1059,7 @@ def grouped_attention_multihead(query_antecedent, q_pred = split_heads(q_pred, num_heads) q_bias = tf.get_variable("q_bias", [1, num_heads, 1, num_groups]) q_pred_biased = q_pred + q_bias - m_pred = tf.layers.dense( + m_pred = common_layers.dense( tf.stop_gradient(memory_antecedent), num_heads * num_groups, use_bias=False, @@ -1077,7 +1128,7 @@ def grouped_attention_multihead(query_antecedent, o = tf.reshape(o, [batch, num_heads, length_q, depth_v]) o = combine_heads(o) - o = tf.layers.dense( + o = common_layers.dense( o, output_depth, use_bias=False, name="output_transform") m_total = m_dispatcher.combine(m_total) @@ -1180,7 +1231,8 @@ def dot_product_attention(q, image_shapes=None, name=None, make_image_summary=True, - save_weights_to=None): + save_weights_to=None, + dropout_broadcast_dims=None): """dot-product attention. Args: @@ -1196,6 +1248,9 @@ def dot_product_attention(q, save_weights_to: an optional dictionary to capture attention weights for vizualization; the weights tensor will be appended there under a string key created from the variable scope (including name). + dropout_broadcast_dims: an optional list of integers less than 4 + specifying in which dimensions to broadcast the dropout decisions. + saves memory. Returns: A Tensor. @@ -1210,7 +1265,8 @@ def dot_product_attention(q, if save_weights_to is not None: save_weights_to[scope.name] = weights # dropping out the attention links for each of the heads - weights = tf.nn.dropout(weights, 1.0 - dropout_rate) + weights = common_layers.dropout_with_broadcast_dims( + weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) if expert_utils.should_generate_summaries() and make_image_summary: attention_image_summary(weights, image_shapes) return tf.matmul(weights, v) @@ -1229,21 +1285,16 @@ def _generate_relative_positions_matrix(length, max_relative_position): return final_mat -def _generate_relative_positions_embeddings(heads, length, depth, +def _generate_relative_positions_embeddings(length, depth, max_relative_position, name): - """Generates tensor of size [heads, length, length, depth].""" + """Generates tensor of size [length, length, depth].""" with tf.variable_scope(name): relative_positions_matrix = _generate_relative_positions_matrix( length, max_relative_position) vocab_size = max_relative_position * 2 + 1 - # Generates embedding for each relative position of dimension heads * depth. 
- embeddings_table = tf.get_variable("embeddings", - [vocab_size, heads * depth]) + # Generates embedding for each relative position of dimension depth. + embeddings_table = tf.get_variable("embeddings", [vocab_size, depth]) embeddings = tf.gather(embeddings_table, relative_positions_matrix) - # Split embeddings per head. - embeddings = tf.reshape(embeddings, [length, length, heads, depth]) - # Transpose to shape [heads, length, length, depth]. - embeddings = tf.transpose(embeddings, [2, 0, 1, 3]) return embeddings @@ -1255,18 +1306,30 @@ def _relative_attention_inner(x, y, z, transpose): Args: x: Tensor with shape [batch_size, heads, length, length or depth]. y: Tensor with shape [batch_size, heads, length, depth]. - z: Tensor with shape [heads, length, length, depth]. + z: Tensor with shape [length, length, depth]. transpose: Whether to tranpose inner matrices of y and z. Should be true if last dimension of x is depth, not length. Returns: - A Tensor with shape [batch_size, heads, length, a]. + A Tensor with shape [batch_size, heads, length, length or depth]. """ + batch_size = tf.shape(x)[0] + heads = x.get_shape().as_list()[1] + length = tf.shape(x)[2] + + # xy_matmul is [batch_size, heads, length, length or depth] xy_matmul = tf.matmul(x, y, transpose_b=transpose) - x_t = tf.transpose(x, [1, 2, 0, 3]) - x_tz_matmul = tf.matmul(x_t, z, transpose_b=transpose) - x_tz_matmul_t = tf.transpose(x_tz_matmul, [2, 0, 1, 3]) - return xy_matmul + x_tz_matmul_t + # x_t is [length, batch_size, heads, length or depth] + x_t = tf.transpose(x, [2, 0, 1, 3]) + # x_t_r is [length, batch_size * heads, length or depth] + x_t_r = tf.reshape(x_t, [length, heads * batch_size, -1]) + # x_tz_matmul is [length, batch_size * heads, length or depth] + x_tz_matmul = tf.matmul(x_t_r, z, transpose_b=transpose) + # x_tz_matmul_r is [length, batch_size, heads, length or depth] + x_tz_matmul_r = tf.reshape(x_tz_matmul, [length, batch_size, heads, -1]) + # x_tz_matmul_r_t is [batch_size, heads, length, length or depth] + x_tz_matmul_r_t = tf.transpose(x_tz_matmul_r, [1, 2, 0, 3]) + return xy_matmul + x_tz_matmul_r_t def dot_product_attention_relative(q, @@ -1276,7 +1339,8 @@ def dot_product_attention_relative(q, max_relative_position, dropout_rate=0.0, image_shapes=None, - name=None): + name=None, + make_image_summary=True): """Calculate relative position-aware dot-product self-attention. The attention calculation is augmented with learned representations for the @@ -1292,6 +1356,7 @@ def dot_product_attention_relative(q, dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. name: an optional string. + make_image_summary: Whether to make an attention image summary. Returns: A Tensor. @@ -1311,14 +1376,12 @@ def dot_product_attention_relative(q, q.get_shape().assert_is_compatible_with(v.get_shape()) # Use separate embeddings suitable for keys and values. - heads = q.get_shape().as_list()[1] depth = q.get_shape().as_list()[3] length = common_layers.shape_list(q)[2] relations_keys = _generate_relative_positions_embeddings( - heads, length, depth, max_relative_position, "relative_positions_keys") + length, depth, max_relative_position, "relative_positions_keys") relations_values = _generate_relative_positions_embeddings( - heads, length, depth, max_relative_position, - "relative_positions_values") + length, depth, max_relative_position, "relative_positions_values") # Compute self attention considering the relative position embeddings. 
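+    # i.e. logits[b, h, i, j] = dot(q[b, h, i], k[b, h, j] + relations_keys[i, j])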
logits = _relative_attention_inner(q, k, relations_keys, True) @@ -1326,7 +1389,7 @@ def dot_product_attention_relative(q, logits += bias weights = tf.nn.softmax(logits, name="attention_weights") weights = tf.nn.dropout(weights, 1.0 - dropout_rate) - if not tf.get_variable_scope().reuse: + if not tf.get_variable_scope().reuse and make_image_summary: attention_image_summary(weights, image_shapes) return _relative_attention_inner(weights, v, relations_values, False) @@ -2212,7 +2275,7 @@ def compute_qkv(query_antecedent, memory_antecedent = query_antecedent def _compute(inp, depth, filter_width, padding, name): if filter_width == 1: - return tf.layers.dense(inp, depth, use_bias=False, name=name) + return common_layers.dense(inp, depth, use_bias=False, name=name) else: return common_layers.conv1d(inp, depth, filter_width, padding, name=name) q = _compute( @@ -2246,6 +2309,8 @@ def multihead_attention(query_antecedent, num_memory_blocks=2, name=None, save_weights_to=None, + make_image_summary=True, + dropout_broadcast_dims=None, **kwargs): """Multihead scaled-dot-product attention with input/output transformations. @@ -2289,6 +2354,10 @@ def multihead_attention(query_antecedent, save_weights_to: an optional dictionary to capture attention weights for vizualization; the weights tensor will be appended there under a string key created from the variable scope (including name). + make_image_summary: Whether to make an attention image summary. + dropout_broadcast_dims: an optional list of integers less than 4 + specifying in which dimensions to broadcast the dropout decisions. + saves memory. **kwargs (dict): Parameters for the attention function Caching: @@ -2350,10 +2419,13 @@ def multihead_attention(query_antecedent, x, additional_returned_value = x # Unpack elif attention_type == "dot_product": x = dot_product_attention(q, k, v, bias, dropout_rate, image_shapes, - save_weights_to=save_weights_to) + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + dropout_broadcast_dims=dropout_broadcast_dims) elif attention_type == "dot_product_relative": x = dot_product_attention_relative(q, k, v, bias, max_relative_position, - dropout_rate, image_shapes) + dropout_rate, image_shapes, + make_image_summary=make_image_summary) elif attention_type == "local_mask_right": x = masked_local_attention_1d(q, k, v, block_length=block_length) elif attention_type == "local_unmasked": @@ -2367,7 +2439,7 @@ def multihead_attention(query_antecedent, x = dilated_self_attention_1d(q, k, v, block_length, block_width, gap_size, num_memory_blocks) x = combine_heads(x) - x = tf.layers.dense( + x = common_layers.dense( x, output_depth, use_bias=False, name="output_transform") if additional_returned_value is not None: return x, additional_returned_value @@ -2431,7 +2503,7 @@ def multihead_attention_2d(query_antecedent, x = masked_local_attention_2d( q, k, v, query_shape=query_shape, memory_flange=memory_flange) x = combine_heads_2d(x) - x = tf.layers.dense( + x = common_layers.dense( x, output_depth, use_bias=False, name="output_transform") return x @@ -2471,16 +2543,16 @@ def ffn_self_attention_layer(x, x_shape = common_layers.shape_list(x) part_depth = filter_depth // num_parts if not share_kv: - combined = tf.layers.dense( + combined = common_layers.dense( x, filter_depth * 3, use_bias=False, name="qkv_transform") combined = tf.expand_dims(combined, axis=2) q, k, v = tf.split(combined, 3, axis=3) else: q = tf.expand_dims( - tf.layers.dense( + common_layers.dense( x, filter_depth, use_bias=False, 
name="q_transform"), axis=2) kv_combined = tf.expand_dims( - tf.layers.dense( + common_layers.dense( tf.concat([x, x], axis=1), filter_depth, use_bias=False, name="kv_transform"), axis=2) @@ -2495,7 +2567,7 @@ def ffn_self_attention_layer(x, bias = None x = dot_product_attention(batch_q, batch_k, batch_v, bias, dropout_rate) x = tf.reshape(x, [x_shape[0], x_shape[1], filter_depth]) - x = tf.layers.dense( + x = common_layers.dense( x, output_depth, use_bias=False, name="output_transform") return x @@ -2547,7 +2619,8 @@ def parameter_attention(x, output_depth**0.5) batch_size = common_layers.shape_list(x)[0] length = common_layers.shape_list(x)[1] - q = tf.layers.dense(x, total_key_depth, use_bias=False, name="q_transform") + q = common_layers.dense( + x, total_key_depth, use_bias=False, name="q_transform") if dropout_rate: # This is a cheaper form of attention dropout where we use to use # the same dropout decisions across batch elemets and query positions, @@ -2566,7 +2639,7 @@ def parameter_attention(x, y = tf.transpose(y, [1, 2, 0, 3]) y = tf.reshape(y, [batch_size, length, total_value_depth]) y.set_shape([None, None, total_value_depth]) - y = tf.layers.dense( + y = common_layers.dense( y, output_depth, use_bias=False, name="output_transform") return y @@ -3319,7 +3392,7 @@ def pad_and_reshape(x): block_length, block_length, # Restore the block length dimension ]) - weights = tf.reduce_sum(weights, axis=3, keep_dims=True) # Compress block + weights = tf.reduce_sum(weights, axis=3, keepdims=True) # Compress block v_out = tf.matmul(weights, v) # [1, block_length] @ [block_length, depth] v_out = tf.squeeze(v_out, axis=3) return v_out diff --git a/tensor2tensor/layers/common_hparams.py b/tensor2tensor/layers/common_hparams.py index f6728ed86..02a5df2f3 100644 --- a/tensor2tensor/layers/common_hparams.py +++ b/tensor2tensor/layers/common_hparams.py @@ -32,9 +32,10 @@ def basic_params1(): """A set of basic hyperparameters.""" return tf.contrib.training.HParams( - # If the features are variable length, this is in tokens per batch per - # GPU. If the features are of known shape (e.g. image problems), this is - # the actual batch size. + # If the problem consists of variable-length sequences + # (see problem.batch_size_means_tokens()), then this is the number + # of tokens per batch per GPU or per TPU core. Otherwise, this is + # the number of examples per GPU or per TPU core. batch_size=4096, # If True, then if the features are of variable length, the batch_size is # used as the actual batch size (and not tokens per batch). 
@@ -50,6 +51,8 @@ def basic_params1(): clip_grad_norm=2.0, grad_noise_scale=0.0, summarize_grads=False, + # Whether to log the name and size of every variable + summarize_vars=False, initializer="orthogonal", initializer_gain=1.5, label_smoothing=0.1, @@ -58,9 +61,13 @@ def basic_params1(): optimizer_adam_beta1=0.85, optimizer_adam_beta2=0.997, optimizer_momentum_momentum=0.9, - weight_decay=0.1, + optimizer_momentum_nesterov=False, + weight_decay=1e-6, weight_noise=0.0, learning_rate_decay_scheme="none", + # decay_steps and decay_staircase for learning_rate_decay_scheme=="exp" + learning_rate_decay_steps=5000, + learning_rate_decay_staircase=False, learning_rate_minimum=None, learning_rate_decay_rate=1.0, learning_rate_warmup_steps=100, @@ -94,6 +101,13 @@ def basic_params1(): layer_postprocess_sequence="dan", # dropout rate to use during layer_preprocess and layer_postprocess layer_prepostprocess_dropout=0.1, + # broadcast dimensions for layer_prepostprocess_dropout + # a comma-separated list of integers. + # see common_layers.dropout_with_broadcast_dims() + # Change this to "1" to save memory. + layer_prepostprocess_dropout_broadcast_dims="", + # dropout some symbols (set them to 0) before embedding. + symbol_dropout=0.0, # What type of normalization to use norm_type="layer", # "batch", layer", "noam", "none". # epsilon parameter to normalization function @@ -194,12 +208,6 @@ def basic_params1(): # device training and mostly should be turned on for performance. One # exception are recurrent models: with dynamic loops it must be off. daisy_chain_variables=True, - # This is the actual batch size, *not* tokens per batch (i.e. for - # language models this is the number of sentences in the batch) - tpu_batch_size_per_shard=24, - # Set by t2t_trainer if --use_tpu to let the model know whether we are on - # TPU. Switching on/off tpu should not invalidate checkpoints. - use_tpu=False, # If True in PREDICT mode, then last-position-only optimizations are not # used. force_full_predict=False, @@ -340,7 +348,7 @@ def basic_range1(ranged_hparams): ["uniform", "orthogonal", "uniform_unit_scaling"]) rhp.set_float("initializer_gain", 0.5, 3.5) rhp.set_categorical("learning_rate_decay_scheme", - ["none", "sqrt", "noam", "exp10k"]) + ["none", "sqrt", "noam", "exp"]) rhp.set_float("optimizer_adam_epsilon", 1e-7, 1e-2, scale=rhp.LOG_SCALE) rhp.set_float("optimizer_adam_beta1", 0.8, 0.9) rhp.set_float("optimizer_adam_beta2", 0.995, 0.999) diff --git a/tensor2tensor/layers/common_layers.py b/tensor2tensor/layers/common_layers.py index 5e9d9cd45..c8d54fb99 100644 --- a/tensor2tensor/layers/common_layers.py +++ b/tensor2tensor/layers/common_layers.py @@ -40,6 +40,40 @@ allow_defun = False +def is_on_tpu(): + return tf.contrib.framework.get_name_scope().startswith("TPUReplicate") + + +def dropout_with_broadcast_dims(x, keep_prob, broadcast_dims=None, **kwargs): + """Like tf.nn.dropout but takes broadcast_dims instead of noise_shape. + + Instead of specifying noise_shape, this function takes broadcast_dims - + a list of dimension numbers in which noise_shape should be 1. The random + keep/drop tensor has dimensionality 1 along these dimensions. + + Args: + x: a floating point tensor. + keep_prob: A scalar Tensor with the same type as x. + The probability that each element is kept. + broadcast_dims: an optional list of integers + the dimensions along which to broadcast the keep/drop flags. + **kwargs: keyword arguments to tf.nn.dropout other than "noise_shape". 
+ Returns: + A Tensor with the same size and shape as x. + """ + assert "noise_shape" not in kwargs + if broadcast_dims: + shape = tf.shape(x) + ndims = len(x.get_shape()) + kwargs["noise_shape"] = [ + 1 if i in broadcast_dims else shape[i] for i in xrange(ndims)] + return tf.nn.dropout(x, keep_prob, **kwargs) + + +def comma_separated_string_to_integer_list(s): + return [int(i) for i in s.split(",") if i] + + def saturating_sigmoid(x): """Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1].""" with tf.name_scope("saturating_sigmoid", [x]): @@ -137,23 +171,6 @@ def shakeshake(xs, equal_grad=False): return shakeshake2(arg1, arg2) -def standardize_images(x): - """Image standardization on batches (tf.image.per_image_standardization).""" - with tf.name_scope("standardize_images", [x]): - x = tf.to_float(x) - x_mean = tf.reduce_mean(x, axis=[1, 2, 3], keep_dims=True) - x_variance = tf.reduce_mean( - tf.square(x - x_mean), axis=[1, 2, 3], keep_dims=True) - x_shape = shape_list(x) - num_pixels = tf.to_float(x_shape[1] * x_shape[2] * 3) - x = (x - x_mean) / tf.maximum(tf.sqrt(x_variance), tf.rsqrt(num_pixels)) - # TODO(lukaszkaiser): remove hack below, needed for greedy decoding for now. - if x.shape and len(x.shape) == 4 and x.shape[3] == 1: - x = tf.concat([x, x, x], axis=3) # Not used, just a dead tf.cond branch. - x.set_shape([None, None, None, 3]) - return x - - def convert_rgb_to_real(x): """Conversion of pixel values to real numbers.""" with tf.name_scope("rgb_to_real", [x]): @@ -164,44 +181,42 @@ def convert_rgb_to_real(x): return x -def image_augmentation(images, do_colors=False, crop_size=None): - """Image augmentation: cropping, flipping, and color transforms.""" - if crop_size is None: - crop_size = [299, 299] - images = tf.random_crop(images, crop_size + [3]) - images = tf.image.random_flip_left_right(images) - if do_colors: # More augmentation, but might be slow. - images = tf.image.random_brightness(images, max_delta=32. / 255.) - images = tf.image.random_saturation(images, lower=0.5, upper=1.5) - images = tf.image.random_hue(images, max_delta=0.2) - images = tf.image.random_contrast(images, lower=0.5, upper=1.5) - return images +def flatten4d3d(x): + """Flatten a 4d-tensor into a 3d-tensor by joining width and height.""" + xshape = shape_list(x) + result = tf.reshape(x, [xshape[0], xshape[1] * xshape[2], xshape[3]]) + return result + +# TODO(noam): remove this function after TPUs do gather faster. +def gather(params, indices): + """Version of tf.gather that works faster on tpu.""" + if not is_on_tpu(): + return tf.gather(params, indices) + vocab_size = params.get_shape().as_list()[0] + indices_flat = tf.reshape(indices, [-1]) + out = tf.matmul(tf.one_hot(indices_flat, vocab_size), params) + out = eu.reshape_like(out, tf.expand_dims(indices, -1)) + return out -def cifar_image_augmentation(images): - """Image augmentation suitable for CIFAR-10/100. - As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5). +def dropout_no_scaling(x, keep_prob): + """Like tf.nn.dropout, but does not scale up. Works on integers also. Args: - images: a Tensor. + x: a Tensor + keep_prob: a floating point number Returns: - Tensor of the same shape as images. 
+ a Tensor of the same size and shape as x """ - images = tf.image.resize_image_with_crop_or_pad(images, 40, 40) - images = tf.random_crop(images, [32, 32, 3]) - images = tf.image.random_flip_left_right(images) - return images - - -def flatten4d3d(x): - """Flatten a 4d-tensor into a 3d-tensor by joining width and height.""" - xshape = shape_list(x) - result = tf.reshape(x, [xshape[0], xshape[1] * xshape[2], xshape[3]]) - return result + if keep_prob == 1.0: + return x + return x * tf.cast( + tf.less(tf.random_uniform(tf.shape(x)), keep_prob), x.dtype) -def embedding(x, vocab_size, dense_size, name=None, reuse=None, multiplier=1.0): +def embedding(x, vocab_size, dense_size, name=None, reuse=None, multiplier=1.0, + symbol_dropout_rate=0.0): """Embed x of type int64 into dense vectors, reducing to max 4 dimensions.""" with tf.variable_scope( name, default_name="embedding", values=[x], reuse=reuse): @@ -211,7 +226,8 @@ def embedding(x, vocab_size, dense_size, name=None, reuse=None, multiplier=1.0): # parameter server. This avoids excess computation on the parameter server. if not tfe_context.in_eager_mode(): embedding_var = eu.convert_gradient_to_tensor(embedding_var) - emb_x = tf.gather(embedding_var, x) + x = dropout_no_scaling(x, 1.0 - symbol_dropout_rate) + emb_x = gather(embedding_var, x) if multiplier != 1.0: emb_x *= multiplier static_shape = emb_x.shape.as_list() @@ -462,7 +478,7 @@ def tpu_conv1d(inputs, filters, kernel_size, padding="SAME", name="tpu_conv1d"): a Tensor with shape [batch, length, filters]. """ if kernel_size == 1: - return tf.layers.dense(inputs, filters, name=name, use_bias=True) + return dense(inputs, filters, name=name, use_bias=True) if padding == "SAME": assert kernel_size % 2 == 1 first_offset = -((kernel_size - 1) // 2) @@ -475,7 +491,7 @@ def tpu_conv1d(inputs, filters, kernel_size, padding="SAME", name="tpu_conv1d"): for i in xrange(kernel_size): shifted = tf.slice(padded, [0, i, 0], tf.shape(inputs)) if i else inputs shifted.set_shape(inputs.get_shape()) - results.append(tf.layers.dense( + results.append(dense( shifted, filters, use_bias=(i == 0), name=name + "_%d" % i)) ret = tf.add_n(results) ret *= kernel_size ** -0.5 @@ -493,8 +509,8 @@ def layer_norm_vars(filters): def layer_norm_compute_python(x, epsilon, scale, bias): """Layer norm raw computation.""" - mean = tf.reduce_mean(x, axis=[-1], keep_dims=True) - variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keep_dims=True) + mean = tf.reduce_mean(x, axis=[-1], keepdims=True) + variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True) norm_x = (x - mean) * tf.rsqrt(variance + epsilon) return norm_x * scale + bias @@ -563,7 +579,8 @@ def layer_prepostprocess(previous_value, depth, epsilon, default_name, - name=None): + name=None, + dropout_broadcast_dims=None): """Apply a sequence of functions to the input or output of a layer. The sequence is specified as a string which may contain the following @@ -585,6 +602,9 @@ def layer_prepostprocess(previous_value, epsilon: a float (parameter for normalization) default_name: a string name: a string + dropout_broadcast_dims: an optional list of integers less than 3 + specifying in which dimensions to broadcast the dropout decisions. + saves memory. 
Returns: a Tensor @@ -599,7 +619,8 @@ def layer_prepostprocess(previous_value, x = apply_norm(x, norm_type, depth, epsilon) else: assert c == "d", ("Unknown sequence step %s" % c) - x = tf.nn.dropout(x, 1.0 - dropout_rate) + x = dropout_with_broadcast_dims( + x, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) return x @@ -634,6 +655,8 @@ def layer_preprocess(layer_input, hparams): norm_type=hparams.norm_type, depth=None, epsilon=hparams.norm_epsilon, + dropout_broadcast_dims=comma_separated_string_to_integer_list( + getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")), default_name="layer_prepostprocess") @@ -667,6 +690,8 @@ def layer_postprocess(layer_input, layer_output, hparams): norm_type=hparams.norm_type, depth=None, epsilon=hparams.norm_epsilon, + dropout_broadcast_dims=comma_separated_string_to_integer_list( + getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")), default_name="layer_postprocess") @@ -907,7 +932,7 @@ def decompress_seqcnn(x, targets_shape[0], targets_shape[1], targets_shape[2], channels, hidden_size ]) - return tf.layers.dense(outputs, targets_vocab_size) + return dense(outputs, targets_vocab_size) def simple_attention(target, source, bias=None): @@ -1146,7 +1171,7 @@ def mask_from_embedding(emb): Returns: a 0.0/1.0 Tensor with shape [batch, width, height, 1]. """ - return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keep_dims=True)) + return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keepdims=True)) def mask_leq(target_length, source_length): @@ -1295,13 +1320,15 @@ def maybe_zero_out_padding(inputs, kernel_size, nonpadding_mask): return inputs -def dense_relu_dense(inputs, filter_size, output_size, dropout=0.0): +def dense_relu_dense(inputs, filter_size, output_size, dropout=0.0, + dropout_broadcast_dims=None): """Hidden layer with RELU activation followed by linear projection.""" - h = tf.layers.dense( + h = dense( inputs, filter_size, use_bias=True, activation=tf.nn.relu, name="conv1") if dropout != 0.0: - h = tf.nn.dropout(h, 1.0 - dropout) - o = tf.layers.dense(h, output_size, use_bias=True, name="conv2") + h = dropout_with_broadcast_dims( + h, 1.0 - dropout, broadcast_dims=dropout_broadcast_dims) + o = dense(h, output_size, use_bias=True, name="conv2") return o @@ -1347,7 +1374,7 @@ def sepconv_relu_sepconv(inputs, else: is_3d = False h = separable_conv( - inputs, filter_size, first_kernel_size, ctivation=tf.nn.relu, + inputs, filter_size, first_kernel_size, activation=tf.nn.relu, padding=padding, name="conv1") if dropout != 0.0: h = tf.nn.dropout(h, 1.0 - dropout) @@ -1676,7 +1703,7 @@ def smoothing_cross_entropy(logits, depth=vocab_size, on_value=confidence, off_value=low_confidence) - xentropy = tf.nn.softmax_cross_entropy_with_logits( + xentropy = tf.nn.softmax_cross_entropy_with_logits_v2( logits=logits, labels=soft_targets) return xentropy - normalizing @@ -1710,7 +1737,7 @@ def global_pool_1d(inputs, pooling_type="MAX", mask=None): if mask is not None: # Some elems are dummy elems so we can't just reduce the average. output = tf.reduce_sum(inputs, axis=1) - num_elems = tf.reduce_sum(mask, axis=1, keep_dims=True) + num_elems = tf.reduce_sum(mask, axis=1, keepdims=True) output = tf.div(output, tf.maximum(num_elems, 1)) else: output = tf.reduce_mean(inputs, axis=1) @@ -1746,6 +1773,29 @@ def running_global_pool_1d(inputs, pooling_type="MAX"): return output +def gated_linear_unit_layer(x, name=None): + """Gated linear unit layer. + + Paper: Language Modeling with Gated Convolutional Networks. 
+ Link: https://arxiv.org/abs/1612.08083 + x = Wx * sigmoid(W'x). + + Args: + x: A tensor + name: A string + + Returns: + x: A tensor + """ + + with tf.variable_scope( + name, default_name="glu_layer", values=[x]): + depth = shape_list(x)[-1] + x = tf.layers.dense(x, depth * 2, activation=None) + x, gating_x = tf.split(x, 2, axis=-1) + return x * tf.nn.sigmoid(gating_x) + + def linear_set_layer(layer_size, inputs, context=None, @@ -2484,3 +2534,97 @@ def _step(source_replica, target_replica, x_split, op="plus_eq"): if maybe_reduce: y = expand_by_device(original_parallelism, parallelism, y) return y + + +def recompute_grad(fn): + """Decorator that recomputes the function on the backwards pass. + + Args: + fn: a function that takes Tensors (all as positional arguments) and returns + a tuple of Tensors. + + Returns: + A wrapped fn that is identical to fn when called, but its activations will + be discarded and recomputed on the backwards pass (i.e. on a call to + tf.gradients). + """ + + @functools.wraps(fn) + def wrapped(*args): + return _recompute_grad(fn, args) + + return wrapped + + +def _recompute_grad(fn, args): + """See recompute_grad.""" + + cached_vs = [] + cached_arg_scope = [] + + def grad_fn(inputs, variables, outputs, output_grads): + """Recompute outputs for gradient computation.""" + del outputs + variables = [underlying_variable_ref(v) for v in variables] + # Recompute outputs + with tf.control_dependencies(output_grads): + with tf.contrib.framework.arg_scope(cached_arg_scope[0]): + with tf.variable_scope(cached_vs[0], reuse=True): + outputs = fn(*inputs) + + if not (isinstance(outputs, list) or isinstance(outputs, tuple)): + outputs = [outputs] + outputs = list(outputs) + grads = tf.gradients(outputs, inputs + variables, output_grads) + grad_inputs = grads[:len(inputs)] + grad_vars = grads[len(inputs):] + if is_on_tpu(): + # TODO(noam): remove this hack once XLA does the right thing. + # Force the gradinets on the inputs to be computed before the variables + # are updated. This saves memory by preventing XLA from making an extra + # copy of the variables. + grad_vars = force_dependency(grad_vars, grad_inputs) + return grad_inputs, grad_vars + + @fn_with_custom_grad(grad_fn) + def fn_with_recompute(*args): + cached_vs.append(tf.get_variable_scope()) + # TODO(rsepassi): Rm conditional in TF 1.5 + if hasattr(tf.contrib.framework, "current_arg_scope"): + cached_arg_scope.append(tf.contrib.framework.current_arg_scope()) + else: + cached_arg_scope.append({}) + return fn(*args) + + return fn_with_recompute(*args) + + +def force_dependency(xs, ys): + """Force all of xs to depend on all of ys, using a false data dependency. + + XLA seems to ignore control dependencies. + + Args: + xs: a list of tensors + ys: a list of tensors: + Returns: + a list of tensors of the same length as xs + """ + def _first_element(x): + ndims = x.get_shape().ndims + return tf.reshape(tf.slice(x, [0] * ndims, [1] * ndims), []) + my_zero = tf.add_n([_first_element(y) for y in ys if y is not None]) * 1e-30 + return [x + my_zero for x in xs] + + +def dense(x, units, **kwargs): + """Identical to tf.layers.dense, Memory optimization on tpu.""" + fn = lambda x: tf.layers.dense(x, units, **kwargs) + if is_on_tpu(): + # TODO(noam): remove this hack once XLA does the right thing. + # Forces the gradinets on the inputs to be computed before the variables + # are updated. This saves memory by preventing XLA from making an extra + # copy of the variables. 
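+    # Only x is treated as a recompute input; units and kwargs stay in fn's closure.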
+ return _recompute_grad(fn, [x]) + else: + return fn(x) diff --git a/tensor2tensor/layers/common_layers_test.py b/tensor2tensor/layers/common_layers_test.py index ee07c48d3..38c009926 100644 --- a/tensor2tensor/layers/common_layers_test.py +++ b/tensor2tensor/layers/common_layers_test.py @@ -29,20 +29,6 @@ class CommonLayersTest(tf.test.TestCase): - def testStandardizeImages(self): - x = np.random.rand(5, 7, 7, 3) - with self.test_session() as session: - y = common_layers.standardize_images(tf.constant(x)) - res = session.run(y) - self.assertEqual(res.shape, (5, 7, 7, 3)) - - def testImageAugmentation(self): - x = np.random.rand(500, 500, 3) - with self.test_session() as session: - y = common_layers.image_augmentation(tf.constant(x)) - res = session.run(y) - self.assertEqual(res.shape, (299, 299, 3)) - def testSaturatingSigmoid(self): x = np.array([-120.0, -100.0, 0.0, 100.0, 120.0], dtype=np.float32) with self.test_session() as session: @@ -589,5 +575,52 @@ def grad_fn(inputs, variables, unused_outputs, unused_grad_outputs): self.assertAllClose(g1, g2) +class RecomputeTest(tf.test.TestCase): + + def testRecompute(self): + + def layer(x, name=None): + with tf.variable_scope(name, default_name="layer"): + x = tf.contrib.layers.layer_norm(x) + x = tf.layers.conv1d( + x, + 10, + 1, + use_bias=False, + kernel_initializer=tf.constant_initializer(42.42)) + x = tf.nn.relu(x) + return x + + def fn(x): + out = x + for _ in range(3): + out = layer(out) + return out + + @common_layers.recompute_grad + def fn_recompute(x): + return fn(x) + + x = tf.random_uniform((3, 1, 3)) + recompute_vars = None + with tf.variable_scope("recompute") as vs: + out1 = tf.reduce_sum(fn_recompute(x)) + recompute_vars = vs.trainable_variables() + reg_vars = None + with tf.variable_scope("regular") as vs: + out2 = tf.reduce_sum(fn(x)) + reg_vars = vs.trainable_variables() + + grad1 = tf.gradients(out1, recompute_vars) + grad2 = tf.gradients(out2, reg_vars) + + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + outs = sess.run([out1, out2, grad1, grad2]) + self.assertAllClose(outs[0], outs[1]) + for g1, g2 in zip(outs[2], outs[3]): + self.assertAllClose(g1, g2) + + if __name__ == "__main__": tf.test.main() diff --git a/tensor2tensor/layers/modalities.py b/tensor2tensor/layers/modalities.py index 0e41dd086..26063388b 100644 --- a/tensor2tensor/layers/modalities.py +++ b/tensor2tensor/layers/modalities.py @@ -32,15 +32,6 @@ from tensorflow.python.eager import context -# TODO(noam): remove this function after TPUs do gather faster. -def tpu_gather(params, indices): - vocab_size = params.get_shape().as_list()[0] - indices_flat = tf.reshape(indices, [-1]) - out = tf.matmul(tf.one_hot(indices_flat, vocab_size), params) - out = eu.reshape_like(out, tf.expand_dims(indices, -1)) - return out - - @registry.register_symbol_modality("default") class SymbolModality(modality.Modality): """Modality for sets of discrete symbols. @@ -107,8 +98,9 @@ def bottom_simple(self, x, name, reuse): # Squeeze out the channels dimension. 
x = tf.squeeze(x, axis=3) var = self._get_weights() - ret = (tpu_gather(var, x) if self._model_hparams.use_tpu - else tf.gather(var, x)) + x = common_layers.dropout_no_scaling( + x, 1.0 - self._model_hparams.symbol_dropout) + ret = common_layers.gather(var, x) if self._model_hparams.multiply_embedding_mode == "sqrt_depth": ret *= self._body_input_depth**0.5 ret *= tf.expand_dims(tf.to_float(tf.not_equal(x, 0)), -1) @@ -160,7 +152,7 @@ def top(self, body_output, _): else: body_output = tf.reshape(body_output, [-1, body_output_shape[-1]]) logits = tf.matmul(body_output, var, transpose_b=True) - if (self._model_hparams.use_tpu and + if (common_layers.is_on_tpu() and self._model_hparams.mode == tf.estimator.ModeKeys.TRAIN): # TPU does not react kindly to extra dimensions. # TODO(noam): remove this once TPU is more forgiving of extra dims. @@ -206,10 +198,10 @@ class ImageModality(modality.Modality): def bottom(self, inputs): with tf.variable_scope(self.name): - inputs = common_layers.standardize_images(inputs) + inputs = tf.to_float(inputs) if not context.in_eager_mode(): tf.summary.image("inputs", inputs, max_outputs=2) - return tf.to_float(inputs) + return inputs def targets_bottom(self, inputs): with tf.variable_scope(self.name): @@ -243,36 +235,38 @@ def top(self, body_output, _): return res -@registry.register_image_modality("image_identity_compress") -class ImageIdentityCompressModality(modality.Modality): - """Modality for images used in generation.""" +@registry.register_image_modality("image_channel_compress") +class ImageChannelCompressModality(modality.Modality): + """Modality for images using channel compression for generation.""" def bottom_compress(self, inputs, name="bottom"): """Transform input from data space to model space. - Perform conversion of RGB pixel values to a real number and combine values - for each pixel to form representation of image_length x image_length dims. + Perform conversion of RGB pixel values to a real number in the range -1 to 1 + and combine channel values for each pixel to form a representation of + size image_length x image_length dims. Args: - inputs: A Tensor with shape [batch, ...] + inputs: A Tensor representing pixel intensities as integers. [batch, ...] name: string, scope. Returns: body_input: A Tensor with shape [batch, ?, ?, body_input_depth]. """ with tf.variable_scope(name): + inputs = tf.to_float(inputs) + tf.summary.image("inputs", inputs, max_outputs=2) inputs = common_layers.convert_rgb_to_real(inputs) ishape = common_layers.shape_list(inputs) inputs = tf.reshape(inputs, [-1, ishape[1], ishape[2] * ishape[3], 1]) inputs.set_shape([None, None, None, 1]) # We compress RGB intensities for each pixel using a conv. 
- x = common_layers.conv_block( - inputs, - self._body_input_depth, [((1, 1), (1, 3))], - first_relu=False, - padding="VALID", - strides=(1, 3), - force2d=True, - name="conv_input") + x = tf.layers.conv2d(inputs, + self._body_input_depth, (1, 3), + padding="VALID", + strides=(1, 3), + activation=tf.nn.relu, + name="conv_input") + x.set_shape([None, None, None, self._body_input_depth]) return x def bottom(self, inputs): @@ -287,16 +281,18 @@ def top(self, body_output, _): img_len = self._model_hparams.img_len channels = self._model_hparams.num_channels batch = common_layers.shape_list(body_output)[0] - x = common_layers.conv( + x = tf.layers.conv2d( body_output, - hidden_dim * channels, (1, 1), + hidden_dim*channels, (1, 1), + strides=(1, 1), padding="VALID", activation=tf.nn.relu, name="decompress_conv") x = tf.reshape(x, [batch, img_len, img_len * channels, hidden_dim]) - x.set_shape([None, None, None, hidden_dim]) - x = common_layers.conv( - x, self.top_dimensionality, (1, 1), name="output_conv") + x = common_layers.layer_preprocess(x, self._model_hparams) + x = tf.layers.dense(x, 256, + use_bias=True, activation=None, + name="output_conv") x = tf.reshape(x, [-1, img_len, img_len, channels, self.top_dimensionality]) return x @@ -425,7 +421,7 @@ def top(self, body_output, _): """ with tf.variable_scope(self.name): x = body_output - x = tf.reduce_mean(x, axis=[1, 2], keep_dims=True) + x = tf.reduce_mean(x, axis=[1, 2], keepdims=True) res = tf.layers.dense(x, self._vocab_size) return tf.expand_dims(res, 3) @@ -490,19 +486,6 @@ def loss(self, top_out, targets): return tf.reduce_sum(lp_loss * weights), tf.reduce_sum(weights) -@registry.register_generic_modality("zero_loss") -@registry.register_audio_modality("zero_loss") -@registry.register_image_modality("zero_loss") -@registry.register_symbol_modality("zero_loss") -@registry.register_class_label_modality("zero_loss") -@registry.register_real_modality("zero_loss") -class IdentityZeroLossModality(IdentityModality): - """Identity with 0 loss.""" - - def loss(self, top_out, targets): - return tf.constant(0., tf.float32), tf.constant(0., tf.float32) - - @registry.register_symbol_modality("identity") class IdentitySymbolModality(SymbolModality): """Symbol modality with identity top and bottom transformations. @@ -515,3 +498,7 @@ def bottom(self, x): def top(self, body_output, _): return body_output + + def targets_bottom(self, x): + """SymbolModality overrides targets_bottom, so need to override here too.""" + return self.bottom(x) diff --git a/tensor2tensor/layers/rev_block.py b/tensor2tensor/layers/rev_block.py index 88bf622ab..1684ebf2c 100644 --- a/tensor2tensor/layers/rev_block.py +++ b/tensor2tensor/layers/rev_block.py @@ -18,15 +18,11 @@ From [The Reversible Residual Network: Backpropagation Without Storing Activations](https://arxiv.org/abs/1707.04585). - -Also contains the @recompute_grad decorator, which recomputes the forward -function on the backwards pass. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function -import functools import re # Dependency imports @@ -351,59 +347,3 @@ def rev_block(x1, """ block = RevBlock(f, g, num_layers, f_side_input, g_side_input, is_training) return block.forward(x1, x2) - - -def recompute_grad(fn): - """Decorator that recomputes the function on the backwards pass. - - Args: - fn: a function that takes Tensors (all as positional arguments) and returns - a tuple of Tensors. 
- - Returns: - A wrapped fn that is identical to fn when called, but its activations will - be discarded and recomputed on the backwards pass (i.e. on a call to - tf.gradients). - """ - - @functools.wraps(fn) - def wrapped(*args): - return _recompute_grad(fn, args) - - return wrapped - - -def _recompute_grad(fn, args): - """See recompute_grad.""" - - cached_vs = [] - cached_arg_scope = [] - - def grad_fn(inputs, variables, outputs, output_grads): - """Recompute outputs for gradient computation.""" - del outputs - # Recompute outputs - with tf.control_dependencies(output_grads): - with tf.contrib.framework.arg_scope(cached_arg_scope[0]): - with tf.variable_scope(cached_vs[0], reuse=True): - outputs = fn(*inputs) - - if not (isinstance(outputs, list) or isinstance(outputs, tuple)): - outputs = [outputs] - outputs = list(outputs) - grads = tf.gradients(outputs, inputs + variables, output_grads) - grad_inputs = grads[:len(inputs)] - grad_vars = grads[len(inputs):] - return grad_inputs, grad_vars - - @common_layers.fn_with_custom_grad(grad_fn) - def fn_with_recompute(*args): - cached_vs.append(tf.get_variable_scope()) - # TODO(rsepassi): Rm conditional in TF 1.5 - if hasattr(tf.contrib.framework, "current_arg_scope"): - cached_arg_scope.append(tf.contrib.framework.current_arg_scope()) - else: - cached_arg_scope.append({}) - return fn(*args) - - return fn_with_recompute(*args) diff --git a/tensor2tensor/layers/rev_block_test.py b/tensor2tensor/layers/rev_block_test.py index acc68f9bd..de7a9bca2 100644 --- a/tensor2tensor/layers/rev_block_test.py +++ b/tensor2tensor/layers/rev_block_test.py @@ -183,52 +183,5 @@ def f(x): self._testRevBlock(x=x, f=f) -class RecomputeTest(tf.test.TestCase): - - def testRecompute(self): - - def layer(x, name=None): - with tf.variable_scope(name, default_name="layer"): - x = tf.contrib.layers.layer_norm(x) - x = tf.layers.conv1d( - x, - 10, - 1, - use_bias=False, - kernel_initializer=tf.constant_initializer(42.42)) - x = tf.nn.relu(x) - return x - - def fn(x): - out = x - for _ in range(3): - out = layer(out) - return out - - @rev_block.recompute_grad - def fn_recompute(x): - return fn(x) - - x = tf.random_uniform((3, 1, 3)) - recompute_vars = None - with tf.variable_scope("recompute") as vs: - out1 = tf.reduce_sum(fn_recompute(x)) - recompute_vars = vs.trainable_variables() - reg_vars = None - with tf.variable_scope("regular") as vs: - out2 = tf.reduce_sum(fn(x)) - reg_vars = vs.trainable_variables() - - grad1 = tf.gradients(out1, recompute_vars) - grad2 = tf.gradients(out2, reg_vars) - - with self.test_session() as sess: - sess.run(tf.global_variables_initializer()) - outs = sess.run([out1, out2, grad1, grad2]) - self.assertAllClose(outs[0], outs[1]) - for g1, g2 in zip(outs[2], outs[3]): - self.assertAllClose(g1, g2) - - if __name__ == "__main__": tf.test.main() diff --git a/tensor2tensor/models/__init__.py b/tensor2tensor/models/__init__.py index ef92ccaff..982327b92 100644 --- a/tensor2tensor/models/__init__.py +++ b/tensor2tensor/models/__init__.py @@ -26,7 +26,7 @@ from tensor2tensor.models import aligned from tensor2tensor.models import attention_lm from tensor2tensor.models import attention_lm_moe -from tensor2tensor.models import bluenet +from tensor2tensor.models import basic from tensor2tensor.models import bytenet from tensor2tensor.models import cycle_gan from tensor2tensor.models import gene_expression diff --git a/tensor2tensor/models/basic.py b/tensor2tensor/models/basic.py new file mode 100644 index 000000000..1354259b5 --- /dev/null +++ 
b/tensor2tensor/models/basic.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# Copyright 2017 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Basic models for testing simple tasks.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Dependency imports + +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow as tf + + +@registry.register_model +class BasicFcRelu(t2t_model.T2TModel): + + def body(self, features): + hparams = self._hparams + x = features["inputs"] + shape = common_layers.shape_list(x) + x = tf.reshape(x, [-1, shape[1] * shape[2] * shape[3]]) + for i in xrange(hparams.num_hidden_layers): + x = tf.layers.dense(x, hparams.hidden_size, name="layer_%d" % i) + x = tf.nn.dropout(x, keep_prob=1.0 - hparams.dropout) + x = tf.nn.relu(x) + return tf.expand_dims(tf.expand_dims(x, axis=1), axis=1) # 4D For T2T. + + +@registry.register_hparams +def basic_fc_small(): + """Small fully connected model.""" + hparams = common_hparams.basic_params1() + hparams.learning_rate = 0.1 + hparams.batch_size = 128 + hparams.hidden_size = 256 + hparams.num_hidden_layers = 2 + hparams.initializer = "uniform_unit_scaling" + hparams.initializer_gain = 1.0 + hparams.weight_decay = 0.0 + hparams.dropout = 0.0 + return hparams diff --git a/tensor2tensor/models/bluenet.py b/tensor2tensor/models/bluenet.py deleted file mode 100644 index 86625a834..000000000 --- a/tensor2tensor/models/bluenet.py +++ /dev/null @@ -1,545 +0,0 @@ -# coding=utf-8 -# Copyright 2017 The Tensor2Tensor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
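The new `basic.py` registers a deliberately simple fully connected model for smoke-testing the pipeline (note that it relies on the Python 2 builtin `xrange` without importing it from `six.moves`). A cut-down sketch of what its body computes, using Python 3's `range` and made-up sizes:

```
import tensorflow as tf

def basic_fc_body(inputs, hidden_size=256, num_hidden_layers=2, dropout=0.0):
  # Flatten [batch, h, w, c] -> [batch, h*w*c], then stack dense/dropout/relu.
  shape = inputs.get_shape().as_list()
  x = tf.reshape(inputs, [-1, shape[1] * shape[2] * shape[3]])
  for i in range(num_hidden_layers):
    x = tf.layers.dense(x, hidden_size, name="layer_%d" % i)
    x = tf.nn.dropout(x, keep_prob=1.0 - dropout)
    x = tf.nn.relu(x)
  # T2T bodies return 4-D tensors, so restore two singleton spatial dims.
  return tf.expand_dims(tf.expand_dims(x, axis=1), axis=1)

images = tf.placeholder(tf.float32, [None, 28, 28, 1])
body_out = basic_fc_body(images)   # [batch, 1, 1, 256]
```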
- -"""BlueNet: and out of the blue network to experiment with shake-shake.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections - -# Dependency imports - -import numpy as np - -from six.moves import xrange # pylint: disable=redefined-builtin - -from tensor2tensor.layers import common_hparams -from tensor2tensor.layers import common_layers -from tensor2tensor.utils import registry -from tensor2tensor.utils import t2t_model - -import tensorflow as tf - -# var: 1d tensor, raw weights for each choice -# tempered_var: raw weights with temperature applied -# inv_t: inverse of the temperature to use when normalizing `var` -# normalized: same shape as var, but where each item is between 0 and 1, and -# the sum is 1 -SelectionWeights = collections.namedtuple( - "SelectionWeights", ["var", "tempered_var", "inv_t", "normalized"]) - - -def create_selection_weights(name, - type_, - shape, - inv_t=1, - initializer=tf.zeros_initializer(), - regularizer=None, - names=None): - """Create a SelectionWeights tuple. - - Args: - name: Name for the underlying variable containing the unnormalized weights. - type_: "softmax" or "sigmoid" or ("softmax_topk", k) where k is an int. - shape: Shape for the variable. - inv_t: Inverse of the temperature to use in normalization. - initializer: Initializer for the variable, passed to `tf.get_variable`. - regularizer: Regularizer for the variable. A callable which accepts - `tempered_var` and `normalized`. - names: Name of each selection. - - Returns: - The created SelectionWeights tuple. - - Raises: - ValueError: if type_ is not in the supported range. - """ - var = tf.get_variable(name, shape, initializer=initializer) - - if callable(inv_t): - inv_t = inv_t(var) - if inv_t == 1: - tempered_var = var - else: - tempered_var = var * inv_t - - if type_ == "softmax": - weights = tf.nn.softmax(tempered_var) - elif type_ == "sigmoid": - weights = tf.nn.sigmoid(tempered_var) - elif isinstance(type_, (list, tuple)) and type_[0] == "softmax_topk": - assert len(shape) == 1 - # TODO(rshin): Change this to select without replacement? - selection = tf.multinomial(tf.expand_dims(var, axis=0), 4) - selection = tf.squeeze(selection, axis=0) # [k] selected classes. - to_run = tf.one_hot(selection, shape[0]) # [k x nmodules] one-hot. - # [nmodules], 0=not run, 1=run. - to_run = tf.minimum(tf.reduce_sum(to_run, axis=0), 1) - weights = tf.nn.softmax(tempered_var - 1e9 * (1.0 - to_run)) - else: - raise ValueError("Unknown type: %s" % type_) - - if regularizer is not None: - loss = regularizer(tempered_var, weights) - if loss is not None: - tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, loss) - - if names is not None: - tf.get_collection_ref("selection_weight_names/" + var.name).extend( - names.flatten() if isinstance(names, np.ndarray) else names) - tf.add_to_collection("selection_weight_names_tensor/" + var.name, - tf.constant(names)) - - return SelectionWeights( - var=var, tempered_var=tempered_var, inv_t=inv_t, normalized=weights) - - -def kernel_premultiplier(max_kernel_size, kernel_sizes, input_channels, - kernel_selection_weights, channel_selection_weights): - """Get weights to multiply the kernel with, before convolving. - - Args: - max_kernel_size: (int, int) tuple giving the largest kernel size. - kernel_sizes: A list of (height, width) pairs of integers, containing - different kernel sizes to use. 
- input_channels: A list of (begin, end) pairs of integers, which describe - which channels in the input to use. - kernel_selection_weights: SelectionWeights object to use for choosing - among kernel sizes. - channel_selection_weights: SelectionWeights object to use for choosing - among which input channels to use. - - Returns: - The multiplier. - """ - kernel_weights = [] - for kernel_i, (h, w) in enumerate(kernel_sizes): - top = (max_kernel_size[0] - h) // 2 - bot = max_kernel_size[0] - h - top - left = (max_kernel_size[1] - w) // 2 - right = max_kernel_size[1] - w - left - kernel_weight = tf.fill((h, w), - kernel_selection_weights.normalized[kernel_i]) - if top != 0 or bot != 0 or left != 0 or right != 0: - kernel_weight = tf.pad(kernel_weight, [[top, bot], [left, right]]) - kernel_weights.append(kernel_weight) - kernel_weight = tf.add_n(kernel_weights) - - channel_weights = [] - min_channel = np.min(input_channels) - max_channel = np.max(input_channels) - for channel_i, (begin, end) in enumerate(input_channels): - channel_weight = tf.pad( - tf.fill((end - begin,), - channel_selection_weights.normalized[channel_i]), - [[begin - min_channel, max_channel - end]]) - channel_weights.append(channel_weight) - channel_weight = tf.add_n(channel_weights) - - multiplier = (tf.reshape(kernel_weight, max_kernel_size + - (1, 1)) * tf.reshape(channel_weight, (1, 1, -1, 1))) - return multiplier - - -def make_subseparable_kernel(kernel_size, input_channels, filters, separability, - kernel_initializer, kernel_regularizer): - """Make a kernel to do subseparable convolution wiht `tf.nn.conv2d`. - - Args: - kernel_size: (height, width) tuple. - input_channels: Number of input channels. - filters: Number of output channels. - separability: Integer denoting separability. - kernel_initializer: Initializer to use for the kernel. - kernel_regularizer: Regularizer to use for the kernel. - - Returns: - A 4D tensor. - """ - if separability == 1: - # Non-separable convolution - return tf.get_variable( - "kernel", - kernel_size + (input_channels, filters), - initializer=kernel_initializer, - regularizer=kernel_regularizer) - - elif separability == 0 or separability == -1: - # Separable convolution - # TODO(rshin): Check initialization is as expected, as these are not 4D. 
- depthwise_kernel = tf.get_variable( - "depthwise_kernel", - kernel_size + (input_channels,), - initializer=kernel_initializer, - regularizer=kernel_regularizer) - - pointwise_kernel = tf.get_variable( - "pointwise_kernel", (input_channels, filters), - initializer=kernel_initializer, - regularizer=kernel_regularizer) - - expanded_depthwise_kernel = tf.transpose( - tf.scatter_nd( - indices=tf.tile( - tf.expand_dims(tf.range(0, input_channels), axis=1), [1, 2]), - updates=tf.transpose(depthwise_kernel, (2, 0, 1)), - shape=(input_channels, input_channels) + kernel_size), (2, 3, 0, 1)) - - return tf.reshape( - tf.matmul( - tf.reshape(expanded_depthwise_kernel, (-1, input_channels)), - pointwise_kernel), kernel_size + (input_channels, filters)) - - elif separability >= 2: - assert filters % separability == 0, (filters, separability) - assert input_channels % separability == 0, (filters, separability) - - raise NotImplementedError - - elif separability <= -2: - separability *= -1 - assert filters % separability == 0, (filters, separability) - assert input_channels % separability == 0, (filters, separability) - - raise NotImplementedError - - -def multi_subseparable_conv(inputs, - filters, - kernel_sizes, - input_channels, - separabilities, - kernel_selection_weights=None, - channel_selection_weights=None, - separability_selection_weights=None, - kernel_selection_weights_params=None, - channel_selection_weights_params=None, - separability_selection_weights_params=None, - kernel_initializer=None, - kernel_regularizer=None, - scope=None): - """Simultaneously compute different kinds of convolutions on subsets of input. - - Args: - inputs: 4D tensor containing the input, in NHWC format. - filters: Integer, number of output channels. - kernel_sizes: A list of (height, width) pairs of integers, containing - different kernel sizes to use. - input_channels: A list of (begin, end) pairs of integers, which describe - which channels in the input to use. - separabilities: An integer or a list, how separable are the convolutions. - kernel_selection_weights: SelectionWeights object to use for choosing - among kernel sizes. - channel_selection_weights: SelectionWeights object to use for choosing - among which input channels to use. - separability_selection_weights: SelectionWeights object to use for choosing - separability. - kernel_selection_weights_params: dict with up to three keys - - initializer - - regularizer - - inv_t - channel_selection_weights_params: dict with up to three keys - - initializer - - regularizer - - inv_t - separability_selection_weights_params: dict with up to three keys - - initializer - - regularizer - - inv_t - kernel_initializer: Initializer to use for kernels. - kernel_regularizer: Regularizer to use for kernels. - scope: the scope to use. - - Returns: - Result of convolution. - """ - kernel_selection_weights_params = kernel_selection_weights_params or {} - channel_selection_weights_params = channel_selection_weights_params or {} - if separability_selection_weights_params is None: - separability_selection_weights_params = {} - - # Get input image size. 
- input_shape = inputs.get_shape().as_list() - assert len(input_shape) == 4 - in_channels = input_shape[3] - assert in_channels is not None - - max_kernel_size = tuple(np.max(kernel_sizes, axis=0)) - max_num_channels = np.max(input_channels) - np.min(input_channels) - - with tf.variable_scope(scope or "selection_weights"): - if kernel_selection_weights is None: - kernel_selection_weights = create_selection_weights( - "kernels", - "softmax", (len(kernel_sizes),), - names=["kernel_h{}_w{}".format(h, w) for h, w in kernel_sizes], - **kernel_selection_weights_params) - - if channel_selection_weights is None: - channel_selection_weights = create_selection_weights( - "channels", - "softmax", (len(input_channels),), - names=["channels_{}_{}".format(c1, c2) for c1, c2 in input_channels], - **channel_selection_weights_params) - - if separability_selection_weights is None: - separability_selection_weights = create_selection_weights( - "separability", - "softmax", (len(separabilities),), - names=["separability_{}".format(s) for s in separabilities], - **separability_selection_weights_params) - - kernels = [] - for separability in separabilities: - with tf.variable_scope("separablity_{}".format(separability)): - kernel = make_subseparable_kernel(max_kernel_size, max_num_channels, - filters, separability, - kernel_initializer, kernel_regularizer) - - premultiplier = kernel_premultiplier( - max_kernel_size, kernel_sizes, input_channels, - kernel_selection_weights, channel_selection_weights) - - kernels.append(kernel * premultiplier) - - kernel = tf.add_n([ - separability_selection_weights.normalized[i] * k - for i, k in enumerate(kernels) - ]) - - if np.min(input_channels) != 0 or np.max(input_channels) != in_channels: - inputs = inputs[:, :, :, np.min(input_channels):np.max(input_channels)] - - return tf.nn.conv2d( - inputs, - filter=kernel, - strides=[1, 1, 1, 1], - padding="SAME", - data_format="NHWC", - name="conv2d") - - -def conv_module(kw, kh, sep, div): - - def convfn(x, hparams): - return common_layers.subseparable_conv( - x, - hparams.hidden_size // div, (kw, kh), - padding="SAME", - separability=sep, - name="conv_%d%d_sep%d_div%d" % (kw, kh, sep, div)) - - return convfn - - -def multi_conv_module(kernel_sizes, seps): - - def convfn(x, hparams): - return multi_subseparable_conv(x, hparams.hidden_size, kernel_sizes, - [(0, hparams.hidden_size)], seps) - - return convfn - - -def layernorm_module(x, hparams): - return common_layers.layer_norm(x, hparams.hidden_size, name="layer_norm") - - -def noamnorm_module(x, hparams): - del hparams # Unused. - return common_layers.noam_norm(x) - - -def identity_module(x, hparams): - del hparams # Unused. - return x - - -def first_binary_module(x, y, hparams): - del y, hparams # Unused. - return x - - -def second_binary_module(x, y, hparams): - del x, hparams # Unused. - return y - - -def sum_binary_module(x, y, hparams): - del hparams # Unused. - return x + y - - -def shakeshake_binary_module(x, y, hparams): - del hparams # Unused. 
- return common_layers.shakeshake2(x, y) - - -def run_binary_modules(modules, cur1, cur2, hparams): - """Run binary modules.""" - selection_weights = create_selection_weights( - "selection", - "softmax", - shape=[len(modules)], - inv_t=100.0 * common_layers.inverse_exp_decay( - hparams.anneal_until, min_value=0.01)) - all_res = [modules[n](cur1, cur2, hparams) for n in xrange(len(modules))] - all_res = tf.concat([tf.expand_dims(r, axis=0) for r in all_res], axis=0) - res = all_res * tf.reshape(selection_weights.normalized, [-1, 1, 1, 1, 1]) - return tf.reduce_sum(res, axis=0) - - -def run_unary_modules_basic(modules, cur, hparams): - """Run unary modules.""" - selection_weights = create_selection_weights( - "selection", - "softmax", - shape=[len(modules)], - inv_t=100.0 * common_layers.inverse_exp_decay( - hparams.anneal_until, min_value=0.01)) - all_res = [modules[n](cur, hparams) for n in xrange(len(modules))] - all_res = tf.concat([tf.expand_dims(r, axis=0) for r in all_res], axis=0) - res = all_res * tf.reshape(selection_weights.normalized, [-1, 1, 1, 1, 1]) - return tf.reduce_sum(res, axis=0) - - -def run_unary_modules_sample(modules, cur, hparams, k): - """Run modules, sampling k.""" - selection_weights = create_selection_weights( - "selection", ("softmax_topk", k), - shape=[len(modules)], - inv_t=100.0 * common_layers.inverse_exp_decay( - hparams.anneal_until, min_value=0.01)) - all_res = [ - tf.cond( - tf.less(selection_weights.normalized[n], 1e-6), - lambda: tf.zeros_like(cur), - lambda i=n: modules[i](cur, hparams)) for n in xrange(len(modules)) - ] - all_res = tf.concat([tf.expand_dims(r, axis=0) for r in all_res], axis=0) - res = all_res * tf.reshape(selection_weights.normalized, [-1, 1, 1, 1, 1]) - return tf.reduce_sum(res, axis=0) - - -def run_unary_modules(modules, cur, hparams): - if len(modules) < 8: - return run_unary_modules_basic(modules, cur, hparams) - return run_unary_modules_sample(modules, cur, hparams, 4) - - -def batch_deviation(x): - """Average deviation of the batch.""" - x_mean = tf.reduce_mean(x, axis=[0], keep_dims=True) - x_variance = tf.reduce_mean(tf.square(x - x_mean), axis=[0], keep_dims=True) - return tf.reduce_mean(tf.sqrt(x_variance)) - - -@registry.register_model -class BlueNet(t2t_model.T2TModel): - - def body(self, features): - hparams = self._hparams - # TODO(rshin): Give identity_module lower weight by default. 
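The BlueNet code removed in this hunk searched over candidate modules by mixing their outputs with annealed softmax selection weights, as in `create_selection_weights` and `run_unary_modules_basic` above. The core trick in isolation, with made-up candidate modules and without the temperature annealing:

```
import tensorflow as tf

def softmax_module_mix(x, modules, inv_temperature=1.0):
  # One trainable logit per candidate module; the softmax decides how much
  # each module contributes. Raising inv_temperature sharpens the choice.
  logits = tf.get_variable("selection", [len(modules)],
                           initializer=tf.zeros_initializer())
  weights = tf.nn.softmax(logits * inv_temperature)
  outputs = tf.stack([m(x) for m in modules], axis=0)   # [num_modules, ...]
  weights = tf.reshape(weights, [-1] + [1] * (outputs.shape.ndims - 1))
  return tf.reduce_sum(outputs * weights, axis=0)

x = tf.random_normal([8, 16])
mixed = softmax_module_mix(
    x, [tf.identity, tf.nn.relu, tf.tanh], inv_temperature=10.0)
```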
- multi_conv = multi_conv_module( - kernel_sizes=[(3, 3), (5, 5), (7, 7)], seps=[0, 1]) - conv_modules = [multi_conv, identity_module] - activation_modules = [ - identity_module, lambda x, _: tf.nn.relu(x), lambda x, _: tf.nn.elu(x), - lambda x, _: tf.tanh(x) - ] - norm_modules = [identity_module, layernorm_module, noamnorm_module] - binary_modules = [ - first_binary_module, second_binary_module, sum_binary_module, - shakeshake_binary_module - ] - inputs = features["inputs"] - - def run_unary(x, name): - """A single step of unary modules.""" - x_shape = x.get_shape() - with tf.variable_scope(name): - with tf.variable_scope("norm"): - x = run_unary_modules(norm_modules, x, hparams) - x.set_shape(x_shape) - with tf.variable_scope("activation"): - x = run_unary_modules(activation_modules, x, hparams) - x.set_shape(x_shape) - with tf.variable_scope("conv"): - x = run_unary_modules(conv_modules, x, hparams) - x.set_shape(x_shape) - return tf.nn.dropout(x, 1.0 - hparams.dropout), batch_deviation(x) - - cur1, cur2, cur3, extra_loss = inputs, inputs, inputs, 0.0 - cur_shape = inputs.get_shape() - for i in xrange(hparams.num_hidden_layers): - with tf.variable_scope("layer_%d" % i): - cur1, loss1 = run_unary(cur1, "unary1") - cur2, loss2 = run_unary(cur2, "unary2") - cur3, loss3 = run_unary(cur2, "unary3") - extra_loss += (loss1 + loss2 + loss3) / float(hparams.num_hidden_layers) - with tf.variable_scope("binary1"): - next1 = run_binary_modules(binary_modules, cur1, cur2, hparams) - next1.set_shape(cur_shape) - with tf.variable_scope("binary2"): - next2 = run_binary_modules(binary_modules, cur1, cur3, hparams) - next2.set_shape(cur_shape) - with tf.variable_scope("binary3"): - next3 = run_binary_modules(binary_modules, cur2, cur3, hparams) - next3.set_shape(cur_shape) - cur1, cur2, cur3 = next1, next2, next3 - - anneal = common_layers.inverse_exp_decay(hparams.anneal_until) - extra_loss *= hparams.batch_deviation_loss_factor * anneal - return cur1, extra_loss - - -@registry.register_hparams -def bluenet_base(): - """Set of hyperparameters.""" - hparams = common_hparams.basic_params1() - hparams.batch_size = 4096 - hparams.hidden_size = 256 - hparams.dropout = 0.2 - hparams.symbol_dropout = 0.5 - hparams.label_smoothing = 0.1 - hparams.clip_grad_norm = 2.0 - hparams.num_hidden_layers = 8 - hparams.kernel_height = 3 - hparams.kernel_width = 3 - hparams.learning_rate_decay_scheme = "exp10k" - hparams.learning_rate = 0.05 - hparams.learning_rate_warmup_steps = 3000 - hparams.initializer_gain = 1.0 - hparams.weight_decay = 3.0 - hparams.num_sampled_classes = 0 - hparams.sampling_method = "argmax" - hparams.optimizer_adam_epsilon = 1e-6 - hparams.optimizer_adam_beta1 = 0.85 - hparams.optimizer_adam_beta2 = 0.997 - hparams.add_hparam("anneal_until", 40000) - hparams.add_hparam("batch_deviation_loss_factor", 5.0) - return hparams - - -@registry.register_hparams -def bluenet_tiny(): - hparams = bluenet_base() - hparams.batch_size = 1024 - hparams.hidden_size = 128 - hparams.num_hidden_layers = 4 - hparams.learning_rate_decay_scheme = "none" - return hparams diff --git a/tensor2tensor/models/bluenet_test.py b/tensor2tensor/models/bluenet_test.py deleted file mode 100644 index 15f1f46e6..000000000 --- a/tensor2tensor/models/bluenet_test.py +++ /dev/null @@ -1,55 +0,0 @@ -# coding=utf-8 -# Copyright 2017 The Tensor2Tensor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""BlueNet tests.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# Dependency imports - -import numpy as np - -from tensor2tensor.data_generators import problem_hparams -from tensor2tensor.models import bluenet - -import tensorflow as tf - - -class BlueNetTest(tf.test.TestCase): - - def testBlueNet(self): - vocab_size = 9 - x = np.random.random_integers(1, high=vocab_size - 1, size=(3, 5, 1, 1)) - y = np.random.random_integers(1, high=vocab_size - 1, size=(3, 1, 1, 1)) - hparams = bluenet.bluenet_tiny() - p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size) - with self.test_session() as session: - tf.train.get_or_create_global_step() - features = { - "inputs": tf.constant(x, dtype=tf.int32), - "targets": tf.constant(y, dtype=tf.int32), - } - model = bluenet.BlueNet( - hparams, tf.estimator.ModeKeys.TRAIN, p_hparams) - logits, _ = model(features) - session.run(tf.global_variables_initializer()) - res = session.run(logits) - self.assertEqual(res.shape, (3, 5, 1, 1, vocab_size)) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensor2tensor/models/bytenet.py b/tensor2tensor/models/bytenet.py index ceefd54b5..d65580f1f 100644 --- a/tensor2tensor/models/bytenet.py +++ b/tensor2tensor/models/bytenet.py @@ -98,7 +98,7 @@ def bytenet_base(): hparams.num_hidden_layers = 4 hparams.kernel_height = 3 hparams.kernel_width = 1 - hparams.learning_rate_decay_scheme = "exp50k" + hparams.learning_rate_decay_scheme = "exp" hparams.learning_rate = 0.05 hparams.learning_rate_warmup_steps = 3000 hparams.initializer_gain = 1.0 diff --git a/tensor2tensor/models/cycle_gan.py b/tensor2tensor/models/cycle_gan.py index 2867be214..0efeaeae5 100644 --- a/tensor2tensor/models/cycle_gan.py +++ b/tensor2tensor/models/cycle_gan.py @@ -29,38 +29,40 @@ import tensorflow as tf - def discriminator(x, compress, hparams, name, reuse=None): with tf.variable_scope(name, reuse=reuse): - x = tf.stop_gradient(2 * x) - x # Reverse gradient. + x = tf.stop_gradient(2 * x) - x # Reverse gradient. 
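The reformatted discriminator keeps the `tf.stop_gradient(2 * x) - x` gradient-reversal trick: the forward value is still `x`, but the gradient flowing back through it is negated. A small check of that identity using nothing beyond plain TensorFlow:

```
import tensorflow as tf

x = tf.constant([1.0, -2.0, 3.0])
y = tf.stop_gradient(2 * x) - x          # forward: 2x - x == x; backward: dy/dx == -1
grad = tf.gradients(tf.reduce_sum(y), x)[0]

with tf.Session() as sess:
  print(sess.run(y))      # [ 1. -2.  3.]  (same as x)
  print(sess.run(grad))   # [-1. -1. -1.]  (reversed gradient)
```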
if compress: x = transformer_vae.compress(x, None, False, hparams, "compress") else: - x = transformer_vae.residual_conv(x, 1, 3,hparams, "compress_rc") + x = transformer_vae.residual_conv(x, 1, 3, hparams, "compress_rc") y = tf.reduce_mean(x, axis=1) return tf.tanh(tf.layers.dense(y, 1, name="reduce")) + def generator(x, hparams, name, reuse=False): with tf.variable_scope(name, reuse=reuse): - return transformer_vae.residual_conv(x, 1, 3, hparams,"generator") + return transformer_vae.residual_conv(x, 1, 3, hparams, "generator") + - -def loss(real_input, fake_input, compress, hparams, lsgan, name): +def lossfn(real_input, fake_input, compress, hparams, lsgan, name): eps = 1e-12 with tf.variable_scope(name): d1 = discriminator(real_input, compress, hparams, "discriminator") - d2 = discriminator(fake_input, compress, hparams, "discriminator", reuse=True) + d2 = discriminator(fake_input, compress, hparams, "discriminator", + reuse=True) if lsgan: - dloss = tf.reduce_mean(tf.squared_difference(d1, 0.9)) + tf.reduce_mean(tf.square(d2)) - gloss = tf.reduce_mean(tf.squared_difference(d2, 0.9)) - loss = (dloss + gloss)/2 - else: #cross_entropy - dloss = -tf.reduce_mean(tf.log(d1 + eps)) - tf.reduce_mean(tf.log(1 - d2 + eps)) - gloss = -tf.reduce_mean(tf.log(d2 + eps)) - loss = (dloss + gloss)/2 + dloss = tf.reduce_mean( + tf.squared_difference(d1, 0.9)) + tf.reduce_mean(tf.square(d2)) + gloss = tf.reduce_mean(tf.squared_difference(d2, 0.9)) + loss = (dloss + gloss)/2 + else: # cross_entropy + dloss = -tf.reduce_mean( + tf.log(d1 + eps)) - tf.reduce_mean(tf.log(1 - d2 + eps)) + gloss = -tf.reduce_mean(tf.log(d2 + eps)) + loss = (dloss + gloss)/2 return loss - def split_on_batch(x): batch_size = tf.shape(x)[0] @@ -78,38 +80,37 @@ def cycle_gan_internal(inputs, targets, _, hparams): targets = common_layers.embedding( targets_orig, hparams.vocab_size, hparams.hidden_size, "embed", reuse=True) - - X, _ = split_on_batch(inputs) - _, Y = split_on_batch(targets) - X_unembeded, _ = split_on_batch(inputs_orig) - _, Y_unembeded = split_on_batch(targets_orig) - + x, _ = split_on_batch(inputs) + _, y = split_on_batch(targets) # Y --> X - Y_fake = generator(Y, hparams, 'Fy', reuse=False) - YtoXloss = loss(X, Y_fake, True, hparams, True, "YtoX") - + y_fake = generator(y, hparams, "Fy", reuse=False) + y_to_x_loss = lossfn(y, y_fake, True, hparams, True, "YtoX") + # X --> Y - X_fake = generator(X, hparams, 'Gx', reuse=False) - XtoYloss = loss(Y, X_fake, True, hparams, True, "XtoY") - + x_fake = generator(x, hparams, "Gx", reuse=False) + x_to_y_loss = lossfn(y, x_fake, True, hparams, True, "XtoY") + # Cycle-Consistency - Y_fake_ = generator(Y_fake, hparams, 'Gx', reuse=True) - X_fake_ = generator(X_fake, hparams, 'Fy', reuse=True) - XtoXloss = hparams.cycle_loss_multiplier1 * tf.reduce_mean(tf.abs(X_fake_ - X)) - YtoYloss = hparams.cycle_loss_multiplier2 * tf.reduce_mean(tf.abs(Y_fake_ - Y)) - cycloss = XtoXloss + YtoYloss - - - sample_generated = generator(inputs, hparams, 'Gx', reuse=True) - sample_generated = tf.layers.dense(sample_generated, hparams.vocab_size, name="softmax", reuse=None) - sample_generated = tf.stop_gradient(tf.expand_dims(sample_generated, axis=2)) - - losses = {"cycloss": cycloss, - "YtoXloss": YtoXloss, - "XtoYloss": XtoYloss} - + y_fake_ = generator(y_fake, hparams, "Gx", reuse=True) + x_fake_ = generator(x_fake, hparams, "Fy", reuse=True) + x_to_x_loss = hparams.cycle_loss_multiplier1 * tf.reduce_mean( + tf.abs(x_fake_ - x)) + y_to_y_loss = hparams.cycle_loss_multiplier2 * tf.reduce_mean( 
+ tf.abs(y_fake_ - y)) + cycloss = x_to_x_loss + y_to_y_loss + + sample_generated = generator(inputs, hparams, "Gx", reuse=True) + sample_generated = tf.layers.dense( + sample_generated, hparams.vocab_size, name="softmax", reuse=None) + sample_generated = tf.stop_gradient( + tf.expand_dims(sample_generated, axis=2)) + + losses = {"cycloss": cycloss, + "y_to_x_loss": y_to_x_loss, + "x_to_y_loss": x_to_y_loss} + return sample_generated, losses @@ -132,15 +133,8 @@ def cycle_gan_small(): hparams.weight_decay = 3.0 hparams.learning_rate = 0.05 hparams.kl_warmup_steps = 5000 - #hparams.hidden_size = 8 hparams.learning_rate_warmup_steps = 3000 hparams.add_hparam("vocab_size", 66) # Vocabulary size, need to set here. hparams.add_hparam("cycle_loss_multiplier1", 10.0) hparams.add_hparam("cycle_loss_multiplier2", 10.0) return hparams - -# line 43 - 80 -82 are changed : residual network config -#line 42 is changed - compress function - - - diff --git a/tensor2tensor/models/lstm.py b/tensor2tensor/models/lstm.py index 8a0b5a41f..9348cdc2c 100644 --- a/tensor2tensor/models/lstm.py +++ b/tensor2tensor/models/lstm.py @@ -214,3 +214,18 @@ def lstm_luong_attention_multi(): hparams = lstm_luong_attention() hparams.num_heads = 4 return hparams + + +@registry.register_hparams +def lstm_asr_v1(): + """Basic LSTM Params.""" + hparams = lstm_bahdanau_attention() + hparams.num_hidden_layers = 2 + hparams.hidden_size = 256 + hparams.batch_size = 36 + hparams.max_input_seq_length = 600000 + hparams.max_target_seq_length = 350 + hparams.max_length = hparams.max_input_seq_length + hparams.min_length_bucket = hparams.max_input_seq_length // 2 + hparams.learning_rate = 0.05 + return hparams diff --git a/tensor2tensor/models/lstm_test.py b/tensor2tensor/models/lstm_test.py index 863518fa1..f9e472af9 100644 --- a/tensor2tensor/models/lstm_test.py +++ b/tensor2tensor/models/lstm_test.py @@ -57,7 +57,7 @@ def testLSTMSeq2SeqAttention(self): p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size) x = tf.constant(x, dtype=tf.int32) - x._shape = tf.TensorShape([None, None, 1, 1]) + x = tf.placeholder_with_default(x, shape=[None, None, 1, 1]) with self.test_session() as session: features = { diff --git a/tensor2tensor/models/multimodel_test.py b/tensor2tensor/models/multimodel_test.py index 86f92ced6..6f490b280 100644 --- a/tensor2tensor/models/multimodel_test.py +++ b/tensor2tensor/models/multimodel_test.py @@ -23,7 +23,7 @@ import numpy as np -from tensor2tensor.data_generators import image # pylint: disable=unused-import +from tensor2tensor.data_generators import cifar # pylint: disable=unused-import from tensor2tensor.models import multimodel from tensor2tensor.utils import registry diff --git a/tensor2tensor/models/neural_gpu.py b/tensor2tensor/models/neural_gpu.py index 681423190..fe1519344 100644 --- a/tensor2tensor/models/neural_gpu.py +++ b/tensor2tensor/models/neural_gpu.py @@ -111,7 +111,7 @@ def neural_gpu(): hparams.num_hidden_layers = 1 hparams.kernel_height = 3 hparams.kernel_width = 1 - hparams.learning_rate_decay_scheme = "exp50k" + hparams.learning_rate_decay_scheme = "exp" hparams.learning_rate = 0.02 hparams.learning_rate_warmup_steps = 3000 hparams.initializer_gain = 1.0 diff --git a/tensor2tensor/models/resnet.py b/tensor2tensor/models/resnet.py index 5858c364b..44c88dc8b 100644 --- a/tensor2tensor/models/resnet.py +++ b/tensor2tensor/models/resnet.py @@ -14,7 +14,7 @@ # limitations under the License. 
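Stepping back to the CycleGAN changes above: `lossfn` combines a least-squares GAN term per translation direction with L1 cycle-consistency penalties weighted by `cycle_loss_multiplier1/2`. A stripped-down sketch of those two pieces; the 0.9 real-label target and the 10.0 multipliers mirror the diff, everything else is illustrative:

```
import tensorflow as tf

def lsgan_loss(d_real, d_fake):
  # Least-squares GAN: push scores for real data toward 0.9, fake toward 0.
  d_loss = (tf.reduce_mean(tf.squared_difference(d_real, 0.9)) +
            tf.reduce_mean(tf.square(d_fake)))
  g_loss = tf.reduce_mean(tf.squared_difference(d_fake, 0.9))
  return (d_loss + g_loss) / 2

def cycle_consistency_loss(x, x_reconstructed, y, y_reconstructed,
                           mult_x=10.0, mult_y=10.0):
  # L1 penalty for mapping X -> Y -> X (and Y -> X -> Y) back to the start.
  return (mult_x * tf.reduce_mean(tf.abs(x_reconstructed - x)) +
          mult_y * tf.reduce_mean(tf.abs(y_reconstructed - y)))
```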
"""Resnets.""" -# Copied from cloud_tpu/models/resnet_garden and modified +# Copied from cloud_tpu/models/resnet/resnet_model.py and modified from __future__ import absolute_import from __future__ import division @@ -28,225 +28,479 @@ import tensorflow as tf -# TODO(rsepassi): make hparams -_BATCH_NORM_DECAY = 0.997 -_BATCH_NORM_EPSILON = 1e-5 +BATCH_NORM_DECAY = 0.9 +BATCH_NORM_EPSILON = 1e-5 -def bottleneck_block(inputs, filters, is_training, projection_shortcut, strides, - data_format): - """Bottleneck block variant for residual networks with BN before convolutions. +def batch_norm_relu(inputs, + is_training, + relu=True, + init_zero=False, + data_format="channels_first"): + """Performs a batch normalization followed by a ReLU. Args: - inputs: A tensor of size [batch, channels, height, width]. - filters: The number of filters for the first two convolutions. Note that the - third and final convolution will use 4 times as many filters. - is_training: A Boolean for whether the model is in training or inference - mode. Needed for batch normalization. - projection_shortcut: The function to use for projection shortcuts (typically - a 1x1 convolution when downsampling the input). - strides: The block's stride. If greater than 1, this block will ultimately - downsample the input. - data_format: channels_{first, last} + inputs: `Tensor` of shape `[batch, channels, ...]`. + is_training: `bool` for whether the model is training. + relu: `bool` if False, omits the ReLU operation. + init_zero: `bool` if True, initializes scale parameter of batch + normalization with 0 instead of 1 (default). + data_format: `str` either "channels_first" for `[batch, channels, height, + width]` or "channels_last for `[batch, height, width, channels]`. Returns: - The output tensor of the block. + A normalized `Tensor` with the same `data_format`. """ + if init_zero: + gamma_initializer = tf.zeros_initializer() + else: + gamma_initializer = tf.ones_initializer() + + if data_format == "channels_first": + axis = 1 + else: + axis = 3 + + inputs = tf.layers.batch_normalization( + inputs=inputs, + axis=axis, + momentum=BATCH_NORM_DECAY, + epsilon=BATCH_NORM_EPSILON, + center=True, + scale=True, + training=is_training, + fused=True, + gamma_initializer=gamma_initializer) + + if relu: + inputs = tf.nn.relu(inputs) + return inputs + + +def fixed_padding(inputs, kernel_size, data_format="channels_first"): + """Pads the input along the spatial dimensions independently of input size. + + Args: + inputs: `Tensor` of size `[batch, channels, height, width]` or + `[batch, height, width, channels]` depending on `data_format`. + kernel_size: `int` kernel size to be used for `conv2d` or max_pool2d` + operations. Should be a positive integer. + data_format: `str` either "channels_first" for `[batch, channels, height, + width]` or "channels_last for `[batch, height, width, channels]`. + + Returns: + A padded `Tensor` of the same `data_format` with size either intact + (if `kernel_size == 1`) or padded (if `kernel_size > 1`). + """ + pad_total = kernel_size - 1 + pad_beg = pad_total // 2 + pad_end = pad_total - pad_beg + if data_format == "channels_first": + padded_inputs = tf.pad( + inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]]) + else: + padded_inputs = tf.pad( + inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) + + return padded_inputs + + +def conv2d_fixed_padding(inputs, + filters, + kernel_size, + strides, + data_format="channels_first"): + """Strided 2-D convolution with explicit padding. 
+ + The padding is consistent and is based only on `kernel_size`, not on the + dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). + + Args: + inputs: `Tensor` of size `[batch, channels, height_in, width_in]`. + filters: `int` number of filters in the convolution. + kernel_size: `int` size of the kernel to be used in the convolution. + strides: `int` strides of the convolution. + data_format: `str` either "channels_first" for `[batch, channels, height, + width]` or "channels_last for `[batch, height, width, channels]`. + + Returns: + A `Tensor` of shape `[batch, filters, height_out, width_out]`. + """ + if strides > 1: + inputs = fixed_padding(inputs, kernel_size, data_format=data_format) + + return tf.layers.conv2d( + inputs=inputs, + filters=filters, + kernel_size=kernel_size, + strides=strides, + padding=("SAME" if strides == 1 else "VALID"), + use_bias=False, + kernel_initializer=tf.variance_scaling_initializer(), + data_format=data_format) + + +def residual_block(inputs, + filters, + is_training, + projection_shortcut, + strides, + final_block, + data_format="channels_first"): + """Standard building block for residual networks with BN before convolutions. + + Args: + inputs: `Tensor` of size `[batch, channels, height, width]`. + filters: `int` number of filters for the first two convolutions. Note that + the third and final convolution will use 4 times as many filters. + is_training: `bool` for whether the model is in training. + projection_shortcut: `function` to use for projection shortcuts (typically + a 1x1 convolution to match the filter dimensions). If None, no + projection is used and the input is passed as unchanged through the + shortcut connection. + strides: `int` block stride. If greater than 1, this block will ultimately + downsample the input. + final_block: unused parameter to keep the same function signature as + `bottleneck_block`. + data_format: `str` either "channels_first" for `[batch, channels, height, + width]` or "channels_last for `[batch, height, width, channels]`. + + Returns: + The output `Tensor` of the block. + """ + del final_block shortcut = inputs - out = inputs - out = batch_norm_relu(out, is_training, data_format) + inputs = batch_norm_relu(inputs, is_training, data_format=data_format) - # The projection shortcut should come after the first batch norm and ReLU - # since it performs a 1x1 convolution. 
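`conv2d_fixed_padding` pads explicitly based only on the kernel size, so a strided convolution behaves identically regardless of the input dimensions (unlike `SAME` padding, whose implicit padding depends on them). A compact sketch of the padding arithmetic for the `channels_last` layout:

```
import tensorflow as tf

def fixed_padding_nhwc(inputs, kernel_size):
  # Total padding depends only on kernel_size, split as evenly as possible.
  pad_total = kernel_size - 1
  pad_beg = pad_total // 2
  pad_end = pad_total - pad_beg
  return tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])

x = tf.zeros([8, 224, 224, 3])
x = fixed_padding_nhwc(x, kernel_size=7)          # -> [8, 230, 230, 3]
y = tf.layers.conv2d(x, 64, 7, strides=2, padding="VALID", use_bias=False)
print(y.shape)                                    # (8, 112, 112, 64)
```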
if projection_shortcut is not None: - shortcut = projection_shortcut(out) - - do_bn_relus = [False, True, True] - kernel_sizes = [1, 3, 1] - layer_strides = [1, strides, 1] - filter_sizes = [filters, filters, 4 * filters] - - for do_bn_relu, kernel_size, layer_stride, filter_size in zip( - do_bn_relus, kernel_sizes, layer_strides, filter_sizes): - if do_bn_relu: - out = batch_norm_relu(out, is_training, data_format) - out = conv2d_fixed_padding( - inputs=out, - filters=filter_size, - kernel_size=kernel_size, - strides=layer_stride, - data_format=data_format) + shortcut = projection_shortcut(inputs) - return out + shortcut + inputs = conv2d_fixed_padding( + inputs=inputs, + filters=filters, + kernel_size=3, + strides=strides, + data_format=data_format) + + inputs = batch_norm_relu(inputs, is_training, data_format=data_format) + inputs = conv2d_fixed_padding( + inputs=inputs, + filters=filters, + kernel_size=3, + strides=1, + data_format=data_format) + + return inputs + shortcut -def batch_norm_relu(inputs, is_training, data_format): - """Performs a batch normalization followed by a ReLU.""" - # We set fused=True for a significant performance boost. - out = tf.layers.batch_normalization( +def bottleneck_block(inputs, + filters, + is_training, + projection_shortcut, + strides, + final_block, + data_format="channels_first"): + """Bottleneck block variant for residual networks with BN after convolutions. + + Args: + inputs: `Tensor` of size `[batch, channels, height, width]`. + filters: `int` number of filters for the first two convolutions. Note that + the third and final convolution will use 4 times as many filters. + is_training: `bool` for whether the model is in training. + projection_shortcut: `function` to use for projection shortcuts (typically + a 1x1 convolution to match the filter dimensions). If None, no + projection is used and the input is passed as unchanged through the + shortcut connection. + strides: `int` block stride. If greater than 1, this block will ultimately + downsample the input. + final_block: `bool` set to True if it is this the final block in the group. + This is changes the behavior of batch normalization initialization for + the final batch norm in a block. + data_format: `str` either "channels_first" for `[batch, channels, height, + width]` or "channels_last for `[batch, height, width, channels]`. + + Returns: + The output `Tensor` of the block. + """ + # TODO(chrisying): this block is technically the post-activation resnet-v1 + # bottlneck unit. Test with v2 (pre-activation) and replace if there is no + # difference for consistency. 
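The bottleneck body that follows squeezes to `filters` with a 1x1 conv, applies the (possibly strided) 3x3 conv, expands to `4 * filters`, and zero-initializes the last batch norm's gamma on the final block of a group so that a fresh residual branch starts out close to the identity. A schematic of that layer order, using plain `tf.layers` calls as stand-ins for the helpers above:

```
import tensorflow as tf

def bottleneck_sketch(x, filters, strides, is_training, final_block):
  # Assumes the caller has already projected `shortcut` to match the
  # output shape (4*filters channels, possibly strided) when needed.
  shortcut = x
  x = tf.layers.conv2d(x, filters, 1, padding="SAME", use_bias=False)      # squeeze
  x = tf.nn.relu(tf.layers.batch_normalization(x, training=is_training))
  x = tf.layers.conv2d(x, filters, 3, strides=strides, padding="SAME",
                       use_bias=False)                                      # spatial conv
  x = tf.nn.relu(tf.layers.batch_normalization(x, training=is_training))
  x = tf.layers.conv2d(x, 4 * filters, 1, padding="SAME", use_bias=False)   # expand
  # Zero gamma on the group's final block => the residual branch starts near
  # zero, so the block initially behaves like the shortcut alone.
  gamma_init = tf.zeros_initializer() if final_block else tf.ones_initializer()
  x = tf.layers.batch_normalization(x, training=is_training,
                                    gamma_initializer=gamma_init)
  return tf.nn.relu(x + shortcut)
```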
+ shortcut = inputs + if projection_shortcut is not None: + shortcut = projection_shortcut(inputs) + + inputs = conv2d_fixed_padding( inputs=inputs, - axis=1 if data_format == "channels_first" else 3, - momentum=_BATCH_NORM_DECAY, - epsilon=_BATCH_NORM_EPSILON, - center=True, - scale=True, - training=is_training, - fused=True) - out = tf.nn.relu(out) - return out + filters=filters, + kernel_size=1, + strides=1, + data_format=data_format) + + inputs = batch_norm_relu(inputs, is_training, data_format=data_format) + inputs = conv2d_fixed_padding( + inputs=inputs, + filters=filters, + kernel_size=3, + strides=strides, + data_format=data_format) + + inputs = batch_norm_relu(inputs, is_training, data_format=data_format) + inputs = conv2d_fixed_padding( + inputs=inputs, + filters=4 * filters, + kernel_size=1, + strides=1, + data_format=data_format) + inputs = batch_norm_relu( + inputs, + is_training, + relu=False, + init_zero=final_block, + data_format=data_format) + return tf.nn.relu(inputs + shortcut) -def block_layer(inputs, filters, block_fn, blocks, strides, is_training, - data_format, name): + +def block_layer(inputs, + filters, + block_fn, + blocks, + strides, + is_training, + name, + data_format="channels_first"): """Creates one layer of blocks for the ResNet model. Args: - inputs: A tensor of size [batch, channels, height, width]. - filters: The number of filters for the first convolution of the layer. - block_fn: The block to use within the model, either `building_block` or - `bottleneck_block`. - blocks: The number of blocks contained in the layer. - strides: The stride to use for the first convolution of the layer. If - greater than 1, this layer will ultimately downsample the input. - is_training: Either True or False, whether we are currently training the - model. Needed for batch norm. - data_format: channels_{first, last} - name: A string name for the tensor output of the block layer. + inputs: `Tensor` of size `[batch, channels, height, width]`. + filters: `int` number of filters for the first convolution of the layer. + block_fn: `function` for the block to use within the model + blocks: `int` number of blocks contained in the layer. + strides: `int` stride to use for the first convolution of the layer. If + greater than 1, this layer will downsample the input. + is_training: `bool` for whether the model is training. + name: `str`name for the Tensor output of the block layer. + data_format: `str` either "channels_first" for `[batch, channels, height, + width]` or "channels_last for `[batch, height, width, channels]`. Returns: - The output tensor of the block layer. + The output `Tensor` of the block layer. 
""" # Bottleneck blocks end with 4x the number of filters as they start with filters_out = 4 * filters if block_fn is bottleneck_block else filters def projection_shortcut(inputs): - return conv2d_fixed_padding( + inputs = conv2d_fixed_padding( inputs=inputs, filters=filters_out, kernel_size=1, strides=strides, data_format=data_format) + return batch_norm_relu( + inputs, is_training, relu=False, data_format=data_format) # Only the first block per block_layer uses projection_shortcut and strides inputs = block_fn(inputs, filters, is_training, projection_shortcut, strides, - data_format) + False, data_format) - for _ in range(1, blocks): - inputs = block_fn(inputs, filters, is_training, None, 1, data_format) + for i in range(1, blocks): + inputs = block_fn(inputs, filters, is_training, None, 1, (i + 1 == blocks), + data_format) return tf.identity(inputs, name) -def fixed_padding(inputs, kernel_size, data_format): - """Pads the input along the spatial dimensions independently of input size. +def resnet_v2(inputs, + block_fn, + layers, + data_format="channels_first", + is_training=False): + """Resnet model. Args: - inputs: A 4D tensor layed out according to data_format - kernel_size: The kernel to be used in the conv2d or max_pool2d operation. - Should be a positive integer. - data_format: channels_{first, last} + inputs: `Tensor` images. + block_fn: `function` for the block to use within the model. Either + `residual_block` or `bottleneck_block`. + layers: list of 4 `int`s denoting the number of blocks to include in each + of the 4 block groups. Each group consists of blocks that take inputs of + the same resolution. + data_format: `str`, "channels_first" `[batch, channels, height, + width]` or "channels_last" `[batch, height, width, channels]`. + is_training: bool, build in training mode or not. Returns: - A tensor of size [batch, channels, height_out, width_out] with the - input either intact (if kernel_size == 1) or padded (if kernel_size > 1). + Pre-logit activations. """ - pad_total = kernel_size - 1 - pad_beg = pad_total // 2 - pad_end = pad_total - pad_beg - spatial_pads = [[pad_beg, pad_end], [pad_beg, pad_end]] - if data_format == "channels_first": - pads = [[0, 0], [0, 0]] + spatial_pads - else: - assert data_format == "channels_last" - pads = [[0, 0]] + spatial_pads + [[0, 0]] - padded_inputs = tf.pad(inputs, pads) - return padded_inputs - + inputs = conv2d_fixed_padding( + inputs=inputs, + filters=64, + kernel_size=7, + strides=2, + data_format=data_format) + inputs = tf.identity(inputs, "initial_conv") + inputs = batch_norm_relu(inputs, is_training, data_format=data_format) -def conv2d_fixed_padding(**kwargs): - """conv2d with fixed_padding, based only on kernel_size.""" - strides = kwargs["strides"] - if strides > 1: - kwargs["inputs"] = fixed_padding(kwargs["inputs"], kwargs["kernel_size"], - kwargs["data_format"]) - - defaults = { - "padding": ("SAME" if strides == 1 else "VALID"), - "use_bias": False, - "kernel_initializer": tf.variance_scaling_initializer(), - } - defaults.update(kwargs) - - return tf.layers.conv2d(**defaults) - - -def resnet50(inputs, hparams): - """Resnet50.""" - is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN - block_fn = bottleneck_block - - out = inputs - data_format = "channels_first" if hparams.use_nchw else "channels_last" - if hparams.use_nchw: - # Convert from channels_last (NHWC) to channels_first (NCHW). This provides - # a large performance boost on GPU. 
- out = tf.transpose(inputs, [0, 3, 1, 2]) - - out = conv2d_fixed_padding( - inputs=out, filters=64, kernel_size=7, strides=2, data_format=data_format) - out = tf.identity(out, "initial_conv") - out = tf.layers.max_pooling2d( - inputs=out, + inputs = tf.layers.max_pooling2d( + inputs=inputs, pool_size=3, strides=2, padding="SAME", data_format=data_format) - out = tf.identity(out, "initial_max_pool") - - for i, (num_filters, stride, block_size) in enumerate( - zip(hparams.num_filters, hparams.strides, hparams.layer_sizes)): - out = block_layer( - inputs=out, - filters=num_filters, - block_fn=block_fn, - blocks=block_size, - strides=stride, - is_training=is_training, - data_format=data_format, - name="block_layer_%d" % i) - - out = batch_norm_relu(out, is_training, data_format) - out = tf.layers.average_pooling2d( - inputs=out, + inputs = tf.identity(inputs, "initial_max_pool") + + inputs = block_layer( + inputs=inputs, + filters=64, + block_fn=block_fn, + blocks=layers[0], + strides=1, + is_training=is_training, + name="block_layer1", + data_format=data_format) + inputs = block_layer( + inputs=inputs, + filters=128, + block_fn=block_fn, + blocks=layers[1], + strides=2, + is_training=is_training, + name="block_layer2", + data_format=data_format) + inputs = block_layer( + inputs=inputs, + filters=256, + block_fn=block_fn, + blocks=layers[2], + strides=2, + is_training=is_training, + name="block_layer3", + data_format=data_format) + inputs = block_layer( + inputs=inputs, + filters=512, + block_fn=block_fn, + blocks=layers[3], + strides=2, + is_training=is_training, + name="block_layer4", + data_format=data_format) + + inputs = tf.layers.average_pooling2d( + inputs=inputs, pool_size=7, strides=1, padding="VALID", data_format=data_format) - out = tf.identity(out, "final_avg_pool") - - if hparams.use_nchw: - # Back to NHWC - out = tf.transpose(out, [0, 2, 3, 1]) - return out + inputs = tf.identity(inputs, "final_avg_pool") + inputs = tf.reshape(inputs, + [-1, 2048 if block_fn is bottleneck_block else 512]) + return inputs @registry.register_model -class Resnet50(t2t_model.T2TModel): +class Resnet(t2t_model.T2TModel): def body(self, features): - return resnet50(features["inputs"], self.hparams) + hp = self.hparams + block_fns = { + "residual": residual_block, + "bottleneck": bottleneck_block, + } + assert hp.block_fn in block_fns + + inputs = features["inputs"] + + data_format = "channels_last" + if hp.use_nchw: + # Convert from channels_last (NHWC) to channels_first (NCHW). This + # provides a large performance boost on GPU. + inputs = tf.transpose(inputs, [0, 3, 1, 2]) + data_format = "channels_first" + + out = resnet_v2( + inputs, + block_fns[hp.block_fn], + hp.layer_sizes, + data_format, + is_training=hp.mode == tf.estimator.ModeKeys.TRAIN) + + out = tf.expand_dims(out, 1) + out = tf.expand_dims(out, 1) + return out -@registry.register_hparams def resnet_base(): """Set of hyperparameters.""" + # For imagenet on TPU: + # Set train_steps=120000 + # Set eval_steps=48 + + # Base hparams = common_hparams.basic_params1() + + # Model-specific parameters hparams.add_hparam("layer_sizes", [3, 4, 6, 3]) + hparams.add_hparam("block_fn", "bottleneck") hparams.add_hparam("use_nchw", True) - hparams.add_hparam("num_filters", [64, 128, 256, 512]) - hparams.add_hparam("strides", [1, 2, 2, 2]) - # Can run with a batch size of 128 with Problem ImageImagenet224 + # Variable init + hparams.initializer = "normal_unit_scaling" + hparams.initializer_gain = 2. 
+ + # Optimization + hparams.optimizer = "Momentum" + hparams.optimizer_momentum_momentum = 0.9 + hparams.optimizer_momentum_nesterov = True + hparams.weight_decay = 1e-4 + hparams.clip_grad_norm = 0.0 + # (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.) + hparams.learning_rate = 0.4 + hparams.learning_rate_decay_scheme = "cosine" + # For image_imagenet224, 120k training steps, which effectively makes this a + # cosine decay (i.e. no cycles). + hparams.learning_rate_cosine_cycle_steps = 120000 + hparams.batch_size = 128 - hparams.tpu_batch_size_per_shard = 128 return hparams + + +@registry.register_hparams +def resnet_50(): + hp = resnet_base() + return hp + + +@registry.register_hparams +def resnet_18(): + hp = resnet_base() + hp.block_fn = "residual" + hp.layer_sizes = [2, 2, 2, 2] + return hp + + +@registry.register_hparams +def resnet_34(): + hp = resnet_base() + hp.block_fn = "residual" + return hp + + +@registry.register_hparams +def resnet_101(): + hp = resnet_base() + hp.layer_sizes = [3, 4, 23, 3] + return hp + + +@registry.register_hparams +def resnet_152(): + hp = resnet_base() + hp.layer_sizes = [3, 8, 36, 3] + return hp + + +@registry.register_hparams +def resnet_200(): + hp = resnet_base() + hp.layer_sizes = [3, 24, 36, 3] + return hp diff --git a/tensor2tensor/models/resnet_test.py b/tensor2tensor/models/resnet_test.py index d911dcbd7..ddcb1627b 100644 --- a/tensor2tensor/models/resnet_test.py +++ b/tensor2tensor/models/resnet_test.py @@ -33,7 +33,6 @@ def resnet_tiny_cpu(): hparams = resnet.resnet_base() hparams.layer_sizes = [2, 2, 2, 2] - hparams.num_filters = [10, 20, 30, 40] hparams.use_nchw = False return hparams @@ -55,14 +54,14 @@ def _testResnet(self, img_size, output_size): "inputs": tf.constant(x, dtype=tf.int32), "targets": tf.constant(y, dtype=tf.int32), } - model = resnet.Resnet50(hparams, tf.estimator.ModeKeys.TRAIN, p_hparams) + model = resnet.Resnet(hparams, tf.estimator.ModeKeys.TRAIN, p_hparams) logits, _ = model(features) session.run(tf.global_variables_initializer()) res = session.run(logits) self.assertEqual(res.shape, (batch_size,) + output_size + (1, vocab_size)) def testResnetLarge(self): - self._testResnet(img_size=299, output_size=(4, 4)) + self._testResnet(img_size=224, output_size=(1, 1)) if __name__ == "__main__": diff --git a/tensor2tensor/models/revnet.py b/tensor2tensor/models/revnet.py index 23e87d1b4..28b4cf681 100644 --- a/tensor2tensor/models/revnet.py +++ b/tensor2tensor/models/revnet.py @@ -277,7 +277,7 @@ def final_block(x1, x2, dim='2d', training=True, scope='final_block'): # Global average pooling net = tf.reduce_mean(y, CONFIG[dim]['reduction_dimensions'], - name='final_pool', keep_dims=True) + name='final_pool', keepdims=True) return net @@ -353,17 +353,25 @@ def revnet_base(): hparams.add_hparam('num_channels_init_block', 64) hparams.add_hparam('dim', '2d') - hparams.optimizer = 'Momentum' - hparams.learning_rate = 0.4 - - hparams.learning_rate_boundaries = [40000, 80000, 120000, 140000] - hparams.learning_rate_multiples = [0.1, 0.01, 0.001, 0.0002] - hparams.learning_rate_decay_scheme = 'piecewise' + # Variable init + hparams.initializer = 'normal_unit_scaling' + hparams.initializer_gain = 2. + # Optimization + hparams.optimizer = 'Momentum' + hparams.optimizer_momentum_momentum = 0.9 + hparams.optimizer_momentum_nesterov = True hparams.weight_decay = 1e-4 + hparams.clip_grad_norm = 0.0 + # (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.) 
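Both the ResNet and RevNet hparams pick their learning rate with the linear scaling rule spelled out in the comment: scale a reference rate of 0.1 by the ratio of the global batch size (128 per core times 8 cores) to a reference batch of 256. A one-liner to make the arithmetic explicit (names are illustrative):

```
def scaled_learning_rate(base_lr=0.1, per_core_batch=128, num_cores=8,
                         reference_batch=256.0):
  # Linear scaling rule: lr grows in proportion to the global batch size.
  return base_lr * (per_core_batch * num_cores) / reference_batch

print(scaled_learning_rate())   # 0.4, matching hparams.learning_rate above
```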
+ hparams.learning_rate = 0.4 + hparams.learning_rate_decay_scheme = 'cosine' + # For image_imagenet224, 120k training steps, which effectively makes this a + # cosine decay (i.e. no cycles). + hparams.learning_rate_cosine_cycle_steps = 120000 # Can run with a batch size of 128 with Problem ImageImagenet224 - hparams.tpu_batch_size_per_shard = 128 + hparams.batch_size = 128 return hparams @@ -381,7 +389,7 @@ def revnet_cifar_base(): hparams.init_kernel_size = 3 hparams.init_maxpool = False hparams.strides = [1, 2, 2] - hparams.tpu_batch_size_per_shard = 128 + hparams.batch_size = 128 hparams.weight_decay = 5e-3 hparams.learning_rate = 0.1 diff --git a/tensor2tensor/models/shake_shake.py b/tensor2tensor/models/shake_shake.py index d1745bff8..5e1680edb 100644 --- a/tensor2tensor/models/shake_shake.py +++ b/tensor2tensor/models/shake_shake.py @@ -21,8 +21,6 @@ # Dependency imports -from six.moves import xrange # pylint: disable=redefined-builtin - from tensor2tensor.layers import common_hparams from tensor2tensor.layers import common_layers from tensor2tensor.utils import registry @@ -31,61 +29,100 @@ import tensorflow as tf -def shake_shake_block_branch(x, conv_filters, stride): +def shake_shake_skip_connection(x, output_filters, stride, is_training): + """Adds a residual connection to the filter x for the shake-shake model.""" + curr_filters = common_layers.shape_list(x)[-1] + if curr_filters == output_filters: + return x + stride_spec = [1, stride, stride, 1] + # Skip path 1. + path1 = tf.nn.avg_pool(x, [1, 1, 1, 1], stride_spec, "VALID") + path1 = tf.layers.conv2d(path1, int(output_filters / 2), (1, 1), + padding="SAME", name="path1_conv") + + # Skip path 2. + pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]] # First pad with 0's then crop. + path2 = tf.pad(x, pad_arr)[:, 1:, 1:, :] + path2 = tf.nn.avg_pool(path2, [1, 1, 1, 1], stride_spec, "VALID") + path2 = tf.layers.conv2d(path2, int(output_filters / 2), (1, 1), + padding="SAME", name="path2_conv") + + # Concat and apply BN. 
+ final_path = tf.concat(values=[path1, path2], axis=-1) + final_path = tf.layers.batch_normalization( + final_path, training=is_training, name="final_path_bn") + return final_path + + +def shake_shake_branch(x, output_filters, stride, rand_forward, rand_backward, + hparams): + """Building a 2 branching convnet.""" + is_training = hparams.mode == tf.contrib.learn.ModeKeys.TRAIN x = tf.nn.relu(x) - x = tf.layers.conv2d( - x, conv_filters, (3, 3), strides=(stride, stride), padding="SAME") - x = tf.layers.batch_normalization(x) + x = tf.layers.conv2d(x, output_filters, (3, 3), strides=(stride, stride), + padding="SAME", name="conv1") + x = tf.layers.batch_normalization(x, training=is_training, name="bn1") x = tf.nn.relu(x) - x = tf.layers.conv2d(x, conv_filters, (3, 3), strides=(1, 1), padding="SAME") - x = tf.layers.batch_normalization(x) + x = tf.layers.conv2d(x, output_filters, (3, 3), padding="SAME", name="conv2") + x = tf.layers.batch_normalization(x, training=is_training, name="bn2") + if is_training: + x = x * rand_backward + tf.stop_gradient(x * rand_forward - + x * rand_backward) + else: + x *= 1.0 / hparams.shake_shake_num_branches return x -def downsampling_residual_branch(x, conv_filters): - x = tf.nn.relu(x) - x1 = tf.layers.average_pooling2d(x, pool_size=(1, 1), strides=(2, 2)) - x1 = tf.layers.conv2d(x1, conv_filters / 2, (1, 1), padding="SAME") - x2 = tf.pad(x[:, 1:, 1:], [[0, 0], [0, 1], [0, 1], [0, 0]]) - x2 = tf.layers.average_pooling2d(x2, pool_size=(1, 1), strides=(2, 2)) - x2 = tf.layers.conv2d(x2, conv_filters / 2, (1, 1), padding="SAME") - return tf.concat([x1, x2], axis=3) - - -def shake_shake_block(x, conv_filters, stride, hparams): - """A shake-shake block.""" - with tf.variable_scope("branch_1"): - branch1 = shake_shake_block_branch(x, conv_filters, stride) - with tf.variable_scope("branch_2"): - branch2 = shake_shake_block_branch(x, conv_filters, stride) - if x.shape[-1] == conv_filters: - skip = tf.identity(x) - else: - skip = downsampling_residual_branch(x, conv_filters) - - # TODO(rshin): Use different alpha for each image in batch. - if hparams.mode == tf.estimator.ModeKeys.TRAIN: - if hparams.shakeshake_type == "batch": - shaken = common_layers.shakeshake2(branch1, branch2) - elif hparams.shakeshake_type == "image": - shaken = common_layers.shakeshake2_indiv(branch1, branch2) - elif hparams.shakeshake_type == "equal": - shaken = common_layers.shakeshake2_py(branch1, branch2, equal=True) - else: - raise ValueError("Invalid shakeshake_type: {!r}".format(shaken)) +def shake_shake_block(x, output_filters, stride, hparams): + """Builds a full shake-shake sub layer.""" + is_training = hparams.mode == tf.contrib.learn.ModeKeys.TRAIN + batch_size = common_layers.shape_list(x)[0] + + # Generate random numbers for scaling the branches. + rand_forward = [ + tf.random_uniform( + [batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32) + for _ in range(hparams.shake_shake_num_branches) + ] + rand_backward = [ + tf.random_uniform( + [batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32) + for _ in range(hparams.shake_shake_num_branches) + ] + # Normalize so that all sum to 1. 
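+  # (During training each branch's output is scaled by its rand_forward
+  #  sample while its gradient is scaled by rand_backward; see the
+  #  stop_gradient trick in shake_shake_branch above.)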
+ total_forward = tf.add_n(rand_forward) + total_backward = tf.add_n(rand_backward) + rand_forward = [samp / total_forward for samp in rand_forward] + rand_backward = [samp / total_backward for samp in rand_backward] + zipped_rand = zip(rand_forward, rand_backward) + + branches = [] + for branch, (r_forward, r_backward) in enumerate(zipped_rand): + with tf.variable_scope("branch_{}".format(branch)): + b = shake_shake_branch(x, output_filters, stride, r_forward, r_backward, + hparams) + b = tf.nn.dropout(b, 1.0 - hparams.layer_prepostprocess_dropout) + branches.append(b) + res = shake_shake_skip_connection(x, output_filters, stride, is_training) + if hparams.shake_shake_concat: + concat_values = [res] + branches + concat_output = tf.concat(values=concat_values, axis=-1) + concat_output = tf.nn.relu(concat_output) + concat_output = tf.layers.conv2d( + concat_output, output_filters, (1, 1), name="concat_1x1") + concat_output = tf.layers.batch_normalization( + concat_output, training=is_training, name="concat_bn") + return concat_output else: - shaken = common_layers.shakeshake2_py(branch1, branch2, equal=True) - shaken.set_shape(branch1.get_shape()) - - return skip + shaken + return res + tf.add_n(branches) -def shake_shake_stage(x, num_blocks, conv_filters, initial_stride, hparams): - with tf.variable_scope("block_0"): - x = shake_shake_block(x, conv_filters, initial_stride, hparams) - for i in xrange(1, num_blocks): - with tf.variable_scope("block_{}".format(i)): - x = shake_shake_block(x, conv_filters, 1, hparams) +def shake_shake_layer(x, output_filters, num_blocks, stride, hparams): + """Builds many sub layers into one full layer.""" + for block_num in range(num_blocks): + curr_stride = stride if (block_num == 0) else 1 + with tf.variable_scope("layer_{}".format(block_num)): + x = shake_shake_block(x, output_filters, curr_stride, hparams) return x @@ -100,60 +137,57 @@ class ShakeShake(t2t_model.T2TModel): def body(self, features): hparams = self._hparams + is_training = hparams.mode == tf.contrib.learn.ModeKeys.TRAIN inputs = features["inputs"] assert (hparams.num_hidden_layers - 2) % 6 == 0 - blocks_per_stage = (hparams.num_hidden_layers - 2) // 6 - - # For canonical Shake-Shake, the entry flow is a 3x3 convolution with 16 - # filters then a batch norm. Instead we will rely on the one in - # SmallImageModality, which seems to instead use a layer norm. + assert hparams.hidden_size % 16 == 0 + k = hparams.hidden_size // 16 + n = (hparams.num_hidden_layers - 2) // 6 x = inputs - with tf.variable_scope("shake_shake_stage_1"): - x = shake_shake_stage(x, blocks_per_stage, hparams.base_filters, 1, - hparams) - with tf.variable_scope("shake_shake_stage_2"): - x = shake_shake_stage(x, blocks_per_stage, hparams.base_filters * 2, 2, - hparams) - with tf.variable_scope("shake_shake_stage_3"): - x = shake_shake_stage(x, blocks_per_stage, hparams.base_filters * 4, 2, - hparams) - - # For canonical Shake-Shake, we should perform 8x8 average pooling and then - # have a fully-connected layer (which produces the logits for each class). - # Instead, we rely on the Xception exit flow in ClassLabelModality. - # - # Also, this model_fn does not return an extra_loss. However, TensorBoard - # reports an exponential moving average for extra_loss, where the initial - # value for the moving average may be a large number, so extra_loss will - # look large at the beginning of training. 
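+  # Entry flow: a 3x3 convolution to 16 filters followed by batch norm, as
+  # in the canonical shake-shake setup.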
+ + x = tf.layers.conv2d(x, 16, (3, 3), padding="SAME", name="init_conv") + x = tf.layers.batch_normalization(x, training=is_training, name="init_bn") + with tf.variable_scope("L1"): + x = shake_shake_layer(x, 16 * k, n, 1, hparams) + with tf.variable_scope("L2"): + x = shake_shake_layer(x, 32 * k, n, 2, hparams) + with tf.variable_scope("L3"): + x = shake_shake_layer(x, 64 * k, n, 2, hparams) + x = tf.nn.relu(x) + + # Global avg on [1, 2] (we're nhwc) and dense to num_classes done by top. return x @registry.register_hparams -def shakeshake_cifar10(): - """Parameters for CIFAR-10.""" - tf.logging.warning("shakeshake_cifar10 hparams have not been verified to " - "achieve good performance.") +def shakeshake_small(): + """Parameters for CIFAR-10. Gets to about 96% accuracy@700K steps, 1 GPU.""" hparams = common_hparams.basic_params1() hparams.batch_size = 128 - hparams.hidden_size = 16 + hparams.hidden_size = 32 + hparams.layer_prepostprocess_dropout = 0.0 hparams.dropout = 0 hparams.label_smoothing = 0.0 - hparams.clip_grad_norm = 2.0 + hparams.clip_grad_norm = 0.0 # No clipping for now, one can also try 2.0. hparams.num_hidden_layers = 26 - hparams.kernel_height = -1 # Unused - hparams.kernel_width = -1 # Unused hparams.learning_rate_decay_scheme = "cosine" # Model should be run for 700000 steps with batch size 128 (~1800 epochs) hparams.learning_rate_cosine_cycle_steps = 700000 hparams.learning_rate = 0.2 - hparams.learning_rate_warmup_steps = 3000 + hparams.learning_rate_warmup_steps = 100 # That's basically unused. hparams.initializer = "uniform_unit_scaling" hparams.initializer_gain = 1.0 - # TODO(rshin): Adjust so that effective value becomes ~1e-4 - hparams.weight_decay = 3.0 + hparams.weight_decay = 1e-4 hparams.optimizer = "Momentum" hparams.optimizer_momentum_momentum = 0.9 - hparams.add_hparam("base_filters", 16) - hparams.add_hparam("shakeshake_type", "batch") + hparams.add_hparam("shake_shake_num_branches", 2) + hparams.add_hparam("shake_shake_concat", int(False)) + return hparams + + +@registry.register_hparams +def shakeshake_big(): + hparams = shakeshake_small() + hparams.layer_prepostprocess_dropout = 0.0 + hparams.hidden_size = 96 return hparams diff --git a/tensor2tensor/models/slicenet.py b/tensor2tensor/models/slicenet.py index a18676967..e77412513 100644 --- a/tensor2tensor/models/slicenet.py +++ b/tensor2tensor/models/slicenet.py @@ -322,7 +322,7 @@ def slicenet_params1(): hparams.kernel_height = 3 hparams.kernel_width = 1 hparams.norm_type = "layer" - hparams.learning_rate_decay_scheme = "exp50k" + hparams.learning_rate_decay_scheme = "exp" hparams.learning_rate = 0.05 hparams.learning_rate_warmup_steps = 3000 hparams.initializer_gain = 1.0 diff --git a/tensor2tensor/models/slicenet_test.py b/tensor2tensor/models/slicenet_test.py index 7efdf7a33..55bebf910 100644 --- a/tensor2tensor/models/slicenet_test.py +++ b/tensor2tensor/models/slicenet_test.py @@ -23,7 +23,7 @@ import numpy as np -from tensor2tensor.data_generators import image # pylint: disable=unused-import +from tensor2tensor.data_generators import cifar # pylint: disable=unused-import from tensor2tensor.layers import modalities # pylint: disable=unused-import from tensor2tensor.models import slicenet from tensor2tensor.utils import registry diff --git a/tensor2tensor/models/super_lm.py b/tensor2tensor/models/super_lm.py index d004087a6..079507d2a 100644 --- a/tensor2tensor/models/super_lm.py +++ b/tensor2tensor/models/super_lm.py @@ -20,6 +20,8 @@ Each shard (device) has a similar structure with different 
weights. Occasional cross-replica-sum across shards. +Example problem: languagemodel_lm1b8k_packed + """ from __future__ import absolute_import @@ -43,13 +45,6 @@ ModeKeys = tf.estimator.ModeKeys # pylint: disable=invalid-name -def _embedding(inputs, vocab_size, dense_size): - embedding_var = tf.get_variable("embedding", [vocab_size, dense_size]) - emb_x = tf.gather(embedding_var, tf.to_int32(inputs)) - emb_x *= dense_size ** 0.5 - return emb_x - - @registry.register_model class SuperLM(t2t_model.T2TModel): """Attention net. See file docstring.""" @@ -72,7 +67,10 @@ def body(self, features): shifted_targets = common_layers.shift_right_2d(targets) # Bypass the symbol modality and use a different embedding on each shard. decoder_input = mp( - _embedding, shifted_targets, vocab_size, hparams.hidden_size) + common_layers.embedding, shifted_targets, vocab_size, + hparams.hidden_size, + multiplier=hparams.hidden_size**0.5, + symbol_dropout_rate=hparams.symbol_dropout) decoder_self_attention_bias = mp( common_attention.attention_bias_lower_triangle, tf.shape(targets)[1]) @@ -147,7 +145,6 @@ def _super_stack(inputs, extra_loss: an optional scalar """ layers = hparams.layers.strip(",").split(",") - ffn_hidden_sizes = [int(s) for s in hparams.ffn_hidden_sizes.split(",")] moe_hidden_sizes = [int(s) for s in hparams.moe_hidden_sizes.split(",")] if hparams.diet_experts: hsize, = moe_hidden_sizes @@ -159,8 +156,8 @@ def _diet_expert(x): hparams.hidden_size, moe_hidden_sizes, hparams.hidden_size) # scaled_dot_product_attention_with_projections uses a 3d attention bias # (no heads), where multihead_attention uses 4d attention bias. - mix_size = int(hparams.mix_fraction * hparams.hidden_size) attention_bias_3d = mp(tf.squeeze, attention_bias, 1) + mix_size = int(hparams.mix_fraction * hparams.hidden_size) accumulator = inputs x = inputs extra_losses = [] @@ -203,17 +200,15 @@ def _split(t): x, None, attention_bias, # bias - hparams.attention_key_channels or hparams.hidden_size, - hparams.attention_value_channels or hparams.hidden_size, + hparams.multihead_attention_key_channels or hparams.hidden_size, + hparams.multihead_attention_value_channels or hparams.hidden_size, hparams.hidden_size, - hparams.num_heads, + hparams.multihead_attention_num_heads, hparams.attention_dropout) elif layer_type == "ffn": - y = mp( - expert_utils.ffn_expert_fn( - hparams.hidden_size, ffn_hidden_sizes, hparams.hidden_size), - mp(expert_utils.flatten_all_but_last, x)) - x = mp(expert_utils.reshape_like, y, x) + x = mp( + common_layers.dense_relu_dense, x, + hparams.filter_size, hparams.hidden_size) elif layer_type == "conv": # convolution x = mp( @@ -252,18 +247,20 @@ def super_lm_base(): hparams.moe_hidden_sizes = "512" hparams.batch_size = 16384 hparams.max_length = 0 + # All hyperparameters ending in "dropout" are automatically set to 0.0 + # when not in training mode. hparams.layer_prepostprocess_dropout = 0.0 + hparams.symbol_dropout = 0.1 + hparams.add_hparam("attention_dropout", 0.0) hparams.label_smoothing = 0.0 hparams.clip_grad_norm = 0. # i.e. 
no gradient clipping - hparams.optimizer_adam_epsilon = 1e-9 + hparams.optimizer = "Adafactor" hparams.learning_rate_decay_scheme = "noam" hparams.learning_rate = 0.1 hparams.learning_rate_warmup_steps = 8000 hparams.initializer_gain = 1.0 hparams.initializer = "uniform_unit_scaling" hparams.weight_decay = 0.0 - hparams.optimizer_adam_beta1 = 0.9 - hparams.optimizer_adam_beta2 = 0.999 hparams.shared_embedding_and_softmax_weights = False hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" @@ -271,15 +268,12 @@ def super_lm_base(): hparams.no_data_parallelism = True # bypass the symbol modality so that we can use model parallelism. hparams.target_modality = "symbol:identity" - hparams.add_hparam("ffn_hidden_sizes", "512") # Add new ones like this. + hparams.add_hparam("filter_size", 512) hparams.add_hparam("mix_fraction", 0.5) # attention-related flags - hparams.add_hparam("num_heads", 4) - hparams.add_hparam("attention_key_channels", 0) - hparams.add_hparam("attention_value_channels", 0) - # All hyperparameters ending in "dropout" are automatically set to 0.0 - # when not in training mode. - hparams.add_hparam("attention_dropout", 0.0) + hparams.add_hparam("multihead_attention_num_heads", 4) + hparams.add_hparam("multihead_attention_key_channels", 0) + hparams.add_hparam("multihead_attention_value_channels", 0) hparams.add_hparam("pos", "timing") # timing, none hparams.add_hparam( "layers", ("n,att,m,d,a," "n,ffn,m,d,a,") * 4 + "n,ffn,d") @@ -304,7 +298,7 @@ def super_lm_big(): """Big model.""" hparams = super_lm_base() hparams.hidden_size = 1024 - hparams.ffn_hidden_sizes = "2048" + hparams.filter_size = 2048 return hparams @@ -366,3 +360,46 @@ def super_lm_moe_4b_diet(): hparams.moe_num_experts = 128 hparams.diet_experts = True return hparams + + +@registry.register_hparams +def super_lm_tpu(): + """Hyperparameters for data-parallel training on TPU. + + This is not the intended usage - we would really like to use model-parallelism + with the model shards mapping to cores and cross_replica_sum used for + communication. Currently, we replicate the entire model on each core. + + Returns: + An hparams object. + """ + hparams = super_lm_base() + hparams.batch_size = 4096 + return hparams + + +@registry.register_hparams +def super_lm_big_tpu(): + hparams = super_lm_big() + hparams.batch_size = 1024 + return hparams + + +@registry.register_hparams +def super_lm_tpu_memtest(): + """Crazy set of hyperparameters to test memory optimizations. + + Quality will be very poor due to lack of attention layers. + 853M parameters + This seems to run on TPU for languagemodel_lm1b8k_packed as of 2018-01-19. + + Returns: + An hparams object. + """ + hparams = super_lm_base() + hparams.num_model_shards = 1 + hparams.layers = "ffn," * 8 + hparams.hidden_size = 4096 + hparams.filter_size = 12000 + hparams.batch_size = 512 + return hparams diff --git a/tensor2tensor/models/transformer.py b/tensor2tensor/models/transformer.py index 67a342e67..b241cc24a 100644 --- a/tensor2tensor/models/transformer.py +++ b/tensor2tensor/models/transformer.py @@ -37,7 +37,6 @@ import tensorflow as tf -from tensorflow.python.eager import context from tensorflow.python.util import nest @@ -123,7 +122,8 @@ def decode(self, nonpadding=nonpadding, save_weights_to=self.attention_weights) - if hparams.use_tpu and hparams.mode == tf.estimator.ModeKeys.TRAIN: + if (common_layers.is_on_tpu() and + hparams.mode == tf.estimator.ModeKeys.TRAIN): # TPU does not react kindly to extra dimensions. 
# TODO(noam): remove this once TPU is more forgiving of extra dims. return decoder_output @@ -172,16 +172,19 @@ def _greedy_infer(self, features, decode_length): decode_length: an integer. How many additional timesteps to decode. Returns: - samples: [batch_size, input_length + decode_length] - logits: Not returned - losses: Not returned + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length] + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + } Raises: NotImplementedError: If there are multiple data shards. """ with tf.variable_scope(self.name): - decoded_ids, _ = self._fast_decode(features, decode_length) - return decoded_ids, None, None + return self._fast_decode(features, decode_length) def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha): """Beam search decoding. @@ -195,12 +198,17 @@ def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha): the preference for slonger translations. Returns: - samples: an integer `Tensor`. Top samples from the beam search + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length] + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + } """ with tf.variable_scope(self.name): - decoded_ids, scores = self._fast_decode(features, decode_length, - beam_size, top_beams, alpha) - return {"outputs": decoded_ids, "scores": scores} + return self._fast_decode( + features, decode_length, beam_size, top_beams, alpha) def _fast_decode(self, features, @@ -222,7 +230,13 @@ def _fast_decode(self, the preference for slonger translations. Returns: - samples: an integer `Tensor`. Top samples from the beam search + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length] + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + } Raises: NotImplementedError: If there are multiple data shards. @@ -358,9 +372,13 @@ def fast_decode(encoder_output, eos_id: End-of-sequence symbol in beam search. Returns: - Pair of tensors `(decoded_ids, scores)`, where `decoded_ids` is a 2-d or 3-d - (when doing beam search with top_beams > 1) tensor containing result of - decoding, and `scores` is the beam search scores. + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if top_beams == 1 or + [batch_size, top_beams, <= decode_length] otherwise + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + } """ batch_size = common_layers.shape_list(encoder_output)[0] @@ -376,15 +394,6 @@ def fast_decode(encoder_output, for layer in range(num_layers) } - # Set 2nd dim to None since it's not invariant in the tf.while_loop - # Note: Tensor.set_shape() does not work here since it merges shape info. - # TODO(llion); Find a more robust solution. 
- # pylint: disable=protected-access - if not context.in_eager_mode(): - for layer in cache: - cache[layer]["k"]._shape = tf.TensorShape([None, None, key_channels]) - cache[layer]["v"]._shape = tf.TensorShape([None, None, value_channels]) - # pylint: enable=protected-access cache["encoder_output"] = encoder_output cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias @@ -407,31 +416,36 @@ def fast_decode(encoder_output, decoded_ids = decoded_ids[:, :top_beams, 1:] else: # Greedy - def inner_loop(i, next_id, decoded_ids, cache): + def inner_loop(i, finished, next_id, decoded_ids, cache): logits, cache = symbols_to_logits_fn(next_id, i, cache) temperature = (0.0 if hparams.sampling_method == "argmax" else hparams.sampling_temp) - next_id = tf.expand_dims( - common_layers.sample_with_temperature(logits, temperature), axis=1) + next_id = common_layers.sample_with_temperature(logits, temperature) + finished |= tf.equal(next_id, eos_id) + next_id = tf.expand_dims(next_id, axis=1) decoded_ids = tf.concat([decoded_ids, next_id], axis=1) - return i + 1, next_id, decoded_ids, cache + return i + 1, finished, next_id, decoded_ids, cache + + def is_not_finished(i, finished, *_): + return (i < decode_length) & tf.logical_not(tf.reduce_all(finished)) decoded_ids = tf.zeros([batch_size, 0], dtype=tf.int64) - scores = None + finished = tf.constant(False, shape=[batch_size]) next_id = tf.zeros([batch_size, 1], dtype=tf.int64) - _, _, decoded_ids, _ = tf.while_loop( - # TODO(llion): Early stopping. - lambda i, *_: tf.less(i, decode_length), + _, _, _, decoded_ids, _ = tf.while_loop( + is_not_finished, inner_loop, - [tf.constant(0), next_id, decoded_ids, cache], + [tf.constant(0), finished, next_id, decoded_ids, cache], shape_invariants=[ tf.TensorShape([]), + tf.TensorShape([None]), tf.TensorShape([None, None]), tf.TensorShape([None, None]), - nest.map_structure(lambda t: tf.TensorShape(t.shape), cache), + nest.map_structure(beam_search.get_state_shape_invariants, cache), ]) + scores = None - return decoded_ids, scores + return {"outputs": decoded_ids, "scores": scores} @registry.register_model @@ -567,7 +581,8 @@ def transformer_encoder(encoder_input, hparams, name="encoder", nonpadding=None, - save_weights_to=None): + save_weights_to=None, + make_image_summary=True): """A stack of transformer layers. Args: @@ -585,11 +600,15 @@ def transformer_encoder(encoder_input, save_weights_to: an optional dictionary to capture attention weights for vizualization; the weights tensor will be appended there under a string key created from the variable scope (including name). + make_image_summary: Whether to make an attention image summary. 
Returns: y: a Tensors """ x = encoder_input + attention_dropout_broadcast_dims = ( + common_layers.comma_separated_string_to_integer_list( + getattr(hparams, "attention_dropout_broadcast_dims", ""))) with tf.variable_scope(name): if nonpadding is not None: padding = 1.0 - nonpadding @@ -598,7 +617,7 @@ def transformer_encoder(encoder_input, encoder_self_attention_bias) nonpadding = 1.0 - padding pad_remover = None - if hparams.use_pad_remover: + if hparams.use_pad_remover and not common_layers.is_on_tpu(): pad_remover = expert_utils.PadRemover(padding) for layer in xrange(hparams.num_encoder_layers or hparams.num_hidden_layers): @@ -615,7 +634,9 @@ def transformer_encoder(encoder_input, hparams.attention_dropout, attention_type=hparams.self_attention_type, save_weights_to=save_weights_to, - max_relative_position=hparams.max_relative_position) + max_relative_position=hparams.max_relative_position, + make_image_summary=make_image_summary, + dropout_broadcast_dims=attention_dropout_broadcast_dims) x = common_layers.layer_postprocess(x, y, hparams) with tf.variable_scope("ffn"): y = transformer_ffn_layer( @@ -636,7 +657,8 @@ def transformer_decoder(decoder_input, cache=None, name="decoder", nonpadding=None, - save_weights_to=None): + save_weights_to=None, + make_image_summary=True): """A stack of transformer layers. Args: @@ -658,11 +680,15 @@ def transformer_decoder(decoder_input, save_weights_to: an optional dictionary to capture attention weights for vizualization; the weights tensor will be appended there under a string key created from the variable scope (including name). + make_image_summary: Whether to make an attention image summary. Returns: y: a Tensors """ x = decoder_input + attention_dropout_broadcast_dims = ( + common_layers.comma_separated_string_to_integer_list( + getattr(hparams, "attention_dropout_broadcast_dims", ""))) with tf.variable_scope(name): for layer in xrange(hparams.num_decoder_layers or hparams.num_hidden_layers): @@ -682,19 +708,25 @@ def transformer_decoder(decoder_input, attention_type=hparams.self_attention_type, save_weights_to=save_weights_to, max_relative_position=hparams.max_relative_position, - cache=layer_cache) + cache=layer_cache, + make_image_summary=make_image_summary, + dropout_broadcast_dims=attention_dropout_broadcast_dims) x = common_layers.layer_postprocess(x, y, hparams) if encoder_output is not None: with tf.variable_scope("encdec_attention"): # TODO(llion): Add caching. 
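          # (Without a cache, the key/value projections of the static
          #  encoder output are recomputed at every decoding step.)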
y = common_attention.multihead_attention( - common_layers.layer_preprocess( - x, hparams), encoder_output, encoder_decoder_attention_bias, + common_layers.layer_preprocess(x, hparams), + encoder_output, + encoder_decoder_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, - hparams.hidden_size, hparams.num_heads, + hparams.hidden_size, + hparams.num_heads, hparams.attention_dropout, - save_weights_to=save_weights_to) + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + dropout_broadcast_dims=attention_dropout_broadcast_dims) x = common_layers.layer_postprocess(x, y, hparams) with tf.variable_scope("ffn"): y = transformer_ffn_layer( @@ -730,6 +762,9 @@ def transformer_ffn_layer(x, a Tensor of shape [batch_size, length, hparams.hidden_size] """ ffn_layer = hparams.ffn_layer + relu_dropout_broadcast_dims = ( + common_layers.comma_separated_string_to_integer_list( + getattr(hparams, "relu_dropout_broadcast_dims", ""))) if ffn_layer == "conv_hidden_relu": # Backwards compatibility ffn_layer = "dense_relu_dense" @@ -744,7 +779,8 @@ def transformer_ffn_layer(x, x, hparams.filter_size, hparams.hidden_size, - dropout=hparams.relu_dropout) + dropout=hparams.relu_dropout, + dropout_broadcast_dims=relu_dropout_broadcast_dims) if pad_remover: # Restore `conv_output` to the original shape of `x`, including padding. conv_output = tf.reshape( @@ -803,6 +839,7 @@ def transformer_base_v1(): hparams.label_smoothing = 0.1 hparams.shared_embedding_and_softmax_weights = True hparams.symbol_modality_num_shards = 16 + # Add new ones like this. hparams.add_hparam("filter_size", 2048) # Layer-related flags. If zero, these fall back on hparams.num_hidden_layers. @@ -818,7 +855,9 @@ def transformer_base_v1(): # All hyperparameters ending in "dropout" are automatically set to 0.0 # when not in training mode. hparams.add_hparam("attention_dropout", 0.0) + hparams.add_hparam("attention_dropout_broadcast_dims", "") hparams.add_hparam("relu_dropout", 0.0) + hparams.add_hparam("relu_dropout_broadcast_dims", "") hparams.add_hparam("pos", "timing") # timing, none hparams.add_hparam("nbr_decoder_problems", 1) hparams.add_hparam("proximity_bias", False) @@ -830,6 +869,7 @@ def transformer_base_v1(): @registry.register_hparams def transformer_base_v2(): + """Set of hyperparameters.""" hparams = transformer_base_v1() hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" @@ -843,33 +883,10 @@ def transformer_base_v2(): @registry.register_hparams def transformer_base(): - return transformer_base_v2() - - -@registry.register_hparams -def transformer_n_da(): - """Normalize on layer input, instead of after residual connection. - - This version seems to cure failure-to-learn bugs - for example, with very - deep networks or hard-to-learn mappings. - - Probably this should become the default. - - Returns: - a hyperparameters. - """ - hparams = transformer_base() - hparams.layer_preprocess_sequence = "n" - hparams.layer_postprocess_sequence = "da" - # This version seems to benefit from a higher learning rate. - hparams.learning_rate = 0.4 - return hparams - - -@registry.register_hparams -def transformer_n_da_l10(): - hparams = transformer_n_da() - hparams.num_hidden_layers = 10 + # Update parameters here, then occasionally cut a versioned set, e.g. + # transformer_base_v2. 
+ hparams = transformer_base_v2() + hparams.optimizer_adam_beta2 = 0.997 return hparams @@ -890,7 +907,6 @@ def transformer_big_single_gpu(): hparams = transformer_big() hparams.layer_prepostprocess_dropout = 0.1 hparams.learning_rate_warmup_steps = 16000 - hparams.optimizer_adam_beta2 = 0.998 return hparams @@ -1205,29 +1221,58 @@ def transformer_relative_big(): return hparams +def update_hparams_for_tpu(hparams): + """Change hparams to be compatible with TPU training.""" + + # Adafactor uses less memory than Adam. + hparams.optimizer = "Adafactor" + + # Avoid an expensive concat on TPU. + # >1 shards helps with faster parameter distribution on multi-GPU machines + hparams.symbol_modality_num_shards = 1 + + # Adaptive batch sizes and sequence lengths are not supported on TPU. + # Instead, every batch has the same sequence length and the same batch size. + # Longer sequences are dropped and shorter ones are padded. + # + # It is therefore suggested to use a problem where examples have been combined + # to a longer length, e.g. the "_packed" problems. + # + # For problems with variable sequence lengths, this parameter controls the + # maximum sequence length. Shorter sequences are dropped and longer ones + # are padded. + # + # For problems with fixed sequence lengths - e.g. the "_packed" problems, + # this hyperparameter is ignored. + hparams.max_length = 64 + + # TPUs have less memory than GPUs, so decrease the batch size + hparams.batch_size = 2048 + + # Using noise broadcast in the dropout layers saves memory during training. + hparams.attention_dropout_broadcast_dims = "0,1" # batch, heads + hparams.relu_dropout_broadcast_dims = "1" # length + hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length + + @registry.register_hparams def transformer_tpu(): """HParams for Transformer model on TPU.""" hparams = transformer_base() update_hparams_for_tpu(hparams) - hparams.tpu_batch_size_per_shard = 56 return hparams @registry.register_hparams def transformer_packed_tpu(): - """For packed problems, length 256, batch 14.""" - hparams = transformer_base() - update_hparams_for_tpu(hparams) - hparams.tpu_batch_size_per_shard = 12 - return hparams + """Deprecated alias for transformer_tpu().""" + return transformer_tpu() @registry.register_hparams def transformer_big_tpu(): hparams = transformer_big() update_hparams_for_tpu(hparams) - hparams.tpu_batch_size_per_shard = 16 return hparams @@ -1266,7 +1311,7 @@ def transformer_tpu_range(rhp): def transformer_tpu_batch_range(rhp): hparams = transformer_tpu() common_hparams.fill_ranged_hparams_from_hparams(hparams, rhp) - rhp.set_discrete("tpu_batch_size_per_shard", [1, 2, 3, 4]) + rhp.set_discrete("batch_size", [256, 512, 768, 1024]) @registry.register_hparams @@ -1281,23 +1326,6 @@ def transformer_small_tpu(): return hparams -def update_hparams_for_tpu(hparams): - """Change hparams to be compatible with TPU training.""" - hparams.use_pad_remover = False # where op not supported - hparams.optimizer = "TrueAdam" - hparams.learning_rate = 0.2 - # Avoid an expensive concat on TPU - hparams.symbol_modality_num_shards = 1 - - # Inputs - # Each example in the batch will be of (padded) length hparams.max_length - # It is suggested to use a dataset that where examples have been combined - # to a longer length, e.g. the "_packed" datasets. If that's the case, reduce - # the tpu_batch_size_per_shard as necessary to fit in memory. - # For translate_ende_wmt32k_packed, transformer_packed_tpu is a good config. 
- hparams.max_length = 64 - - @registry.register_hparams def transformer_clean(): """No dropout, label smoothing, max_length.""" @@ -1318,6 +1346,13 @@ def transformer_clean_big(): return hparams +@registry.register_hparams +def transformer_clean_big_tpu(): + hparams = transformer_clean_big() + update_hparams_for_tpu(hparams) + return hparams + + @registry.register_hparams def transformer_tpu_with_conv(): """Cut down on the number of heads, and use convs instead.""" @@ -1328,12 +1363,21 @@ def transformer_tpu_with_conv(): @registry.register_hparams -def transformer_tpu_base_language_model(): - """Hparams for training languagemodel_lm1b8k on tpu.""" +def transformer_lm_tpu_0(): + """Hparams for training languagemodel_lm1b8k on tpu. 92M Params.""" hparams = transformer_clean_big() update_hparams_for_tpu(hparams) - hparams.tpu_batch_size_per_shard = 16 hparams.num_heads = 4 # heads are expensive on tpu - hparams.learning_rate_warmup_steps = 1000 + hparams.batch_size = 4096 hparams.shared_embedding_and_softmax_weights = False + hparams.layer_prepostprocess_dropout = 0.1 + return hparams + + +@registry.register_hparams +def transformer_lm_tpu_1(): + """Hparams for training languagemodel_lm1b8k on tpu. 335M Params.""" + hparams = transformer_lm_tpu_0() + hparams.hidden_size = 2048 + hparams.filter_size = 8192 return hparams diff --git a/tensor2tensor/models/transformer_moe.py b/tensor2tensor/models/transformer_moe.py index 202c4c9f3..efa67bf27 100644 --- a/tensor2tensor/models/transformer_moe.py +++ b/tensor2tensor/models/transformer_moe.py @@ -300,6 +300,74 @@ def transformer_moe_8k(): return hparams +@registry.register_hparams +def transformer_moe_8k_lm(): + """Language modeling params. + + Will have the following architecture by default: + * No encoder. + * Decoder architecture: + * Layer 0: a - sepm (masked self-attention/masked separable convolutions) + * Layer 1: a - sepm + * Layer 2: a - moe (mixture of expert layers in the middle) + * Layer 3: a - sepm + * Layer 4: a - sepm + + Returns: + hparams + """ + hparams = transformer_moe_8k() + + # Use masked versions of local attention and separable convolution + hparams.default_ff = "sepm" + + # hparams.layer_types contains the network architecture: + # Start with '#' for decoder only architecture + hparams.layer_types = "#a/a/a-moe/a/a" # 5 full attention layers with 1 moe + # For long sequences, if running out of memory, it's possible to use the + # one of those two optimized versions instead: + # * Memory efficient multihead attention (slow): + # hparams.layer_types = "#mem/mem/mem-moe/mem/mem" + # * Alternate between local/compressed attention layers (faster): + # hparams.layer_types = "#locm/red/locm-moe/red/locm" + + return hparams + + +@registry.register_hparams +def transformer_moe_2k(): + """Base transformers model with moe. + + Will have the following architecture: + * No encoder. 
+ * Layer 0: a - sep (self-attention - unmasked separable convolutions) + * Layer 1: a - sep + * Layer 2: a - sep + * Layer 3: a - sep + * Layer 4: a - sep + * Decoder architecture: + * Layer 0: a - a - sepm (self-attention - enco/deco-attention - masked sep) + * Layer 1: a - a - sepm + * Layer 2: a - a - moe (mixture of expert layers in the middle) + * Layer 3: a - a - sepm + * Layer 4: a - a - sepm + + Returns: + hparams + """ + hparams = transformer_moe_8k() + hparams.batch_size = 2048 + + hparams.default_ff = "sep" + + # hparams.layer_types contains the network architecture: + encoder_archi = "a/a/a/a/a" + decoder_archi = "a-sepm/a-sepm/a-moe/a-sepm/a-sepm" + hparams.layer_types = "{}#{}".format(encoder_archi, decoder_archi) + + return hparams + + @registry.register_hparams def transformer_moe_12k(): """Hyper parameters specifics for long sequence generation.""" @@ -317,6 +385,7 @@ def transformer_moe_prepend_8k(): hparams.prepend_mode = "prepend_inputs_masked_attention" hparams.eval_drop_long_sequences = False hparams.max_input_seq_length = 7500, - hparams.layer_types = "loc/red/loc-moe/red/loc" + hparams.default_ff = "sepm" + hparams.layer_types = "locm/red/locm-moe/red/locm" hparams.moe_num_experts = 256 return hparams diff --git a/tensor2tensor/models/transformer_sketch.py b/tensor2tensor/models/transformer_sketch.py index 7ef78bc59..913243f00 100644 --- a/tensor2tensor/models/transformer_sketch.py +++ b/tensor2tensor/models/transformer_sketch.py @@ -27,7 +27,6 @@ from tensor2tensor.models import transformer from tensor2tensor.models import transformer_vae from tensor2tensor.models.transformer import transformer_base -from tensor2tensor.models.transformer import transformer_n_da from tensor2tensor.models.transformer import transformer_small from tensor2tensor.utils import registry @@ -58,7 +57,7 @@ def encode(self, inputs, target_space, hparams): @registry.register_hparams def transformer_sketch(): """Basic transformer_sketch hparams.""" - hparams = transformer_n_da() + hparams = transformer_base() hparams.batch_size = 2048 hparams.max_length = 784 hparams.clip_grad_norm = 5. 
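Registered hparams sets such as `transformer_sketch` above are ordinary Python functions, so they can also be built and tweaked directly from user code. A minimal sketch, assuming a working tensor2tensor install where these modules import cleanly; the overridden values are only illustrative:

```python
# Build a registered hparams set and override a few fields (illustrative only).
from tensor2tensor.models import transformer_sketch

hparams = transformer_sketch.transformer_sketch()  # starts from transformer_base()
hparams.batch_size = 1024      # e.g. a smaller batch for a single small GPU
hparams.clip_grad_norm = 2.0   # hypothetical override
print(hparams.max_length)      # 784, as set in transformer_sketch()
```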
@@ -148,7 +147,7 @@ def transformer_sketch_ranged(rhp): ["uniform", "orthogonal", "uniform_unit_scaling"]) rhp.set_float("initializer_gain", 0.5, 3.5) rhp.set_categorical("learning_rate_decay_scheme", - ["none", "sqrt", "noam", "exp10k"]) + ["none", "sqrt", "noam", "exp"]) rhp.set_float("optimizer_adam_epsilon", 1e-7, 1e-2, scale=rhp.LOG_SCALE) rhp.set_float("optimizer_adam_beta1", 0.8, 0.9) rhp.set_float("optimizer_adam_beta2", 0.995, 0.999) diff --git a/tensor2tensor/models/transformer_test.py b/tensor2tensor/models/transformer_test.py index d4502e585..f67476006 100644 --- a/tensor2tensor/models/transformer_test.py +++ b/tensor2tensor/models/transformer_test.py @@ -95,10 +95,11 @@ def testGreedyVsFast(self): model.set_mode(tf.estimator.ModeKeys.PREDICT) with tf.variable_scope(tf.get_variable_scope(), reuse=True): - greedy_result, _, _ = model._slow_greedy_infer(features, decode_length) + greedy_result = model._slow_greedy_infer( + features, decode_length)["outputs"] greedy_result = tf.squeeze(greedy_result, axis=[2, 3]) - fast_result, _, _ = model._greedy_infer(features, decode_length) + fast_result = model._greedy_infer(features, decode_length)["outputs"] with self.test_session(): greedy_res = greedy_result.eval() diff --git a/tensor2tensor/models/transformer_vae.py b/tensor2tensor/models/transformer_vae.py index ec87f6a86..ac9a66b77 100644 --- a/tensor2tensor/models/transformer_vae.py +++ b/tensor2tensor/models/transformer_vae.py @@ -18,13 +18,20 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function + +import math + # Dependency imports + from six.moves import xrange # pylint: disable=redefined-builtin + +from tensor2tensor.layers import common_attention from tensor2tensor.layers import common_layers from tensor2tensor.models import transformer from tensor2tensor.utils import expert_utils from tensor2tensor.utils import registry from tensor2tensor.utils import t2t_model + import tensorflow as tf from tensorflow.python.training import moving_averages @@ -49,6 +56,22 @@ def residual_conv(x, repeat, k, hparams, name, reuse=None): return x +def attend(x, source, hparams, name): + with tf.variable_scope(name): + x = tf.squeeze(x, axis=2) + if len(source.get_shape()) > 3: + source = tf.squeeze(source, axis=2) + source = common_attention.add_timing_signal_1d(source) + y = common_attention.multihead_attention( + common_layers.layer_preprocess(x, hparams), source, None, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, hparams.num_heads, + hparams.attention_dropout) + res = common_layers.layer_postprocess(x, y, hparams) + return tf.expand_dims(res, axis=2) + + def decompress_step(source, hparams, first_relu, is_2d, name): """Decompression function.""" with tf.variable_scope(name): @@ -67,15 +90,15 @@ def top_k_softmax(x, k): """Calculate softmax(x), select top-k and rescale to sum to 1.""" x = tf.nn.softmax(x) top_x, _ = tf.nn.top_k(x, k=k+1) - min_top = tf.reduce_min(top_x, axis=-1, keep_dims=True) + min_top = tf.reduce_min(top_x, axis=-1, keepdims=True) x = tf.nn.relu((x - min_top) + 1e-12) - x /= tf.reduce_sum(x, axis=-1, keep_dims=True) + x /= tf.reduce_sum(x, axis=-1, keepdims=True) return x, tf.reduce_max(top_x, axis=-1) def top_k_experts(x, k, hparams): x_shape = common_layers.shape_list(x) - x_flat = tf.reshape(x, [-1, x.get_shape().as_list()[-1]]) + x_flat = tf.reshape(x, [-1, common_layers.shape_list(x)[-1]]) is_training = hparams.mode == 
tf.contrib.learn.ModeKeys.TRAIN gates, load = expert_utils.noisy_top_k_gating( x_flat, hparams.v_size, is_training, k) @@ -117,7 +140,7 @@ def dae(x, hparams, name): maxvhot = tf.stop_gradient(tf.one_hot(maxvec, hparams.v_size)) # Add losses that prevent too few being used. distrib = tf.reshape(logsm, [-1, hparams.v_size]) * maxvhot - d_mean = tf.reduce_mean(distrib, axis=[0], keep_dims=True) + d_mean = tf.reduce_mean(distrib, axis=[0], keepdims=True) d_variance = tf.reduce_mean(tf.square(distrib - d_mean), axis=[0]) d_dev = - tf.reduce_mean(d_variance) ret = s @@ -140,14 +163,51 @@ def vae(x, z_size, name): return z, kl_loss, mu, log_sigma +def project_hidden(x, hparams): + """Project encoder hidden state into block_dim using projection tensors. + + Args: + x: Encoder hidden state of shape [-1, hidden_size] + hparams: Hparams + + Returns: + Projected states of shape [-1, num_blocks, block_dim]. + """ + x = tf.reshape(x, shape=[1, -1, hparams.hidden_size]) + x_tiled = tf.reshape( + tf.tile(x, multiples=[hparams.num_blocks, 1, 1]), + shape=[hparams.num_blocks, -1, hparams.hidden_size]) + x_projected = tf.matmul(x_tiled, hparams.projection_tensors) + x_projected = tf.transpose(x_projected, perm=[1, 0, 2]) + return x_projected + + +def slice_hidden(x, hparams): + """Slice encoder hidden state into block_dim. + + Args: + x: Encoder hidden state of shape [-1, hidden_size] + hparams: Hparams + + Returns: + Sliced states of shape [-1, num_blocks, block_dim]. + """ + assert hparams.num_blocks * hparams.block_dim == hparams.hidden_size + x_sliced = tf.reshape(x, shape=[-1, hparams.num_blocks, hparams.block_dim]) + return x_sliced + + def nearest(x, means, hparams): """Find the nearest means to elements in x.""" - x_flat = tf.reshape(x, [-1, hparams.hidden_size]) - x_norm_sq = tf.reduce_sum(x_flat ** 2, axis=-1, keep_dims=True) - means_norm_sq = tf.reduce_sum(means ** 2, axis=-1, keep_dims=True) - dist = ( - x_norm_sq + tf.transpose(means_norm_sq) - - 2 * tf.matmul(x_flat, means, transpose_b=True)) + x_reshaped = hparams.reshape_fn(x, hparams) + x_norm_sq = tf.reduce_sum(tf.square(x_reshaped), axis=-1, keepdims=True) + means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True) + scalar_prod = tf.matmul( + tf.transpose(x_reshaped, perm=[1, 0, 2]), + tf.transpose(means, perm=[0, 2, 1])) + scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2]) + dist = x_norm_sq + tf.transpose( + means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod if hparams.random_top_k > 1: _, top_k_idx = tf.nn.top_k(-dist, k=hparams.random_top_k) nearest_idx = tf.gather( @@ -157,37 +217,46 @@ def nearest(x, means, hparams): axis=-1) else: nearest_idx = tf.argmax(-dist, axis=-1) - nearest_hot = tf.one_hot(nearest_idx, hparams.v_size) + nearest_hot = tf.one_hot(nearest_idx, hparams.block_v_size) shape = common_layers.shape_list(x) - shape[-1] = hparams.v_size + shape[-1] = hparams.num_blocks + shape.append(hparams.block_v_size) nearest_hot = tf.reshape(nearest_hot, shape=shape) return tf.stop_gradient(nearest_hot) def kmeans(x, means, hparams): + """Compute the nearest neighbors and the loss for training the embeddings.""" x_means_hot = nearest(x, means, hparams) - x_means = tf.gather(means, tf.argmax(x_means_hot, axis=-1)) - q_loss = tf.reduce_mean((tf.stop_gradient(x) - x_means)**2) - e_loss = tf.reduce_mean((x - tf.stop_gradient(x_means))**2) + x_means_hot_flat = tf.reshape(x_means_hot, + [-1, hparams.num_blocks, hparams.block_v_size]) + x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means) + x_means 
= tf.transpose(x_means, [1, 0, 2]) + x_reshaped = hparams.reshape_fn(x, hparams) + q_loss = tf.reduce_mean(tf.square((tf.stop_gradient(x_reshaped) - x_means))) + e_loss = tf.reduce_mean(tf.square(x_reshaped - tf.stop_gradient(x_means))) return x_means_hot, x_means, q_loss, e_loss -def bit_to_int(x_bit, nbits): +def bit_to_int(x_bit, nbits, base=2): """Turn x_bit representing numbers bitwise (lower-endian) to int tensor.""" x_l = tf.stop_gradient(tf.reshape(x_bit, [-1, nbits])) x_labels = [] for i in range(nbits): - x_labels.append(x_l[:, i] * 2**i) + x_labels.append(x_l[:, i] * tf.to_int32(base)**tf.to_int32(i)) res = sum(x_labels) return tf.to_int32(tf.reshape(res, common_layers.shape_list(x_bit)[:-1])) -def int_to_bit(x_int, nbits): +def int_to_bit(x_int, nbits, base=2): """Turn x_int representing numbers into a bitwise (lower-endian) tensor.""" x_l = tf.expand_dims(x_int, axis=-1) x_labels = [] for i in range(nbits): - x_labels.append(tf.floormod(tf.floordiv(x_l, 2**i), 2)) + x_labels.append( + tf.floormod( + tf.floordiv(tf.to_int32(x_l), + tf.to_int32(base)**i), tf.to_int32(base))) res = tf.concat(x_labels, axis=-1) return tf.to_float(res) @@ -218,12 +287,25 @@ def embed(x): hot = tf.one_hot(x, hparams.v_size) h1 = tf.layers.dense(hot, hparams.hidden_size, name="dae_dense") elif hparams.bottleneck_kind == "vq-vae": - if hparams.ema: - means_embed = ema_means - else: - means_embed = means - - h1 = tf.gather(means_embed, x) + means_embed = means + shape_x = common_layers.shape_list(x) + x_flat = tf.reshape(x, [-1, 1]) + c = int_to_bit(x_flat, nbits=int(math.log(hparams.v_size, 2)), base=2) + shape = common_layers.shape_list(c) + new_shape = shape + new_shape[-1] = hparams.num_blocks + new_shape.append(int(math.log(hparams.v_size, 2) // hparams.num_blocks)) + c = tf.to_int32(tf.reshape(c, shape=new_shape)) + c = bit_to_int( + c, + nbits=int(math.log(hparams.v_size, 2) // hparams.num_blocks), + base=2) + h1 = tf.gather(tf.transpose(means_embed, [1, 0, 2]), c) + h1 = tf.stack( + [h1[:, :, i, i, :] for i in range(hparams.num_blocks)], axis=-2) + new_shape = shape_x + new_shape.append(hparams.hidden_size) + h1 = tf.reshape(h1, new_shape) elif hparams.bottleneck_kind == "rounding": h1 = x @@ -269,22 +351,45 @@ def embed(x): h1 = tf.layers.dense(hot, hparams.hidden_size, name="dae_dense") if hparams.bottleneck_kind == "vq-vae": x_means_hot, x_means, q_loss, e_loss = kmeans(x, means, hparams) - c = tf.argmax(x_means_hot, axis=-1) + + # Get the discrete latent represenation + x_means_idx = tf.argmax(x_means_hot, axis=-1) + # Get the binary representation + x_means_bits = int_to_bit( + x_means_idx, + nbits=int(math.log(hparams.v_size, 2) // hparams.num_blocks), + base=2) + shape = common_layers.shape_list(x_means_bits) + new_shape = shape[:-1] + new_shape[-1] = int(math.log(hparams.v_size, 2)) + x_means_bits = tf.reshape(x_means_bits, shape=new_shape) + c = bit_to_int( + tf.to_int32(x_means_bits), + nbits=int(math.log(hparams.v_size, 2)), + base=2) # Update the ema variables if hparams.ema: tf.logging.info("Using EMA with beta = {}".format(hparams.beta)) - x_means_hot_flat = tf.reshape(x_means_hot, shape=[-1, hparams.v_size]) updated_ema_count = moving_averages.assign_moving_average( ema_count, - tf.reduce_sum(x_means_hot_flat, axis=0), + tf.reduce_sum( + tf.reshape( + x_means_hot, + shape=[-1, hparams.num_blocks, hparams.block_v_size]), + axis=0), hparams.decay, zero_debias=False) - x_flat = tf.reshape(x, [-1, hparams.hidden_size]) - dw = tf.matmul(x_means_hot_flat, x_flat, transpose_a=True) + + 
x_means_hot_flat = tf.reshape( + x_means_hot, shape=[-1, hparams.num_blocks, hparams.block_v_size]) + x_reshaped = hparams.reshape_fn(x, hparams) + dw = tf.matmul( + tf.transpose(x_means_hot_flat, perm=[1, 2, 0]), + tf.transpose(x_reshaped, perm=[1, 0, 2])) updated_ema_means = moving_averages.assign_moving_average( ema_means, dw, hparams.decay, zero_debias=False) - n = tf.reduce_sum(updated_ema_count) + n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True) updated_ema_count = ((updated_ema_count + hparams.epsilon) / (n + hparams.v_size * hparams.epsilon) * n) updated_ema_means /= tf.expand_dims(updated_ema_count, axis=-1) @@ -296,7 +401,11 @@ def embed(x): else: l = q_loss + hparams.beta * e_loss - h1 = tf.stop_gradient(x_means) + x - tf.stop_gradient(x) + x_reshaped = hparams.reshape_fn(x, hparams) + shape = common_layers.shape_list(x) + x_means = tf.reshape(x_means, shape) + x_reshaped = tf.reshape(x_reshaped, shape) + h1 = x_reshaped + tf.stop_gradient(x_means - x_reshaped) if hparams.bottleneck_kind == "rounding": h = tf.layers.dense(x, 1, name="vcc") @@ -316,15 +425,20 @@ def embed(x): return res, c, l, embed -def compress(x, is_2d, hparams, name): +def compress(x, c, is_2d, hparams, name): """Compress.""" with tf.variable_scope(name): # Run compression by strided convs. cur = x k1 = (3, 3) if is_2d else (3, 1) - cur = residual_conv(cur, hparams.num_compress_steps, k1, hparams, "rc") k2 = (2, 2) if is_2d else (2, 1) + cur = residual_conv(cur, hparams.num_compress_steps, k1, hparams, "rc") + if c is not None and hparams.do_attend_compress: + cur = attend(cur, c, hparams, "compress_attend") for i in xrange(hparams.num_compress_steps): + if hparams.do_residual_compress: + cur = residual_conv(cur, hparams.num_compress_steps, k1, hparams, + "rc_%d" % i) cur = common_layers.conv_block( cur, hparams.hidden_size, [((1, 1), k2)], strides=k2, name="compress_%d" % i) @@ -377,12 +491,49 @@ def multinomial_sample(x, vocab_size, temperature): return tf.to_int32(reshaped_samples) +def ae_latent_softmax(latents_pred, latents_discrete, hparams): + """Latent prediction and loss.""" + vocab_size = hparams.v_size + if hparams.bottleneck_kind == "semhash": + vocab_size = 2**hparams.z_size + if hparams.num_blocks < 2: + latents_logits = tf.layers.dense(latents_pred, vocab_size, + name="extra_logits") + loss = None + if latents_discrete is not None: + loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + labels=latents_discrete, logits=latents_logits) + sample = multinomial_sample( + latents_logits, vocab_size, hparams.sampling_temp) + return sample, loss + + # Multi-block case. 
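+  # (The full vocab of size 2**vocab_bits is factored into num_blocks digits,
+  #  each over a block vocab of size 2**(vocab_bits // num_blocks); the
+  #  per-digit losses are summed and sampled digits are recombined
+  #  positionally below.)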
+ vocab_bits = int(math.log(vocab_size, 2)) + assert vocab_size == 2**vocab_bits + assert vocab_bits % hparams.num_blocks == 0 + block_vocab_size = 2**(vocab_bits // hparams.num_blocks) + latents_logits = [tf.layers.dense(latents_pred, block_vocab_size, + name="extra_logits_%d" % i) + for i in xrange(hparams.num_blocks)] + loss = None + if latents_discrete is not None: + losses = [] + for i in xrange(hparams.num_blocks): + d = tf.floormod(tf.floordiv(latents_discrete, + block_vocab_size**i), block_vocab_size) + losses.append(tf.nn.sparse_softmax_cross_entropy_with_logits( + labels=d, logits=latents_logits[i])) + loss = sum(losses) + samples = [multinomial_sample(l, block_vocab_size, hparams.sampling_temp) + for l in latents_logits] + sample = sum([s * block_vocab_size**i for i, s in enumerate(samples)]) + return sample, loss + + def ae_latent_sample(latents_dense, inputs, ed, embed, iters, hparams): """Sample from the latent space in the autoencoder.""" latents_pred = decode_transformer(inputs, ed, latents_dense, hparams, "extra") - latents_pred = tf.layers.dense(latents_pred, 2**16, name="extra_logits") - latents_discrete = multinomial_sample( - latents_pred, 2**16, hparams.sampling_temp) + latents_discrete, _ = ae_latent_softmax(latents_pred, None, hparams) def next_bit(latents_discrete, i): latents_discrete_prev = latents_discrete @@ -390,9 +541,7 @@ def next_bit(latents_discrete, i): latents_dense = embed(latents_discrete) latents_pred = decode_transformer( inputs, ed, latents_dense, hparams, "extra") - latents_pred = tf.layers.dense(latents_pred, 2**16, name="extra_logits") - latents_discrete = multinomial_sample( - latents_pred, 2**16, hparams.sampling_temp) + latents_discrete, _ = ae_latent_softmax(latents_pred, None, hparams) return tf.concat([latents_discrete_prev[:, :(i+1), :], latents_discrete[:, (i+1):, :]], axis=1) @@ -434,14 +583,14 @@ def ae_transformer_internal(inputs, targets, _ = common_layers.pad_to_same_length( targets, max_targets_len_from_inputs, final_length_divisible_by=2**hparams.num_compress_steps) - targets_c = compress(targets, False, hparams, "compress") + targets_c = compress(targets, inputs, False, hparams, "compress") if hparams.mode != tf.estimator.ModeKeys.PREDICT: # Compress and bottleneck. 
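      # (bottleneck returns the dense latents, their discrete codes, an extra
      #  training loss, and an embedding function; the embedding function is
      #  unused here.)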
latents_dense, latents_discrete, extra_loss, _ = bottleneck( targets_c, hparams, 2 * 2048, "vc", means, ema_count, ema_means) if _DO_SUMMARIES: tf.summary.histogram("b0", tf.reshape(latents_discrete[:, 0, :], [-1])) - pc = common_layers.inverse_exp_decay(hparams.startup_steps) * 0.95 + pc = common_layers.inverse_exp_decay(hparams.startup_steps) pc = pc if hparams.mode == tf.estimator.ModeKeys.TRAIN else 1.0 cond = tf.less(tf.random_uniform([batch_size]), pc) latents_dense = tf.where(cond, latents_dense, targets_c) @@ -452,11 +601,10 @@ def ae_transformer_internal(inputs, latents_pred = decode_transformer( tf.stop_gradient(inputs), tf.stop_gradient(ed), tf.stop_gradient(latents_dense), hparams, "extra") - latents_pred = tf.layers.dense(latents_pred, 2**16, name="extra_logits") - losses["latent_pred"] = tf.nn.sparse_softmax_cross_entropy_with_logits( - labels=latents_discrete, logits=latents_pred) + _, latent_pred_loss = ae_latent_softmax( + latents_pred, latents_discrete, hparams) losses["latent_pred"] = tf.reduce_mean( - losses["latent_pred"] * 0.5 * tf.to_float(cond)) + latent_pred_loss * 0.5 * tf.to_float(cond)) else: inputs_c = decode_transformer(inputs, ed, targets_c, hparams, "dec_c") losses["latent_pred"] = tf.reduce_mean((inputs_c - targets_c)**2) * 20 @@ -494,10 +642,11 @@ def bn_inputs(): # Masking. if hparams.do_mask: - masking = common_layers.inverse_lin_decay(100000) - masking *= common_layers.inverse_exp_decay(25000) # Not much at start. + masking = common_layers.inverse_lin_decay(hparams.mask_startup_steps) + masking *= common_layers.inverse_exp_decay( + hparams.mask_startup_steps // 4) # Not much at start. if not hparams.do_refine: - masking -= tf.random_uniform([]) * 0.3 + masking -= tf.random_uniform([]) * hparams.unmasked_percentage masking = tf.minimum(tf.maximum(masking, 0.0), 1.0) if hparams.mode == tf.estimator.ModeKeys.PREDICT: masking = predict_mask @@ -507,6 +656,8 @@ def bn_inputs(): for i in xrange(hparams.num_compress_steps): j = hparams.num_compress_steps - i - 1 d = residual_conv(d, 1, (3, 1), hparams, "decompress_rc_%d" % j) + if hparams.do_attend_decompress: + d = attend(d, inputs, hparams, "decompress_attend_%d" % j) d = decompress_step(d, hparams, i > 0, False, "decompress_%d" % j) targets = mask * targets + (1.0 - mask) * d targets = tf.concat([tf.reverse(latents_dense, [1]), targets], axis=1) @@ -516,19 +667,17 @@ def bn_inputs(): res = res[:, common_layers.shape_list(latents_dense)[1]:, :, :] if hparams.do_mask and hparams.do_refine: def refine_res(): - return residual_conv(res, 1, (5, 1), hparams, "refine") + # return residual_conv(res, 1, (5, 1), hparams, "refine") + r, _ = encode(tf.squeeze(res, axis=[2]), + target_space, hparams, "refine_enc") + return tf.expand_dims(r, axis=2) masked_batches = tf.reduce_sum(mask, axis=[1, 2, 3]) all_masked = tf.less(masked_batches, 0.1) res = tf.where(all_masked, refine_res(), res) - # We'll start training only the extra model of latents after 400K steps. - # Before we train only this, we decrease lr for other weights. - latent_time = tf.less(300000, tf.to_int32(tf.train.get_global_step())) - decreased_lr = common_layers.inverse_lin_decay(400000) + # We'll start training the extra model of latents after mask_startup_steps. 
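+    # (Until then latent_time is False, so the latent-prediction loss below
+    #  is multiplied by zero.)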
+ latent_time = tf.less(hparams.mask_startup_steps, + tf.to_int32(tf.train.get_global_step())) losses["latent_pred"] *= tf.to_float(latent_time) - losses["extra"] *= 1.0 - tf.to_float(latent_time) - decreased_lr_res = tf.stop_gradient(decreased_lr * res) - decreased_lr_res += (1.0 - decreased_lr) * res - res = tf.cond(latent_time, lambda: decreased_lr_res, lambda: res) return res, losses, cache @@ -545,15 +694,47 @@ def __init__(self, *args, **kwargs): self.ema_count = None self.ema_means = None if self._hparams.bottleneck_kind == "vq-vae": + # Check that num_blocks exactly divides hidden_size and v_size + assert self._hparams.hidden_size % self._hparams.num_blocks == 0 + assert self._hparams.v_size % self._hparams.num_blocks == 0 + + self._hparams.block_dim = int( + self._hparams.hidden_size // self._hparams.num_blocks) + self._hparams.block_v_size = 2**( + math.log(self._hparams.v_size, 2) / self._hparams.num_blocks) + self._hparams.block_v_size = int(self._hparams.block_v_size) + + if self._hparams.reshape_method == "project": + tf.logging.info("Using random projections for hierarchical vq-vae") + tf.logging.info("Trainable projections = {}".format( + self._hparams.trainable_projections)) + self._hparams.projection_tensors = tf.get_variable( + name="projection", + shape=[ + self._hparams.num_blocks, self._hparams.hidden_size, + self._hparams.block_dim + ], + initializer=tf.random_normal_initializer(), + trainable=self._hparams.trainable_projections) + self._hparams.reshape_fn = project_hidden + elif self._hparams.reshape_method == "slice": + tf.logging.info("Using slices for hierarchical vq-vae") + self._hparams.reshape_fn = slice_hidden + else: + raise ValueError("Unknown reshape method") + self.means = tf.get_variable( name="means", - shape=[self._hparams.v_size, self._hparams.hidden_size], - initializer=tf.random_normal_initializer()) + shape=[ + self._hparams.num_blocks, self._hparams.block_v_size, + self._hparams.block_dim + ], + initializer=tf.uniform_unit_scaling_initializer()) # Create the shadow variables if we are using EMA if self._hparams.ema: self.ema_count = tf.get_variable( - "ema_count", [self._hparams.v_size], + "ema_count", [self._hparams.num_blocks, self._hparams.block_v_size], initializer=tf.constant_initializer(0)) with tf.colocate_with(self.means): self.ema_means = tf.get_variable( @@ -602,7 +783,7 @@ def infer(self, features=None, decode_length=50, beam_size=1, top_beams=1, """Produce predictions from the model.""" if not self._hparams.do_mask: return super(TransformerAE, self).infer( - features, decode_length, beam_size, top_beams, alpha) + features, decode_length, beam_size, top_beams, alpha)["outputs"] if not features: features = {} inputs_old = None @@ -649,20 +830,30 @@ def transformer_ae_small(): hparams.hidden_size = 384 hparams.filter_size = 2048 hparams.label_smoothing = 0.0 + hparams.optimizer = "Adafactor" hparams.add_hparam("z_size", 16) hparams.add_hparam("noise_dev", 0.0) hparams.add_hparam("d_mix", 0.5) # Bottleneck kinds supported: dense, vae, semhash, gumbel-softmax, vq-vae. 
   hparams.add_hparam("bottleneck_kind", "semhash")
+  hparams.add_hparam("num_blocks", 1)
+  # Reshape method for hierarchical vq-vae: slice, project
+  hparams.add_hparam("reshape_method", "slice")
+  hparams.add_hparam("trainable_projections", False)
+  hparams.add_hparam("unmasked_percentage", 0.3)
   hparams.add_hparam("do_ae", True)
   hparams.add_hparam("do_mask", True)
   hparams.add_hparam("do_refine", False)
+  hparams.add_hparam("do_attend_compress", False)
+  hparams.add_hparam("do_attend_decompress", True)
+  hparams.add_hparam("do_residual_compress", False)
   hparams.add_hparam("drop_inputs", False)
   hparams.add_hparam("v_size", 1024*64)
   hparams.add_hparam("max_context_length", 64)
   hparams.add_hparam("num_compress_steps", 3)
   hparams.add_hparam("kl_steps", 35000)
   hparams.add_hparam("startup_steps", 10000)
+  hparams.add_hparam("mask_startup_steps", 50000)
   hparams.add_hparam("kmeans_lr_factor", 0.002)
   hparams.add_hparam("z_dropout", 0.1)
   hparams.add_hparam("is_2d", 0)
diff --git a/tensor2tensor/models/vanilla_gan.py b/tensor2tensor/models/vanilla_gan.py
index a6196c491..459753b95 100644
--- a/tensor2tensor/models/vanilla_gan.py
+++ b/tensor2tensor/models/vanilla_gan.py
@@ -89,6 +89,8 @@ def vanilla_gan_internal(inputs, hparams, train):
     losses = {}
     losses["discriminator"] = d_loss
     losses["generator"] = g_loss
+    # Include a dummy training loss to skip self.top and self.loss
+    losses["training"] = tf.constant(0., dtype=tf.float32)
     z_sampled = tf.random_uniform(shape=[1, hparams.random_sample_size],
                                   minval=-1, maxval=1, name="z")
@@ -146,9 +148,6 @@ def vanilla_gan():
   hparams = common_hparams.basic_params1()
-  hparams.input_modalities = "inputs:image:zero_loss"
-  hparams.target_modality = "image:zero_loss"
-
   hparams.batch_size = 32
   hparams.label_smoothing = 0.0
   hparams.add_hparam("startup_steps", 10000)
diff --git a/tensor2tensor/models/xception.py b/tensor2tensor/models/xception.py
index 9e2174161..401694580 100644
--- a/tensor2tensor/models/xception.py
+++ b/tensor2tensor/models/xception.py
@@ -95,9 +95,7 @@ def xnet_resblock(x, filters, res_relu, name):
       force2d=True,
       name="res_conv0")
-  inputs = common_layers.standardize_images(inputs)
-  # TODO(lukaszkaiser): summaries here don't work in multi-problem case yet.
-  # tf.summary.image("inputs", inputs, max_outputs=2)
+  tf.summary.image("inputs", inputs, max_outputs=2)
   x = common_layers.conv_block(
       inputs,
       32, [((1, 1), (3, 3))],
@@ -155,7 +153,7 @@ def xception_base():
   hparams.num_hidden_layers = 8
   hparams.kernel_height = 3
   hparams.kernel_width = 3
-  hparams.learning_rate_decay_scheme = "exp50k"
+  hparams.learning_rate_decay_scheme = "exp"
   hparams.learning_rate = 0.05
   hparams.learning_rate_warmup_steps = 3000
   hparams.initializer_gain = 1.0
@@ -181,9 +179,7 @@ def xception_tiny():
 @registry.register_hparams
 def xception_tiny_tpu():
   hparams = xception_base()
-  hparams.tpu_batch_size_per_shard = 2
-  # The base exp50k scheme uses a cond which fails to compile on TPU
-  hparams.learning_rate_decay_scheme = "noam"
+  hparams.batch_size = 2
   hparams.num_hidden_layers = 2
   hparams.hidden_size = 128
   hparams.optimizer = "TrueAdam"
diff --git a/tensor2tensor/notebooks/hello_t2t.ipynb b/tensor2tensor/notebooks/hello_t2t.ipynb
index 27ce78bb5..cc9f66a02 100644
--- a/tensor2tensor/notebooks/hello_t2t.ipynb
+++ b/tensor2tensor/notebooks/hello_t2t.ipynb
@@ -60,8 +60,7 @@
       },
       "source": [
         "# Install deps\n",
-        "# We're using some new features from tensorflow so we install 1.5.0rc0\n",
-        "!pip install -q 'tensor2tensor==1.4.1' 'tensorflow==1.5.0rc0'"
+        "!pip install -q tensor2tensor"
       ],
       "cell_type": "code",
       "execution_count": 0,
@@ -190,7 +189,7 @@
         " 'audio_timit_characters_tune',\n",
         " 'audio_timit_tokens8k_test',\n",
         " 'audio_timit_tokens8k_tune',\n",
-        " 'image_celeba_tune',\n",
+        " 'image_celeba',\n",
         " 'image_cifar10',\n",
         " 'image_cifar10_plain',\n",
         " 'image_cifar10_plain8',\n",
@@ -699,7 +698,7 @@
         "def translate(inputs):\n",
         "  encoded_inputs = encode(inputs)\n",
         "  with tfe.restore_variables_on_create(ckpt_path):\n",
-        "    model_output = translate_model.infer(encoded_inputs)\n",
+        "    model_output = translate_model.infer(encoded_inputs)[\"outputs\"]\n",
         "  return decode(model_output)\n",
         "\n",
         "inputs = \"The animal didn't cross the street because it was too tired\"\n",
diff --git a/tensor2tensor/serving/README.md b/tensor2tensor/serving/README.md
index aadffa9f8..2081553cc 100644
--- a/tensor2tensor/serving/README.md
+++ b/tensor2tensor/serving/README.md
@@ -3,8 +3,7 @@
 Tensor2Tensor and the TensorFlow ecosystem make it easy to serve a model once
 trained.

-**Note**: The following requires recent features in TensorFlow as so if you get
-import errors or the like, try installing `tensorflow==1.5.0rc0`.
+**Note**: Requires TF 1.5+.

 ## 1. Export for Serving
diff --git a/tensor2tensor/test_data/transformer_test_ckpt/flags.txt b/tensor2tensor/test_data/transformer_test_ckpt/flags.txt
index 26988922c..2587e3e2d 100644
--- a/tensor2tensor/test_data/transformer_test_ckpt/flags.txt
+++ b/tensor2tensor/test_data/transformer_test_ckpt/flags.txt
@@ -1,7 +1,7 @@
 --eval_steps=1
 --hparams_range=
 --t2t_usr_dir=
---experimental_optimize_placement=False
+--enable_graph_rewriter=False
 --sync=False
 --eval_run_autoregressive=False
 --eval_use_test_set=False
@@ -46,4 +46,4 @@
 --locally_shard_to_cpu=False
 --worker_job=/job:localhost
 --model=transformer
---parsing_path=
\ No newline at end of file
+--parsing_path=
diff --git a/tensor2tensor/test_data/transformer_test_ckpt/hparams.json b/tensor2tensor/test_data/transformer_test_ckpt/hparams.json
index 196d736ba..b07ac9486 100644
--- a/tensor2tensor/test_data/transformer_test_ckpt/hparams.json
+++ b/tensor2tensor/test_data/transformer_test_ckpt/hparams.json
@@ -1 +1 @@
-{"daisy_chain_variables": true, "optimizer_adam_beta1": 0.9, "scheduled_sampling_prob": 0.0, "num_hidden_layers": 2, "moe_loss_coef": 0.01, "max_target_seq_length": 0, "clip_grad_norm": 0.0, "pos": "timing", "scheduled_sampling_gold_mixin_prob": 0.5, "initializer": "uniform_unit_scaling", "grad_noise_scale": 0.0, "optimizer_momentum_momentum": 0.9,
"nbr_decoder_problems": 1, "attention_key_channels": 0, "eval_drop_long_sequences": false, "learning_rate_cosine_cycle_steps": 250000, "prepend_mode": "none", "weight_decay": 0.0, "symbol_modality_skip_top": false, "weight_noise": 0.0, "target_modality": "default", "attention_dropout": 0.1, "parameter_attention_value_channels": 0, "factored_logits": false, "relu_dropout": 0.1, "no_data_parallelism": false, "layer_preprocess_sequence": "n", "sampling_method": "argmax", "learning_rate": 0.2, "num_heads": 2, "max_length": 256, "summarize_grads": false, "attention_value_channels": 0, "num_encoder_layers": 0, "label_smoothing": 0.1, "use_fixed_batch_size": false, "optimizer": "Adam", "moe_k": 2, "self_attention_type": "dot_product", "learning_rate_decay_scheme": "noam", "sampling_temp": 1.0, "kernel_height": 3, "use_pad_remover": true, "batch_size": 4096, "problem_choice": "adaptive", "max_relative_position": 0, "force_full_predict": false, "min_length_bucket": 8, "layer_prepostprocess_dropout": 0.1, "eval_run_autoregressive": false, "shared_embedding_and_softmax_weights": true, "symbol_modality_num_shards": 16, "dropout": 0.2, "compress_steps": 0, "parameter_attention_key_channels": 0, "length_bucket_step": 1.1, "kernel_width": 1, "hidden_size": 16, "num_decoder_layers": 0, "input_modalities": "default", "filter_size": 8, "optimizer_adam_beta2": 0.98, "scheduled_sampling_warmup_steps": 50000, "norm_type": "layer", "min_length": 0, "moe_num_experts": 64, "multiply_embedding_mode": "sqrt_depth", "max_input_seq_length": 0, "learning_rate_warmup_steps": 8000, "proximity_bias": false, "ffn_layer": "dense_relu_dense", "initializer_gain": 1.0, "layer_postprocess_sequence": "da", "moe_hidden_sizes": "2048", "optimizer_adam_epsilon": 1e-09, "norm_epsilon": 1e-06} diff --git a/tensor2tensor/test_data/vocab.ende.32768 b/tensor2tensor/test_data/vocab.ende.32768 new file mode 100644 index 000000000..8036581fe --- /dev/null +++ b/tensor2tensor/test_data/vocab.ende.32768 @@ -0,0 +1,33708 @@ +'' +'' +', _' +'._' +'the_' +'_' +'in_' +'of_' +'and_' +'to_' +'die_' +'der_' +'und_' +'a_' +'s_' +'-_' +'is_' +'that_' +'zu_' +'for_' +'den_' +'von_' +'n_' +'on_' +'ist_' +'an_' +'für_' +'. 
_' +'be_' +'The_' +'with_' +'en_' +'es_' +'are_' +'das_' +'as_' +'e_' +'des_' +'auf_' +'mit_' +'it_' +'eine_' +'dass_' +'nicht_' +'I_' +'im_' +'not_' +'have_' +'by_' +'this_' +' (_' +' – _' +'sich_' +'was_' +'ein_' +'werden_' +'Die_' +'will_' +'from_' +'we_' +'dem_' +'’_' +'t_' +': _' +'at_' +'or_' +'Sie_' +'which_' +'has_' +'er_' +'als_' +'auch_' +'you_' +'wir_' +'r_' +'In_' +'um_' +'sind_' +'wird_' +') _' +'so_' +'can_' +'sie_' +'ing_' +'all_' +''_' +' - _' +'einer_' +'hat_' +'wie_' +'also_' +'their_' +'European_' +'haben_' +'d_' +'would_' +'ed_' +'oder_' +'its_' +'more_' +'über_' +'but_' +'?_' +'einen_' +'ich_' +'y_' +'zur_' +'our_' +'they_' +'aus_' +'bei_' +'Das_' +'one_' +'been_' +'; _' +'nur_' +'Union_' +'should_' +'It_' +'EU_' +'einem_' +'/_' +'nach_' +'durch_' +'This_' +'können_' +'diese_' +'ung_' +'other_' +'zum_' +'noch_' +'only_' +'there_' +' , _' +'do_' +'am_' +'de_' +'countries_' +'1_' +'kann_' +'dieser_' +'war_' +'than_' +'We_' +'new_' +'o_' +'your_' +'Europe_' +'Der_' +'must_' +'Mr_' +'no_' +'vor_' +'were_' +'2_' +'like_' +'wenn_' +'man_' +'US_' +'Ich_' +'wurde_' +'- _' +'about_' +' "_' +'us_' +'President_' +'m_' +'time_' +'Es_' +'these_' +'if_' +'aber_' +'te_' +'sein_' +'who_' +'up_' +'very_' +'Hotel_' +'world_' +' ._' +'uns_' +'Commission_' +'when_' +'such_' +'A_' +'But_' +'Wir_' +'people_' +'müssen_' +' “_' +'into_' +'ten_' +'ng_' +'China_' +'out_' +'3_' +'mehr_' +'ihre_' +'his_' +'5_' +'now_' +'most_' +'some_' +'what_' +'sehr_' +'Kommission_' +'many_' +'!_' +'i_' +')._' +'l_' +'he_' +'any_' +'% _' +'had_' +' „_' +'States_' +'them_' +',_' +'eines_' +'4_' +'well_' +'Herr_' +'), _' +'" _' +'economic_' +'diesem_' +'need_' +'unter_' +'years_' +'political_' +'between_' +'ly_' +'zwischen_' +'first_' +'hotel_' +'alle_' +'even_' +'policy_' +'make_' +'bis_' +'two_' +'muss_' +'could_' +'over_' +'anderen_' +'use_' +'Parliament_' +'keine_' +'my_' +'work_' +'may_' +'way_' +'important_' +'Council_' +'gegen_' +'report_' +'Präsident_' +'0_' +'system_' +'Europäischen_' +'Europa_' +'gibt_' +'because_' +'If_' +'those_' +'just_' +'support_' +'vom_' +'seine_' +'sowie_' +'k_' +'country_' +'year_' +'much_' +'Wenn_' +'dieses_' +'after_' +'government_' +'Member_' +'al_' +'made_' +' _' +'ungen_' +'able_' +'take_' +'h_' +'möchte_' +'market_' +'being_' +'immer_' +'“ _' +'” _' +'public_' +'own_' +'long_' +'' _' +'Welt_' +'dies_' +'sondern_' +'Zeit_' +'Menschen_' +'Jahren_' +'international_' +'(_' +'where_' +'right_' +'good_' +'financial_' +'how_' +'ihrer_' +'da_' +'diesen_' +'USA_' +'wurden_' +'andere_' +'For_' +':_' +'g_' +'As_' +'Diese_' +'Jahr_' +'both_' +'information_' +'against_' +'Länder_' +'part_' +'same_' +'last_' +'Bericht_' +'unsere_' +'global_' +'dann_' +'z_' +'through_' +'würde_' +'re_' +'6_' +'then_' +'sollte_' +'There_' +'jedoch_' +'hier_' +'high_' +'does_' +'end_' +'damit_' +'Im_' +'10_' +'seiner_' +'heute_' +'S_' +'United_' +'Und_' +'under_' +'social_' +'too_' +'ion_' +'7_' +'national_' +'order_' +'growth_' +'8_' +'example_' +'still_' +'see_' +'me_' +'le_' +'neue_' +'ation_' +'ohne_' +'free_' +'europäischen_' +'Parlament_' +'Land_' +'number_' +'That_' +'Mitgliedstaaten_' +'rights_' +'place_' +'könnte_' +'development_' +'area_' +'And_' +'within_' +'power_' +'her_' +'course_' +'room_' +'point_' +'fact_' +'before_' +'bereits_' +'used_' +'Frage_' +'’ _' +'neuen_' +'while_' +'denen_' +'far_' +'possible_' +'Entwicklung_' +'Ein_' +'20_' +'selbst_' +'wieder_' +'economy_' +' [[_' +'want_' +'la_' +'future_' +'sollten_' +'You_' +'zwei_' +'dazu_' +'Europäische_' +'say_' +'Regierung_' +'great_' 
+'already_' +' | _' +'without_' +'c_' +'set_' +'Aber_' +'less_' +'9_' +'large_' +'während_' +'human_' +'here_' +'! _' +'weil_' +'today_' +'jetzt_' +'ihren_' +'30_' +'ihr_' +'So_' +'view_' +'se_' +'machen_' +'wäre_' +'therefore_' +'cannot_' +'believe_' +'problem_' +'liegt_' +'wo_' +'since_' +'go_' +'Lage_' +' '_' +'u_' +'crisis_' +'three_' +'state_' +'per_' +'find_' +'few_' +'down_' +'America_' +'et_' +'Ländern_' +'viele_' +'services_' +'000_' +'..._' +'st_' +'process_' +'issue_' +'help_' +'Unternehmen_' +'trade_' +'including_' +'available_' +'15_' +'level_' +'case_' +'Maßnahmen_' +'viel_' +'know_' +'geht_' +'einige_' +'Eine_' +'means_' +'darauf_' +'denn_' +'dafür_' +'00_' +'next_' +'mich_' +'different_' +'Jahre_' +'seit_' +'city_' +'change_' +'areas_' +'rs_' +'real_' +'get_' +'problems_' +'When_' +'Staaten_' +'health_' +'el_' +'Politik_' +'E_' +'C_' +'   _' +'making_' +'best_' +'Mit_' +'Ihnen_' +'however_' +'clear_' +'better_' +'allem_' +'politischen_' +'lassen_' +'finden_' +'x_' +'why_' +'habe_' +'gut_' +'Frau_' +'ers_' +'Dies_' +'B_' +'ne_' +'D_' +'energy_' +'during_' +'become_' +'allen_' +'They_' +'service_' +'access_' +'waren_' +'ganz_' +'Japan_' +'2009_' +'unserer_' +'etwas_' +'daß_' +'Committee_' +'current_' +'wollen_' +'question_' +'doch_' +'stellen_' +'politische_' +'com_' +'back_' +'To_' +'yang_' +'particular_' +'small_' +'ob_' +'did_' +'day_' +'Rat_' +'think_' +'These_' +'Israel_' +'based_' +'bar_' +'ve_' +'interest_' +'debate_' +'common_' +'beim_' +'Doch_' +'Commissioner_' +'letzten_' +'each_' +'again_' +'bietet_' +'Germany_' +'At_' +'security_' +'nun_' +'measures_' +'business_' +'Zimmer_' +'situation_' +'schon_' +'put_' +'offers_' +'p_' +'markets_' +'Wie_' +'ts_' +'tion_' +'taken_' +'seinen_' +'military_' +'major_' +'Ihre_' +'ge_' +'come_' +'another_' +'Bereich_' +'Arbeit_' +'stay_' +'Iran_' +'rate_' +'provide_' +'Ende_' +'tun_' +'result_' +'rather_' +'law_' +'continue_' +'citizens_' +'ce_' +'50_' +'said_' +'action_' +'page_' +'might_' +'House_' +'term_' +'American_' +'2008_' +'worden_' +'recent_' +'given_' +'Unterstützung_' +'Internet_' +'Euro_' +'whether_' +'ies_' +'every_' +'Probleme_' +'|_' +'ment_' +'give_' +'einfach_' +'2000_' +'steht_' +'rooms_' +'governments_' +'ersten_' +'debt_' +'called_' +'Teil_' +'Auch_' +'besteht_' +'Problem_' +'sten_' +'proposal_' +'needs_' +'gen_' +'budget_' +'Bank_' +'12_' +'private_' +'ns_' +'Herrn_' +'weniger_' +'lich_' +'institutions_' +'full_' +'erhalten_' +'On_' +'New_' +'Fall_' +'11_' +'non_' +'P_' +'vote_' +'terms_' +'os_' +'issues_' +'f_' +'data_' +'away_' +'Nach_' +'sehen_' +'little_' +'line_' +'least_' +'further_' +'around_' +'always_' +'Ziel_' +'Sicherheit_' +'Als_' +'que_' +'million_' +'großen_' +'du_' +'ch_' +'France_' +'ter_' +'open_' +'eigenen_' +'Russia_' +'Rolle_' +'systems_' +'life_' +'insbesondere_' +'hatte_' +'group_' +'geben_' +'close_' +'World_' +'sagen_' +'hope_' +'With_' +'Art_' +'All_' +'zwar_' +'using_' +'quality_' +'policies_' +'main_' +'legal_' +'ern_' +'deren_' +'ons_' +'off_' +'located_' +'ab_' +'System_' +'Bürger_' +'money_' +'mir_' +'kommen_' +'bin_' +'age_' +'What_' +'Namen_' +'risk_' +'investment_' +'foreign_' +'drei_' +'ble_' +'allerdings_' +'Zusammenarbeit_' +'Wirtschaft_' +'He_' +'Bedeutung_' +'2005_' +'role_' +'position_' +'große_' +']] _' +'Was_' +'Informationen_' +'würden_' +'women_' +'möglich_' +'form_' +'certain_' +'T_' +'2006_' +'etwa_' +'einmal_' +'done_' +'German_' +'weiter_' +'sector_' +'agreement_' +'2007_' +'rules_' +'increase_' +'ihnen_' +'dan_' +'companies_' +'basis_' +'b_' +'unseren_' 
+'particularly_' +'members_' +'local_' +'führen_' +'di_' +'create_' +'century_' +'öffentlichen_' +'pro_' +'old_' +'necessary_' +'erreichen_' +'Er_' +'Deutschland_' +'Bei_' +'.”_' +'start_' +'second_' +'once_' +'davon_' +'ago_' +'O_' +'yet_' +'together_' +'ry_' +'going_' +'soll_' +'others_' +'könnten_' +'dort_' +'staff_' +'\u_' +'Für_' +' & _' +'stehen_' +'needed_' +'location_' +'ensure_' +'dabei_' +'conditions_' +'Millionen_' +'Community_' +'Auf_' +'2001_' +'weitere_' +'though_' +'rn_' +'often_' +'key_' +'ins_' +'alles_' +'short_' +'price_' +'meisten_' +'him_' +'fully_' +'especially_' +'September_' +'Fragen_' +'rates_' +'control_' +'central_' +'Weise_' +'Weg_' +'One_' +'low_' +'kein_' +'itself_' +'greater_' +'gegenüber_' +'capital_' +'Rahmen_' +'nt_' +'modern_' +'decision_' +'aller_' +' _' +'sogar_' +'cooperation_' +'Recht_' +'However_' +'innerhalb_' +'costs_' +'Vorschlag_' +'reform_' +'led_' +'food_' +'Zukunft_' +'Wachstum_' +'Stadt_' +'Indeed_' +'2004_' +' $_' +'wissen_' +'third_' +'region_' +'account_' +' ..._' +'leaders_' +'among_' +'Seite_' +'M_' +'Beispiel_' +'whole_' +'tax_' +'protection_' +'present_' +'ic_' +'early_' +'aid_' +'Um_' +'Europas_' +'100_' +'vielen_' +'schen_' +'name_' +'meine_' +'ischen_' +'income_' +'following_' +'excellent_' +'darüber_' +'children_' +'banks_' +'25_' +'welche_' +'several_' +'reason_' +'nen_' +'having_' +'UN_' +'Seiten_' +'weit_' +'progress_' +'centre_' +'Chinese_' +'wirklich_' +'really_' +'möchten_' +'macht_' +'kommt_' +'ja_' +'europäische_' +'value_' +'resources_' +'environment_' +'bringen_' +'bieten_' +'Region_' +'Möglichkeit_' +'16_' +'water_' +'something_' +'left_' +'internationalen_' +'home_' +'democratic_' +'Zusammenhang_' +'Grund_' +'Frauen_' +'2003_' +'sowohl_' +'include_' +'democracy_' +'daher_' +'weiterhin_' +'stellt_' +'states_' +'production_' +'kan_' +'fast_' +'Bush_' +'wirtschaftlichen_' +'single_' +'shall_' +'nden_' +'longer_' +'efforts_' +'Mrs_' +'India_' +'working_' +'until_' +'ty_' +'poor_' +'matter_' +'land_' +'force_' +'chen_' +'French_' +'society_' +'indem_' +'2002_' +'unterstützen_' +'regard_' +'offer_' +'nd_' +'nationalen_' +'list_' +'likely_' +'halten_' +'No_' +'Group_' +'Central_' +'period_' +'never_' +'meiner_' +'gentlemen_' +'entfernt_' +'difficult_' +'beiden_' +'Kinder_' +'18_' +'un_' +'prices_' +'look_' +'fiscal_' +'besonders_' +'ar_' +'approach_' +'Parlaments_' +'Mittel_' +'views_' +'verschiedenen_' +'standards_' +'results_' +'respect_' +'resolution_' +'research_' +'les_' +'industry_' +'developing_' +'cost_' +'L_' +'Iraq_' +'International_' +'G_' +'Dollar_' +' % _' +'wichtig_' +'special_' +'member_' +'hand_' +'created_' +'U_' +'N_' +'Investitionen_' +'40_' +'wirtschaftliche_' +'show_' +'nehmen_' +'management_' +'interests_' +'enough_' +'breakfast_' +'State_' +'24_' +'wenig_' +'proposals_' +'parties_' +'nichts_' +'ihm_' +'experience_' +'etc_' +'almost_' +'Vereinigten_' +'R_' +'Obama_' +'Leben_' +' ' +'transport_' +'taking_' +'remain_' +'programme_' +'play_' +'near_' +'general_' +'family_' +'erste_' +'aufgrund_' +'address_' +'Ziele_' +'Fraktion_' +'Daten_' +'side_' +'ren_' +'products_' +'men_' +'ihn_' +'history_' +'hinaus_' +'easy_' +'billion_' +'Service_' +'Richtlinie_' +'Krise_' +'Grundlage_' +'thus_' +'things_' +'site_' +'sei_' +'questions_' +'ous_' +'nuclear_' +'known_' +'ierung_' +'feel_' +'call_' +'building_' +'Thema_' +'Russland_' +'Regierungen_' +'Kollegen_' +'towards_' +'seinem_' +'ner_' +'iert_' +'ieren_' +'hin_' +'dessen_' +'befindet_' +'Verfügung_' +'Landes_' +'An_' +'wish_' +'technology_' +'rt_' +'proposed_' 
+'natürlich_' +'keinen_' +'demand_' +'darin_' +'Lösung_' +'Kommissar_' +'F_' +'14_' +'workers_' +'themselves_' +'solution_' +'projects_' +'pay_' +'months_' +'ity_' +'Programm_' +'Macht_' +'version_' +'regional_' +'program_' +'past_' +'ness_' +'lichen_' +'ische_' +'hard_' +'ever_' +'eren_' +'ask_' +'adopted_' +'Gemeinschaft_' +'East_' +'By_' +'60_' +'strategy_' +'points_' +'personal_' +'lead_' +'ive_' +'ihrem_' +'ia_' +'higher_' +'gute_' +'directive_' +'cultural_' +'beispielsweise_' +'agree_' +'Man_' +'welcome_' +'ted_' +'software_' +'sicher_' +'serious_' +'sche_' +'liche_' +'levels_' +'gilt_' +'gehen_' +'found_' +'either_' +'effective_' +'education_' +'above_' +'Situation_' +'After_' +'19_' +'vielleicht_' +'various_' +'specific_' +'schnell_' +'schaffen_' +'related_' +'freedom_' +'deal_' +'che_' +'besser_' +'bedeutet_' +'South_' +'Institutionen_' +'21_' +'rapporteur_' +'ls_' +'enjoy_' +'economies_' +'ebenfalls_' +'direkt_' +'bleiben_' +'big_' +'authorities_' +'allow_' +'November_' +'Meinung_' +'Even_' +'Ansicht_' +'. _' +'à_' +'user_' +'seen_' +'remains_' +'reforms_' +'ra_' +'provided_' +'plan_' +'opportunity_' +'ladies_' +'keit_' +'impact_' +'groups_' +'framework_' +'ens_' +'comes_' +'Schutz_' +'th_' +'strong_' +'simply_' +'significant_' +'quite_' +'let_' +'leading_' +'language_' +'em_' +'concerns_' +'climate_' +'behalf_' +'X_' +'Restaurant_' +'Kosten_' +'Dieser_' +'Da_' +'Africa_' +'town_' +'ting_' +'party_' +'pages_' +'makes_' +'globalen_' +'due_' +'Our_' +'Hilfe_' +'Demokratie_' +'2010_' +'upon_' +'seems_' +'relations_' +'peace_' +'online_' +'oil_' +'forward_' +'effect_' +'W_' +'Treaty_' +'Of_' +'Menschenrechte_' +'Form_' +'zurück_' +'true_' +'total_' +'subject_' +'safety_' +'later_' +'jeder_' +'growing_' +'face_' +'appropriate_' +'amendments_' +'West_' +'Minister_' +'Geschichte_' +'Alle_' +';_' +'young_' +'top_' +'solche_' +'she_' +'recently_' +'kind_' +'internationale_' +'individual_' +'ig_' +'euro_' +'environmental_' +'ebenso_' +'daran_' +'concerned_' +'Ukraine_' +'Strategie_' +'Madam_' +'Gesellschaft_' +'GDP_' +'Dieses_' +'w_' +'spending_' +'share_' +'positive_' +'opinion_' +'ma_' +'light_' +'führt_' +'eurozone_' +'ert_' +'community_' +'care_' +'attention_' +'Some_' +'Rates_' +'Milliarden_' +'EUR_' +'sure_' +'success_' +'son_' +'sollen_' +'sen_' +'outside_' +'km_' +'improve_' +'huge_' +'half_' +'genau_' +'funds_' +'four_' +'content_' +'changes_' +'addition_' +'Western_' +'é_' +'words_' +'traditional_' +'text_' +'station_' +'setzen_' +'rise_' +'nächsten_' +'nutzen_' +'nothing_' +'ions_' +'idea_' +'gab_' +'fundamental_' +'bekannt_' +'V_' +'T' +'Krieg_' +'Italy_' +'Beziehungen_' +'stand_' +'legislation_' +'ging_' +'field_' +'currently_' +'art_' +'Turkey_' +'Paris_' +'Mitglieder_' +'London_' +'Ihr_' +'Hotels_' +'Geld_' +'Frankreich_' +'Bevölkerung_' +'v_' +'times_' +'thank_' +'sense_' +'restaurant_' +'required_' +'population_' +'person_' +'house_' +'held_' +'heart_' +'gemacht_' +'fall_' +'developed_' +'deutlich_' +'bzw_' +'amerikanischen_' +'Während_' +'Members_' +'British_' +'"_' +' . 
_' +'ur_' +'minutes_' +'living_' +'inflation_' +'ie_' +'forces_' +'facilities_' +'arbeiten_' +'actually_' +'Zugang_' +'UK_' +'Moreover_' +'M' +'Dienstleistungen_' +'Bedingungen_' +'Auswirkungen_' +'Aus_' +'"._' +'sea_' +'run_' +'product_' +'principle_' +'nämlich_' +'model_' +'majority_' +'ll_' +'jobs_' +'hätte_' +'hours_' +'gesagt_' +'erreicht_' +'date_' +'certainly_' +'alten_' +'across_' +'statt_' +'sch_' +'reduce_' +'potential_' +'los_' +'indeed_' +'hatten_' +'game_' +'exchange_' +'employment_' +'einigen_' +'deshalb_' +'days_' +'cases_' +'benefits_' +'Windows_' +'Schritt_' +'National_' +'Markt_' +'Greece_' +'Bezug_' +'1999_' +')_' +'wichtige_' +'whose_' +'via_' +'unterstützt_' +'unserem_' +'tes_' +'stability_' +'similar_' +'range_' +'keyword_' +'igen_' +'directly_' +'consider_' +'company_' +'beautiful_' +'along_' +'air_' +'Umsetzung_' +'Interesse_' +'Erfolg_' +'Entscheidung_' +'Asia_' +',” _' +'si_' +'self_' +'regulation_' +'pool_' +'parts_' +'natural_' +'media_' +'gerade_' +'five_' +'enden_' +'elections_' +'ca_' +'application_' +'anti_' +'TV_' +'Office_' +'Liste_' +'Ihrer_' +'Haus_' +'Berlin_' +'understand_' +'lot_' +'lediglich_' +'job_' +'ende_' +'emerging_' +'War_' +'Tatsache_' +'Personen_' +'April_' +' : _' +'size_' +'prevent_' +'opportunities_' +'nde_' +'na_' +'mean_' +'lange_' +'involved_' +'cy_' +'conflict_' +'co_' +'capacity_' +'bring_' +'bleibt_' +'bed_' +'achieve_' +'While_' +'Tag_' +'Nähe_' +'January_' +'Ergebnis_' +'Ebene_' +'Debatte_' +'&_' +'wide_' +'thing_' +'sprechen_' +'rule_' +'regions_' +'project_' +'meeting_' +'handelt_' +'eher_' +'complete_' +'body_' +'Deshalb_' +'13_' +'violence_' +'verwendet_' +'verhindern_' +'table_' +'ss_' +'response_' +'politik_' +'je_' +'heißt_' +'ger_' +'focus_' +'dürfen_' +'competition_' +'clearly_' +'check_' +'car_' +'ben_' +'ant_' +'add_' +'Verantwortung_' +'Präsidenten_' +'La_' +'Jahres_' +'Interessen_' +'Government_' +'Afghanistan_' +'17_' +' ( _' +'unser_' +'scheint_' +'move_' +'lässt_' +'jeden_' +'increasingly_' +'image_' +'großer_' +'gleichzeitig_' +'gemeinsame_' +'features_' +'existing_' +'everything_' +'event_' +'ent_' +'ds_' +'computer_' +'clean_' +'civil_' +'bereit_' +'amount_' +'administration_' +'Park_' +'Irak_' +'Gewalt_' +'ways_' +'ste_' +'sozialen_' +'provides_' +'popular_' +'obwohl_' +'nie_' +'negotiations_' +'nature_' +'fight_' +'direct_' +'del_' +'culture_' +'center_' +'came_' +'Z' +'Kampf_' +'II_' +'G' +'23_' +'verfügt_' +'training_' +'six_' +'road_' +'night_' +'network_' +'live_' +'internal_' +'instead_' +'file_' +'decisions_' +'betrifft_' +'balance_' +'ary_' +'Türkei_' +'North_' +'Hier_' +'Bereichen_' +'Banken_' +'Aussprache_' +' -_' +'ya_' +'space_' +'meet_' +'keep_' +'extremely_' +'effects_' +'ck_' +'chinesischen_' +'below_' +'activities_' +'Zu_' +'San_' +'Partei_' +'Möglichkeiten_' +'I' +'Einsatz_' +'BIP_' +'Allerdings_' +'", _' +' ''_' +'weiß_' +'weiteren_' +'week_' +'wahrscheinlich_' +'values_' +'unternehmen_' +'simple_' +'return_' +'rest_' +'perhaps_' +'notwendig_' +'net_' +'infrastructure_' +'increased_' +'included_' +'il_' +'contains_' +'commitment_' +'besten_' +'Staat_' +'Spain_' +'Richtung_' +'Ort_' +'Booking_' +'   – _' +'vergangenen_' +'turn_' +'try_' +'tragen_' +'toward_' +'took_' +'tatsächlich_' +'step_' +'reviews_' +'responsible_' +'poverty_' +'negara_' +'möglicherweise_' +'late_' +'importance_' +'ideal_' +'hätten_' +'hohen_' +'former_' +'favour_' +'essential_' +'doing_' +'design_' +'customers_' +'currency_' +'] _' +'Software_' +'80_' +'“, _' +'zusammen_' +'ziehen_' +'wären_' +'spielen_' +'soziale_' 
+'performance_' +'oft_' +'moment_' +'lack_' +'kleinen_' +'klar_' +'fünf_' +'erst_' +'derzeit_' +'dar_' +'brauchen_' +'befinden_' +'beach_' +'ally_' +'Wert_' +'Party_' +'Großbritannien_' +'Grenzen_' +'Chinas_' +' ‘_' +'wichtigen_' +'weltweit_' +'wegen_' +'responsibility_' +'require_' +'reasons_' +'ngen_' +'negative_' +'liegen_' +'integration_' +'ings_' +'ian_' +'hen_' +'größten_' +'geführt_' +'external_' +'develop_' +'credit_' +'bank_' +'York_' +'Vor_' +'Today_' +'Preis_' +'Außerdem_' +'-, _' +'ta_' +'successful_' +'red_' +'protect_' +'president_' +'places_' +'largest_' +'implementation_' +'heit_' +'friendly_' +'double_' +'decades_' +'darf_' +'challenges_' +'Von_' +'Tatsächlich_' +'S' +'O' +'My_' +'Aufgabe_' +'Am_' +'ure_' +'sub_' +'stark_' +'soon_' +'rich_' +'pressure_' +'option_' +'neu_' +'jedes_' +'ien_' +'glaube_' +'events_' +'established_' +'despite_' +'comfortable_' +'cause_' +'built_' +'board_' +'benefit_' +'although_' +'Wahl_' +'Verfahren_' +'Regionen_' +'May_' +'Mal_' +'K_' +'Ihren_' +'Enterprise_' +'Britain_' +'Behörden_' +'Ausschuss_' +'Amerika_' +'ändern_' +'verschiedene_' +'takes_' +'strategic_' +'steps_' +'status_' +'len_' +'labor_' +'guarantee_' +'gehört_' +'einzelnen_' +'designed_' +'ces_' +'card_' +'cal_' +'behind_' +'agenda_' +'Website_' +'Verbindung_' +'Russian_' +'Rechte_' +'Presidency_' +'Initiative_' +'F' +'Court_' +'City_' +'Abstimmung_' +'�_' +'wichtigsten_' +'walk_' +'video_' +'type_' +'terrorism_' +'stop_' +'standard_' +'schließlich_' +'risks_' +'regime_' +'post_' +'nya_' +'nor_' +'internet_' +'gehören_' +'ermöglichen_' +'bisher_' +'beginning_' +'became_' +'Zum_' +'Verfassung_' +'Uhr_' +'Spanien_' +'Platz_' +'First_' +'Fed_' +'D' +'Bemühungen_' +'Armut_' +'. - (_' +'zeit_' +'zeigen_' +'wohl_' +'visit_' +'verbunden_' +'threat_' +'thought_' +'receive_' +'reach_' +'probably_' +'practice_' +'official_' +'nice_' +'mal_' +'lower_' +'looking_' +'ler_' +'gemeinsam_' +'gar_' +'findet_' +'fen_' +'complex_' +'committee_' +'closed_' +'angesichts_' +'ability_' +'Zahl_' +'Yet_' +'Vorschläge_' +'Version_' +'Verhandlungen_' +'Politiker_' +'More_' +'Mehrheit_' +'How_' +'Führung_' +'Eurozone_' +'Ergebnisse_' +'Bar_' +'B' +'Ansatz_' +'70_' +'200_' +'ze_' +'wobei_' +'study_' +'started_' +'rund_' +'reality_' +'purpose_' +'programs_' +'plans_' +'music_' +'monetary_' +'limited_' +'konnte_' +'ke_' +'isch_' +'highly_' +'guests_' +'falls_' +'enable_' +'confidence_' +'bad_' +'according_' +'accept_' +'V' +'Reformen_' +'Prozess_' +'Nationen_' +'NATO_' +'Kunden_' +'K' +'Indien_' +'Handel_' +'From_' +'Druck_' +'Dabei_' +'Antwort_' +'..." 
_' +' / _' +'vier_' +'sustainable_' +'style_' +'shown_' +'raise_' +'previous_' +'matters_' +'lives_' +'ken_' +'industrial_' +'helfen_' +'creating_' +'context_' +'consumers_' +'consequences_' +'con_' +'basic_' +'answer_' +'Prozent_' +'Only_' +'June_' +'English_' +'Development_' +', “_' +'zehn_' +'werde_' +'unique_' +'ton_' +'setting_' +'seines_' +'presented_' +'ors_' +'lost_' +'konnten_' +'knowledge_' +'ihres_' +'gegeben_' +'gebracht_' +'gas_' +'erforderlich_' +'effort_' +'creation_' +'cht_' +'choose_' +'caused_' +'categories_' +'bus_' +'beyond_' +'asked_' +'active_' +'Wort_' +'Seit_' +'Punkt_' +'Now_' +'Gruppe_' +'Entscheidungen_' +'Berichterstatter_' +'Artikel_' +'Arab_' +'öffentliche_' +'zen_' +'zeigt_' +'ties_' +'seem_' +'saying_' +'politicians_' +'partner_' +'note_' +'nahe_' +'latest_' +'ks_' +'hohe_' +'guten_' +'gewährleisten_' +'gesamten_' +'finance_' +'failure_' +'evidence_' +'entwickeln_' +'enthält_' +'darum_' +'dadurch_' +'challenge_' +'alone_' +'act_' +'Spiel_' +'Putin_' +'P' +'Hinblick_' +'General_' +'Gelegenheit_' +'Gefahr_' +'Gebiet_' +'Förderung_' +'Europeans_' +'Darüber_' +'Dank_' +'Damit_' +'Beginn_' +'Barcelona_' +'August_' +'Abkommen_' +'Öffentlichkeit_' +'verwenden_' +'unemployment_' +'treatment_' +'source_' +'sound_' +'sometimes_' +'solutions_' +'quickly_' +'programmes_' +'please_' +'objective_' +'lines_' +'larger_' +'ker_' +'guest_' +'damage_' +'build_' +'aware_' +'average_' +'aktuellen_' +'agricultural_' +'achieved_' +'University_' +'St_' +'Schlusselwortern_' +'Regeln_' +'Produkte_' +'Middle_' +'March_' +'H' +'Datei_' +'CD_' +'Bildung_' +'500_' +'1990_' +' " _' +'“._' +'äußerst_' +'zone_' +'ves_' +'v' +'throughout_' +'t' +'ssen_' +'request_' +'politics_' +'movement_' +'mentioned_' +'leben_' +'jede_' +'independent_' +'gleichen_' +'gleich_' +'ganzen_' +'fragen_' +'fest_' +'fair_' +'failed_' +'ermöglicht_' +'equal_' +'enlargement_' +'distribution_' +'direction_' +'ding_' +'danken_' +'coming_' +'choice_' +'cally_' +'Terrorismus_' +'Palestinian_' +'Minuten_' +'IMF_' +'Herren_' +'Funktion_' +'Anfang_' +'Abgeordneten_' +'zed_' +'völlig_' +'verstehen_' +'test_' +'supported_' +'shows_' +'setzt_' +'recht_' +'procedure_' +'principles_' +'lt_' +'lose_' +'ini_' +'includes_' +'ht_' +'hold_' +'gestellt_' +'gemeinsamen_' +'final_' +'fear_' +'e' +'domestic_' +'deficit_' +'consumer_' +'cher_' +'charge_' +'book_' +'base_' +'anything_' +'akan_' +'advanced_' +'X' +'W' +'Since_' +'Ressourcen_' +'Notwendigkeit_' +'Natürlich_' +'Kraft_' +'Korea_' +'Kontrolle_' +'Israeli_' +'Hand_' +'Fortschritte_' +'Erweiterung_' +'Debian_' +'Ausdruck_' +'Aufmerksamkeit_' +'übernachten_' +'x' +'web_' +'verfügen_' +'submitted_' +'speed_' +'reached_' +'produce_' +'perfect_' +'objectives_' +'mind_' +'ments_' +'initiative_' +'i' +'hoffe_' +'ground_' +'goods_' +'giving_' +'famous_' +'fallen_' +'entwickelt_' +'don_' +'considered_' +'class_' +'ck' +'cities_' +'bekommen_' +'additional_' +'accommodation_' +'Y_' +'Wann_' +'Viele_' +'Tage_' +'Security_' +'Rights_' +'Many_' +'Lisbon_' +'Folgen_' +'Federal_' +'E' +'Damen_' +'Blick_' +'Bild_' +'Bekämpfung_' +'Ausgaben_' +'Anwendung_' +'Angesichts_' +'Americans_' +'90_' +'45_' +'27_' +'22_' +'%._' +' % _' +'zudem_' +'wrong_' +'worked_' +'weder_' +'ut_' +'untuk_' +'tell_' +'später_' +'speak_' +'situated_' +'richtig_' +'restaurants_' +'res_' +'produced_' +'p' +'news_' +'m' +'ige_' +'häufig_' +'größte_' +'globale_' +'est_' +'enthalten_' +'emissions_' +'decided_' +'death_' +'completely_' +'brought_' +'au_' +'annual_' +'added_' +'Veränderungen_' +'Umwelt_' +'Services_' 
+'Schaffung_' +'Reihe_' +'Reform_' +'Instead_' +'Here_' +'Gesamt' +'Fund_' +'Finally_' +'Einfluss_' +'Durch_' +'December_' +'Dazu_' +'1791_' +'Änderungsantrag_' +'zahlen_' +'weapons_' +'voted_' +'technologies_' +'target_' +'secure_' +'requirements_' +'partners_' +'package_' +'options_' +'massive_' +'ism_' +'increasing_' +'goal_' +'files_' +'extent_' +'erung_' +'erster_' +'eigene_' +'contact_' +'consumption_' +'ber_' +'allows_' +'aim_' +'agreements_' +'Zentrum_' +'Text_' +'Schließlich_' +'Qualität_' +'Mitglied_' +'L' +'Kosovo_' +'Its_' +'Frieden_' +'During_' +'Chance_' +'300_' +'zweite_' +'won_' +'trotz_' +'tions_' +'technical_' +'students_' +'send_' +'prepared_' +'original_' +'mobile_' +'mail_' +'item_' +'function_' +'front_' +'f' +'extra_' +'entire_' +'election_' +'eben_' +'dialogue_' +'critical_' +'changed_' +'ang_' +'allowed_' +'Wettbewerb_' +'Verwendung_' +'So' +'Nutzung_' +'Nations_' +'Märkte_' +'Kultur_' +'Jahrhundert_' +'Italien_' +'Gästebewertungen_' +'Furthermore_' +'Erklärung_' +'Daher_' +'DE_' +'Beitrag_' +': „_' +'28_' +'1781_' +'zweiten_' +'wenige_' +'website_' +'wealth_' +'voll_' +'versuchen_' +'team_' +'supply_' +'stärker_' +'sorgen_' +'solidarity_' +'scale_' +'ring_' +'providing_' +'players_' +'paid_' +'opposition_' +'ling_' +'lang_' +'kam_' +'influence_' +'ier_' +'geworden_' +'genug_' +'gain_' +'ft_' +'forms_' +'follow_' +'erte_' +'erklären_' +'einschließlich_' +'distance_' +'concern_' +'concept_' +'carried_' +'campaign_' +'borders_' +'began_' +'ate_' +'aspects_' +'allein_' +'[_' +'WTO_' +'Server_' +'Programme_' +'Meer_' +'July_' +'Forschung_' +'Fehler_' +'Familie_' +'Ausschusses_' +'Although_' +'African_' +'? _' +': ' +'story_' +'stage_' +'server_' +'officials_' +'office_' +'offered_' +'nis_' +'legen_' +'leave_' +'jene_' +'insgesamt_' +'immigration_' +'hinter_' +'genannten_' +'fördern_' +'ful_' +'erwartet_' +'erwarten_' +'erneut_' +'doubt_' +'digital_' +'dari_' +'concerning_' +'bitte_' +'bevor_' +'apartment_' +'anderer_' +'Verbraucher_' +'Unsere_' +'Portugal_' +'Person_' +'Pakistan_' +'Organisation_' +'Opfer_' +'Ko' +'Idee_' +'H_' +'Griechenland_' +'Gesundheit_' +'EN_' +'DVD_' +'C' +' [_' +' ) _' +'zumindest_' +'z' +'writing_' +'worldwide_' +'verbessern_' +'uses_' +'users_' +'treffen_' +'tidak_' +'sed_' +'search_' +'save_' +'reports_' +'quiet_' +'professional_' +'privaten_' +'parking_' +'month_' +'map_' +'kosten_' +'jedem_' +'historical_' +'head_' +'gt_' +'generation_' +'funding_' +'einzige_' +'disease_' +'d' +'construction_' +'connection_' +'committed_' +'code_' +'child_' +'airport_' +'Werte_' +'Wasser_' +'Vergangenheit_' +'Unter_' +'Themen_' +'Stunden_' +'Prime_' +'Obwohl_' +'Most_' +'Mediterranean_' +'Linux_' +'Le_' +'Italian_' +'Information_' +'Herausforderung_' +'Flughafen_' +'Dialog_' +'Anti' +'Afrika_' +'übernehmen_' +'Änderungsanträge_' +'ying_' +'usually_' +'tic_' +'seek_' +'practical_' +'nimmt_' +'mus_' +'ms_' +'morning_' +'meinen_' +'material_' +'links_' +'kleine_' +'ja' +'implemented_' +'hoch_' +'helpful_' +'glauben_' +'getan_' +'geschaffen_' +'fishing_' +'erklärt_' +'effectively_' +'dollar_' +'deutschen_' +'demokratischen_' +'demands_' +'decline_' +'communication_' +'ch' +'benötigen_' +'applied_' +'angenommen_' +'amerikanische_' +'alternative_' +'Zeitpunkt_' +'Wahlen_' +'Tat_' +'Stelle_' +'Room_' +'Risiken_' +'People_' +'Parteien_' +'Lösungen_' +'Let_' +'Industrie_' +'Ihrem_' +'Hamas_' +'Fällen_' +'Frühstück_' +'Erstens_' +'Einkommen_' +'Dinge_' +'Dezember_' +'Center_' +'Austria_' +'Affairs_' +': "_' +'   ._' +'   . 
_' +'zunehmend_' +'zentrale_' +'works_' +'warum_' +'wants_' +'wanted_' +'vermeiden_' +'ver_' +'statement_' +'served_' +'series_' +'safe_' +'relationship_' +'provisions_' +'police_' +'neuer_' +'neben_' +'nce_' +'leadership_' +'leader_' +'join_' +'illegal_' +'gewesen_' +'ess_' +'eigentlich_' +'cs_' +'cken_' +'businesses_' +'border_' +'avoid_' +'authority_' +'applications_' +'appears_' +'agreed_' +'actions_' +'Zudem_' +'Schwierigkeiten_' +'Republic_' +'Präsidentin_' +'Please_' +'Juni_' +'J' +'IT_' +'Höhe_' +'Heute_' +'Greek_' +'Google_' +'George_' +'Egypt_' +'Economic_' +'Convention_' +'Amerikas_' +'2013_' +'1998_' +'160_' +'.  _' +' "..._' +'tra' +'structural_' +'star_' +'stable_' +'speech_' +'somit_' +'solchen_' +'schützen_' +'regards_' +'received_' +'read_' +'property_' +'powerful_' +'politischer_' +'path_' +'overall_' +'nearly_' +'n' +'method_' +'meinem_' +'lle_' +'legislative_' +'ine_' +'igkeit_' +'ideas_' +'getting_' +'folgt_' +'everyone_' +'establish_' +'ell_' +'drive_' +'cut_' +'competitive_' +'compared_' +'chinesische_' +'bit_' +'beitragen_' +'bare_' +'b' +'ations_' +'Zeiten_' +'Tagen_' +'Such_' +'Station_' +'Sozial' +'R' +'Position_' +'Nachfrage_' +'Management_' +'Latin_' +'Kingdom_' +'Integration_' +'Herzen_' +'Globalisierung_' +'Financial_' +'Club_' +'Bestimmungen_' +'Aktivitäten_' +'31_' +'3' +''' _' +'с' +'Änderungen_' +'zahlreiche_' +'word_' +'variety_' +'union_' +'trading_' +'talk_' +'serve_' +'rising_' +'requires_' +'reducing_' +'reduced_' +'mehrere_' +'leicht_' +'jüngsten_' +'joint_' +'instruments_' +'immediately_' +'ierte_' +'hinsichtlich_' +'geschlossen_' +'folgen_' +'erhöhen_' +'ere_' +'equipped_' +'elsewhere_' +'efficient_' +'durchgeführt_' +'discussion_' +'difference_' +'developments_' +'comprehensive_' +'bringt_' +'bewusst_' +'beide_' +'attacks_' +'anders_' +'Vertrauen_' +'Revolution_' +'Plan_' +'PC_' +'Konferenz_' +'Japanese_' +'Ireland_' +'Great_' +'Centre_' +'CO2_' +'Bitte_' +'Anzahl_' +'.' +'wichtiger_' +'weltweiten_' +'types_' +'train_' +'tools_' +'thousands_' +'suggest_' +'stock_' +'sectors_' +'school_' +'sagte_' +'representatives_' +'reichen_' +'promote_' +'productivity_' +'priority_' +'possibility_' +'park_' +'nationale_' +'mit' +'message_' +'medical_' +'las_' +'instrument_' +'initiatives_' +'ierten_' +'ial_' +'genommen_' +'ga_' +'frei_' +'farmers_' +'expected_' +'elements_' +'elected_' +'easily_' +'degree_' +'deficits_' +'chance_' +'bestehen_' +'ausgestattet_' +'attack_' +'ated_' +'affected_' +'Woche_' +'Web_' +'Vertrag_' +'Tu' +'Syria_' +'Stabilität_' +'Pro' +'Preise_' +'Policy_' +'Nicht_' +'Neu' +'März_' +'Microsoft_' +'Markt' +'Mail_' +'Lissabon_' +'Land' +'Jo' +'His_' +'Global_' +'Finanz' +'Energie_' +'Design_' +'Constitution_' +'Brazil_' +'Besuch_' +'Bereiche_' +'Bad_' +'A' +'. 
' +'whom_' +'ums_' +'ue_' +'tät_' +'turned_' +'relativ_' +'refugees_' +'reduction_' +'played_' +'para_' +'nächste_' +'ning_' +'ni_' +'middle_' +'mein_' +'letzte_' +'leider_' +'kaum_' +'k' +'ismus_' +'institutional_' +'forced_' +'expect_' +'erfolgreich_' +'enter_' +'diejenigen_' +'crucial_' +'commercial_' +'circumstances_' +'carry_' +'becoming_' +'bald_' +'aufgenommen_' +'activity_' +'Why_' +'Vereinten_' +'Verbesserung_' +'Technologie_' +'Te' +'Systems_' +'Standards_' +'Site_' +'Personal_' +'Osten_' +'Oktober_' +'October_' +'Not_' +'Küche_' +'Just_' +'Infrastruktur_' +'High_' +'Guest_' +'Grand_' +'Freiheit_' +'Free_' +'Finanzierung_' +'Directive_' +'CA' +'Auffassung_' +'According_' +'35_' +'29_' +'1980_' +'., _' +'* _' +')' +'® _' +'y' +'went_' +'travel_' +'ten' +'task_' +'sieht_' +'ship_' +'review_' +'religious_' +'relevant_' +'record_' +'procedures_' +'precisely_' +'pleased_' +'paar_' +'minute_' +'minister_' +'mention_' +'maintain_' +'leisten_' +'jeweiligen_' +'island_' +'investors_' +'improving_' +'hour_' +'hotels_' +'h' +'größere_' +'gesamte_' +'gekommen_' +'firms_' +'ence_' +'dringend_' +'dangerous_' +'conference_' +'colleagues_' +'c' +'break_' +'betrachtet_' +'bereich_' +'apply_' +'ance_' +'akzeptieren_' +'Währung_' +'Waffen_' +'Umgebung_' +'Trade_' +'Therefore_' +'Star_' +'Sicht_' +'N' +'IWF_' +'England_' +'Einführung_' +'Do_' +'Conference_' +'Co' +'Auswahl_' +'Asien_' +'Arbeitnehmer_' +'4' +', ' +'). _' +'zusätzliche_' +'ze' +'written_' +'white_' +'weise_' +'walking_' +'unbedingt_' +'trust_' +'tor_' +'tabled_' +'sts_' +'sse_' +'sign_' +'schaft_' +'sa_' +'s' +'round_' +'reserves_' +'regulations_' +'raised_' +'presence_' +'ped_' +'organisation_' +'neither_' +'namely_' +'mag_' +'länger_' +'ku' +'justice_' +'holiday_' +'historischen_' +'hands_' +'gives_' +'genießen_' +'ganze_' +'feature_' +'facing_' +'equipment_' +'draw_' +'documents_' +'denke_' +'deine_' +'boost_' +'banking_' +'attempt_' +'atmosphere_' +'assistance_' +'aimed_' +'agriculture_' +'advantage_' +'Verordnung_' +'Transparenz_' +'Tagesordnung_' +'Spanish_' +'Sorge_' +'Social_' +'Secondly_' +'Sea_' +'Rooms_' +'Robert_' +'Restaurants_' +'Nur_' +'Mai_' +'Linie_' +'Gründen_' +'Erfahrung_' +'Den_' +'Code_' +'Asian_' +'Also_' +'2' +'.)_' +'--_' +'weeks_' +'voting_' +'votes_' +'unterschiedlichen_' +'trying_' +'stets_' +'sm_' +'shift_' +'section_' +'sechs_' +'recovery_' +'programm_' +'pro' +'press_' +'pre_' +'phone_' +'ory_' +'oben_' +'networks_' +'ned_' +'nations_' +'nation_' +'modified_' +'merely_' +'membership_' +'meines_' +'lösen_' +'ley_' +'largely_' +'keiten_' +'kannst_' +'implement_' +'historic_' +'happen_' +'gs_' +'grounds_' +'goes_' +'gesetzt_' +'friends_' +'fort' +'floor_' +'eten_' +'establishment_' +'erkennen_' +'erfordert_' +'efficiency_' +'draft_' +'der' +'daily_' +'conclusion_' +'ches_' +'changing_' +'carbon_' +'buy_' +'burden_' +'bathroom_' +'assessment_' +'Wettbewerbsfähigkeit_' +'Schulden_' +'Rezession_' +'Regierungs' +'Raum_' +'Punkte_' +'Per' +'Museum_' +'Monaten_' +'Methode_' +'Jahrhunderts_' +'Islam_' +'Inter' +'Innovation_' +'Human_' +'Gruppen_' +'Groß' +'Europäer_' +'Diskussion_' +'Both_' +'Bedrohung_' +'Arbeitslosigkeit_' +'Al_' +'Airport_' +'8' +'2012_' +'Änderung_' +'zuletzt_' +'ys_' +'unten_' +'sta' +'sing_' +'ses_' +'scientific_' +'schwierig_' +'running_' +'rten_' +'regarding_' +'plus_' +'plant_' +'participation_' +'output_' +'normal_' +'nbsp_' +'mission_' +'lag_' +'ko' +'j_' +'ität_' +'innovation_' +'innen_' +'improved_' +'impossible_' +'hält_' +'hol' +'haus_' +'gezeigt_' +'ges_' +'gen' +'financing_' 
+'fe_' +'express_' +'export_' +'entsprechende_' +'ei_' +'deep_' +'decade_' +'contribution_' +'considerable_' +'competitiveness_' +'bodies_' +'bilden_' +'begin_' +'außerhalb_' +'Zweitens_' +'Zweifel_' +'Wirtschaftswachstum_' +'Wer_' +'Warum_' +'Vorteile_' +'Unterkategorien_' +'She_' +'Resort_' +'Republik_' +'Ra' +'Projekt_' +'Produktion_' +'Partner_' +'No' +'Mitte_' +'Lo' +'Investoren_' +'Forum_' +'Erfahrungen_' +'Energie' +'Einigung_' +'Du' +'Article_' +'Angebot_' +'... _' +'überzeugt_' +'ß_' +'zing_' +'za' +'vision_' +'versucht_' +'up' +'treten_' +'transparent_' +'told_' +'ti' +'spend_' +'speaking_' +'sites_' +'shopping_' +'sh_' +'screen_' +'says_' +'refer_' +'reading_' +'rd_' +'raum_' +'post' +'policymakers_' +'outcome_' +'operations_' +'operation_' +'opening_' +'ol' +'nach' +'multi_' +'mass_' +'manufacturing_' +'lies_' +'king_' +'ir_' +'intended_' +'insurance_' +'hu' +'hin' +'highest_' +'happened_' +'handeln_' +'gewinnen_' +'film_' +'families_' +'exports_' +'erfüllen_' +'ellen_' +'easier_' +'document_' +'derartige_' +'defense_' +'darstellt_' +'darstellen_' +'controls_' +'congratulate_' +'compromise_' +'clients_' +'braucht_' +'betrachten_' +'bestimmten_' +'bar' +'au' +'appear_' +'ans_' +'ale_' +'addressed_' +'Westen_' +'Welt' +'Wasser' +'Vielfalt_' +'Technologien_' +'Su' +'Street_' +'Spa_' +'RE' +'Public_' +'Privat' +'Poland_' +'Online_' +'Musik_' +'Kommissarin_' +'Kolleginnen_' +'John_' +'Inflation_' +'Handels' +'Folge_' +'Erholung_' +'ECB_' +'Da' +'Costa_' +'Because_' +'A5_' +'26_' +'2011_' +'überhaupt_' +'überall_' +'ßen_' +'zunächst_' +'ya' +'wählen_' +'wing_' +'wider_' +'waste_' +'vital_' +'victims_' +'useful_' +'urban_' +'theory_' +'structure_' +'ster_' +'staatlichen_' +'schwer_' +'saw_' +'sales_' +'relation_' +'rapidly_' +'profitieren_' +'primary_' +'presidency_' +'pre' +'powers_' +'planning_' +'offen_' +'numerous_' +'neues_' +'muß_' +'moral_' +'mainly_' +'lagen_' +'kurz_' +'ka_' +'investments_' +'inter' +'innovative_' +'heard_' +'gern_' +'generally_' +'gelegen_' +'fund_' +'freien_' +'finally_' +'establishing_' +'entspricht_' +'entscheiden_' +'eiten_' +'eindeutig_' +'details_' +'desire_' +'dengan_' +'core_' +'calls_' +'bestimmte_' +'bessere_' +'analysis_' +'amendment_' +'alt_' +'aktuelle_' +']], _' +'Zentralbank_' +'Vorschriften_' +'Volkswirtschaften_' +'Unser_' +'Tri' +'Time_' +'Team_' +'Strand_' +'Stimme_' +'Sinne_' +'Sicherheits' +'Sektor_' +'See' +'Schritte_' +'Reserve_' +'Re' +'Maße_' +'Juli_' +'Installation_' +'Herausforderungen_' +'Haushalts' +'Euro' +'En' +'Despite_' +'Dateien_' +'Congress_' +'Bundes' +'Brexit_' +'Brasilien_' +'Berichte_' +'Benutzer_' +'Arbeitsplätze_' +'Anteil_' +'Annahme_' +'Angelegenheiten_' +'Amsterdam_' +'Amendment_' +'worth_' +'worse_' +'vorgeschlagen_' +'transfer_' +'tool_' +'territory_' +'taxes_' +'steigen_' +'stated_' +'spirit_' +'spa' +'sent_' +'richtige_' +'release_' +'reference_' +'ping_' +'phase_' +'paper_' +'o' +'numbers_' +'moderne_' +'met_' +'menu_' +'me' +'linked_' +'limits_' +'learn_' +'interesting_' +'interested_' +'icht_' +'hoch' +'helped_' +'halte_' +'gesehen_' +'genuine_' +'gefunden_' +'g' +'freie_' +'fordern_' +'fixed_' +'fine_' +'fail_' +'extension_' +'examples_' +'erzielt_' +'enormous_' +'endlich_' +'encourage_' +'else_' +'eit_' +'dly_' +'district_' +'criteria_' +'continued_' +'consensus_' +'candidate_' +'buffet_' +'britischen_' +'books_' +'bonds_' +'bestimmt_' +'becomes_' +'baren_' +'apartments_' +'animals_' +'adopt_' +'accepted_' +'Worten_' +'Weltwirtschaft_' +'Wege_' +'Vor' +'Vertreter_' +'Urlaub_' +'Um' +'Turkish_' +'Ste' 
+'Status_' +'Selbst_' +'Sache_' +'Red_' +'RI' +'Nice_' +'NEW_' +'Muslim_' +'Meine_' +'Lebens' +'Last_' +'Konzept_' +'Januar_' +'Golf_' +'Gaza_' +'Einige_' +'Behandlung_' +'Alternative_' +'Agenda_' +'400_' +'0' +', „_' +'“_' +'у' +'Über' +'wo' +'willing_' +'weisen_' +'warm_' +'verbundenen_' +'understanding_' +'ul_' +'tried_' +'traffic_' +'tomorrow_' +'to' +'ter' +'television_' +'targets_' +'suggests_' +'sufficient_' +'stärken_' +'stronger_' +'spread_' +'signed_' +'shared_' +'separate_' +'seeking_' +'scope_' +'ro' +'respond_' +'released_' +'regionale_' +'ready_' +'putting_' +'published_' +'pass_' +'owing_' +'org_' +'modernen_' +'mittel_' +'minimum_' +'managed_' +'lu' +'log_' +'lernen_' +'kommenden_' +'kennen_' +'integrated_' +'improvement_' +'immediate_' +'identity_' +'hear_' +'green_' +'governance_' +'got_' +'games_' +'flight_' +'fellow_' +'exactly_' +'evening_' +'europäischer_' +'el' +'ehemaligen_' +'earlier_' +'difficulties_' +'damals_' +'dagegen_' +'cross_' +'crime_' +'comfort_' +'character_' +'camera_' +'box_' +'bezüglich_' +'beste_' +'behavior_' +'aten_' +'approved_' +'anti' +'acht_' +'abge' +'Zustimmung_' +'Za' +'Vielleicht_' +'USS_' +'Tra' +'Sterne_' +'Solidarität_' +'Sinn_' +'Risiko_' +'Regime_' +'Rechts' +'Prodi_' +'Nachbarn_' +'Monat_' +'Modell_' +'Ku' +'J_' +'In' +'III_' +'Her' +'Health_' +'Eindruck_' +'EZB_' +'Do' +'Clinton_' +'Business_' +'Bis_' +'Bilder_' +'Bau' +'Barack_' +'Au' +'+_' +'о' +'ät_' +'Überwachung_' +'www_' +'wirtschaftlicher_' +'wert_' +'welfare_' +'voters_' +'vo' +'verringern_' +'verpflichtet_' +'van_' +'unge' +'une_' +'unable_' +'umgesetzt_' +'ultimately_' +'summer_' +'street_' +'specifically_' +'sort_' +'sicherzustellen_' +'ser_' +'revolution_' +'resolve_' +'rer_' +'reflect_' +'quote_' +'ps_' +'protected_' +'port_' +'planet_' +'placed_' +'pada_' +'otherwise_' +'ones_' +'offering_' +'morgen_' +'millions_' +'mer' +'measure_' +'machine_' +'licher_' +'letztlich_' +'konzentrieren_' +'je' +'j' +'io_' +'host_' +'holding_' +'größeren_' +'greatest_' +'gleiche_' +'gelangen_' +'ga' +'führte_' +'faces_' +'expressed_' +'expensive_' +'era_' +'entweder_' +'entsprechend_' +'ensuring_' +'durch' +'download_' +'divided_' +'discussions_' +'discussed_' +'described_' +'dennoch_' +'deliver_' +'continues_' +'continent_' +'conclude_' +'comments_' +'color_' +'click_' +'broad_' +'bestand_' +'berg_' +'begrüße_' +'beginnen_' +'bedarf_' +'außerdem_' +'aus' +'arbeitet_' +'anderem_' +'alte_' +'accession_' +'abzu' +'Vorteil_' +'Video_' +'Versuch_' +'Trotz_' +'Tre' +'Standard_' +'Saudi_' +'Polen_' +'Pe' +'Or' +'Open_' +'Nachdem_' +'NA' +'Mo' +'Mit' +'Michael_' +'James_' +'Ist_' +'Haltung_' +'Gäste_' +'Gegenteil_' +'Entschließung_' +'Ent' +'El_' +'Bürgern_' +'Ben' +'Beitritt_' +'Arbeits' +'Anstieg_' +'75_' +'33_' +'32_' +'30' +'.._' +'%, _' +'” – _' +'ı' +'Ä' +'­_' +'window_' +'widely_' +'west_' +'wer_' +'vollständig_' +'veröffentlicht_' +'ve' +'ufen_' +'tte_' +'tradition_' +'thereby_' +'tan_' +'spent_' +'southern_' +'sources_' +'skills_' +'sanctions_' +'rural_' +'root_' +'reception_' +'profit_' +'priorities_' +'player_' +'partly_' +'oc' +'obligations_' +'nachdem_' +'militärische_' +'mar' +'läuft_' +'loss_' +'lo_' +'llen_' +'liberal_' +'ite_' +'it' +'industrie_' +'individuals_' +'höhere_' +'himself_' +'heutigen_' +'granted_' +'gi' +'format_' +'firm_' +'ff_' +'fand_' +'fahren_' +'expectations_' +'exclusive_' +'erlaubt_' +'entsprechenden_' +'eln_' +'einiger_' +'dürfte_' +'doesn_' +'detailed_' +'denken_' +'default_' +'cuts_' +'cover_' +'communities_' +'claim_' +'britische_' +'außer_' 
+'associated_' +'article_' +'ahead_' +'actual_' +'absolutely_' +'Wohn' +'Uni' +'UNO_' +'Trump_' +'Teilen_' +'Systeme_' +'Strategien_' +'Square_' +'Secretary_' +'Schulden' +'Regel_' +'Präsidentschaft_' +'Po' +'Männer_' +'Ma' +'Leute_' +'Kindern_' +'Kapital' +'Justice_' +'Ja' +'Islamic_' +'Homepage_' +'Geschäfts' +'Ger' +'Gebäude_' +'Frankfurt_' +'Firmen_' +'Erachtens_' +'Einrichtungen_' +'Ebenso_' +'Di' +'Christian_' +'Breakfast_' +'Bio' +'Ausland_' +'Argentina_' +'Ad' +'2014_' +'" (_' +' -, _' +'—_' +'ó' +'ä' +'Öl' +'wieder' +'whatever_' +'vorhanden_' +'ures_' +'unver' +'uner' +'ul' +'tt_' +'treated_' +'sun_' +'suchen_' +'stra' +'someone_' +'so' +'smaller_' +'slow_' +'sides_' +'seven_' +'ro_' +'represents_' +'relating_' +'regionalen_' +'rapid_' +'r' +'pour_' +'permanent_' +'pe_' +'payments_' +'parliamentary_' +'oren_' +'operating_' +'ons' +'nie' +'ne' +'möglichkeiten_' +'monitoring_' +'miteinander_' +'mark_' +'loans_' +'lo' +'listed_' +'link_' +'limit_' +'leads_' +'languages_' +'land' +'kt_' +'ju' +'ji' +'ization_' +'iten_' +'inzwischen_' +'introduced_' +'ians_' +'hören_' +'höher_' +'geschehen_' +'gel' +'garden_' +'funktioniert_' +'fuel_' +'französischen_' +'ff' +'expansion_' +'enti' +'discuss_' +'cutting_' +'corporate_' +'contemporary_' +'connected_' +'combination_' +'causes_' +'benutzt_' +'begann_' +'bear_' +'battle_' +'bars_' +'auszu' +'ating_' +'as' +'ana' +'Zur_' +'Zinsen_' +'Währungs' +'Volk_' +'U' +'Thus_' +'Their_' +'Standpunkt_' +'Se' +'Realität_' +'Prioritäten_' +'Nutzen_' +'Netherlands_' +'Natur_' +'MySQL_' +'Mer' +'Leistungen_' +'Krankheiten_' +'Klima' +'Klicken_' +'Insel_' +'Hoffnung_' +'Gre' +'God_' +'Gesellschaften_' +'Gegensatz_' +'Film_' +'Fe' +'Falle_' +'Eastern_' +'Dr_' +'Denn_' +'Democrats_' +'Car' +'Bewegung_' +'Best_' +'Augen_' +'Atmosphäre_' +'Abschluss_' +'2015_' +'. 
(_' +', "_' +'„_' +'über' +'äu' +'  _' +'wrote_' +'write_' +'wodurch_' +'win_' +'wesentlich_' +'weg_' +'verlieren_' +'verfahren_' +'va' +'unto_' +'unless_' +'ungefähr_' +'unacceptable_' +'umfassende_' +'turning_' +'trägt_' +'truth_' +'transparency_' +'transition_' +'traditionellen_' +'thinking_' +'thanks_' +'terrorist_' +'technological_' +'talks_' +'swimming_' +'suffer_' +'strategies_' +'stimmen_' +'stellte_' +'starke_' +'st' +'schließen_' +'russischen_' +'resulting_' +'represent_' +'relate_' +'regulatory_' +'ran_' +'punkt_' +'presidential_' +'picture_' +'oni' +'ok_' +'offiziellen_' +'offensichtlich_' +'ns' +'nis' +'nachhaltige_' +'models_' +'migration_' +'mid_' +'meaning_' +'maßnahmen_' +'materials_' +'lebih_' +'laut_' +'laid_' +'komplett_' +'ing' +'industries_' +'ical_' +'ha' +'großes_' +'gleich' +'gezwungen_' +'getroffen_' +'geo' +'functions_' +'followed_' +'folgenden_' +'figures_' +'faced_' +'fa' +'ey_' +'extensive_' +'eu' +'erhöht_' +'equally_' +'enz' +'entstehen_' +'ele' +'einander_' +'directives_' +'determined_' +'ded_' +'debates_' +'deaths_' +'daten_' +'contain_' +'closer_' +'cheap_' +'che' +'besondere_' +'berücksichtigt_' +'behandelt_' +'auf' +'arms_' +'arbeit_' +'announced_' +'Wochen_' +'Vergleich_' +'Unabhängigkeit_' +'Un' +'Umständen_' +'Two_' +'Stimmen_' +'Steuer' +'Sta' +'Spieler_' +'Shi' +'Second_' +'SA' +'Royal_' +'Pri' +'Op' +'Old_' +'Ohne_' +'Nun_' +'Nachfolgend_' +'Mitteilung_' +'Me' +'Lu' +'Le' +'Kritik_' +'Ke' +'Is_' +'Instrument_' +'ID_' +'Home_' +'Hinsicht_' +'Haupt' +'Gra' +'Gold_' +'Go' +'Given_' +'Gesetz_' +'Forschungs' +'Engagement_' +'Einrichtung_' +'EADS_' +'Dennoch_' +'Bildungs' +'Beschäftigung_' +'Beschreibung_' +'Apartments_' +'Amt_' +'Alliance_' +'Air_' +'AIDS_' +'9' +'5' +'1997_' +'1996_' +'1995_' +'150_' +'01_' +'и' +'е' +'á_' +'Über_' +'zimmer_' +'wollte_' +'weltweite_' +'vertreten_' +'verloren_' +'unseres_' +'unlikely_' +'track_' +'tischen_' +'the' +'supporting_' +'suffering_' +'sub' +'stress_' +'strengthen_' +'starting_' +'stands_' +'standing_' +'signal_' +'selbstverständlich_' +'sea' +'saving_' +'rt' +'ries_' +'restrictions_' +'radical_' +'proper_' +'politisch_' +'piece_' +'physical_' +'persönlichen_' +'perspective_' +'per' +'olitik_' +'older_' +'moving_' +'mis' +'min_' +'medium_' +'manage_' +'maintained_' +'laws_' +'keits' +'ked_' +'kaufen_' +'jährlich_' +'ischer_' +'introduction_' +'introduce_' +'inside_' +'independence_' +'increases_' +'imports_' +'ik_' +'humanitarian_' +'housing_' +'historische_' +'guidelines_' +'gs' +'gold_' +'gerecht_' +'gave_' +'gar' +'fo' +'flexible_' +'fire_' +'fields_' +'falsch_' +'expression_' +'exist_' +'except_' +'eventually_' +'euch_' +'erten_' +'ep' +'entry_' +'employees_' +'emphasis_' +'eingeführt_' +'ee_' +'duty_' +'dir_' +'dia' +'delegation_' +'criminal_' +'collapse_' +'coffee_' +'claims_' +'chi' +'chaft_' +'cat' +'carefully_' +'car' +'bottom_' +'bestehenden_' +'begrüßen_' +'barkeit_' +'ausge' +'armed_' +'anyone_' +'angeht_' +'ah_' +'ad_' +'Wissenschaft_' +'Wein' +'Verb' +'Ungleichheit_' +'Teile_' +'THE_' +'Stärkung_' +'Staats' +'Staates_' +'Sehr_' +'Sehenswürdigkeiten_' +'Rotary_' +'Ro' +'Reaktion_' +'Produktions' +'Ph' +'Ne' +'Name_' +'Na' +'NI_' +'Mi' +'La' +'Kunst_' +'Kompromiss_' +'Ka' +'Israelis_' +'Irish_' +'Initiativen_' +'IS' +'Hintergrund_' +'Forderung_' +'Ereignisse_' +'Copenhagen_' +'Chi' +'Can_' +'Bur' +'Binnenmarkt_' +'Beispiele_' +'Bau_' +'Basis_' +'Barroso_' +'Bar' +'Aufbau_' +'Aspekt_' +'Anfrage_' +'36_' +'14' +''._' +' = _' +' ... 
_' +'ö' +'ée_' +'zusätzlich_' +'zation_' +'welches_' +'weiterer_' +'victory_' +'vergessen_' +'ver' +'unbe' +'ub' +'truly_' +'teilweise_' +'tar' +'suffered_' +'struggle_' +'ski_' +'shops_' +'seriously_' +'selected_' +'riesigen_' +'resort_' +'remember_' +'pursue_' +'purchase_' +'playing_' +'phrase_' +'ourselves_' +'ort_' +'oral_' +'on' +'no' +'möglichen_' +'ming_' +'mehreren_' +'lt' +'looks_' +'lichkeit_' +'leistungen_' +'ld_' +'ld' +'launch_' +'laufen_' +'lan' +'laden_' +'kürzlich_' +'kle' +'jo' +'ium_' +'ish_' +'ire_' +'intervention_' +'implementing_' +'he' +'hauptsächlich_' +'happy_' +'grundlegende_' +'geändert_' +'gerne_' +'fähigkeit_' +'fresh_' +'flexibility_' +'fish_' +'erinnern_' +'erhält_' +'equivalent_' +'enterprises_' +'ene_' +'email_' +'dynamic_' +'diplomatic_' +'declaration_' +'database_' +'counter_' +'contrast_' +'conflicts_' +'completed_' +'combat_' +'collective_' +'calling_' +'ber' +'benutzen_' +'automatisch_' +'asylum_' +'asset_' +'anstatt_' +'animal_' +'angezeigt_' +'ach_' +'Zi' +'Y' +'Volks' +'Vo' +'Vereinbarung_' +'Verbindungen_' +'Unter' +'Soviet_' +'Sorgen_' +'Ri' +'Projekte_' +'Pro_' +'Private_' +'Post' +'Pi' +'Organisationen_' +'Mitarbeiter_' +'Krieges_' +'Korruption_' +'Investitions' +'Institute_' +'Informations' +'IP_' +'He' +'Haushalt_' +'Gesetze_' +'Front_' +'Foundation_' +'Fortschritt_' +'Fort' +'February_' +'Familien' +'Entwicklungsländern_' +'Dra' +'Computer_' +'Ca' +'CAMBRIDGE_' +'Bre' +'Board_' +'Bo' +'Beziehung_' +'Aufgrund_' +'Another_' +'48_' +'13' +' /_' +'”, _' +'т' +'р' +'és_' +'är' +'Ökonomen_' +'zufolge_' +'zahlreichen_' +'warming_' +'wa' +'verlangen_' +'ven_' +'under' +'tz_' +'tu' +'translation_' +'tests_' +'terrace_' +'tasks_' +'ständig_' +'stations_' +'starken_' +'staatliche_' +'spacious_' +'sofort_' +'sin' +'siehe_' +'shower_' +'selection_' +'seemed_' +'science_' +'rn' +'ri_' +'responsibilities_' +'relax_' +'relatively_' +'prosperity_' +'promoting_' +'por_' +'platz_' +'partnership_' +'parliament_' +'opened_' +'ongoing_' +'obvious_' +'nf' +'nennen_' +'methods_' +'meetings_' +'mechanism_' +'les' +'langen_' +'labour_' +'ise_' +'internationaler_' +'installation_' +'ina' +'ill_' +'ierungs' +'ier' +'ide_' +'ice_' +'houses_' +'ha_' +'gutes_' +'größer_' +'goals_' +'gemäß_' +'gegenwärtig_' +'französische_' +'faster_' +'erzielen_' +'ergreifen_' +'erfolgen_' +'entwickelten_' +'entirely_' +'entered_' +'eingesetzt_' +'economists_' +'du' +'driving_' +'dollars_' +'display_' +'defined_' +'darunter_' +'danger_' +'danach_' +'dalam_' +'crimes_' +'corruption_' +'contract_' +'constitution_' +'charged_' +'cer' +'cancer_' +'bu' +'bre' +'bly_' +'biggest_' +'beruht_' +'benötigt_' +'believed_' +'beds_' +'ausschließlich_' +'assets_' +'ans' +'agen_' +'advance_' +'administrative_' +'ade' +'achten_' +'accordance_' +'a' +']] [[_' +'\\_' +'Zunächst_' +'Your_' +'Würde_' +'Wissen_' +'Waren_' +'Vietnam_' +'Verpflichtungen_' +'Verpflichtung_' +'Verhältnis_' +'Verfassungs' +'Unterschied_' +'Unfortunately_' +'TA' +'Syrien_' +'Straße_' +'San' +'SI' +'SE' +'Ru' +'Q_' +'Pre' +'Pacific_' +'Neben_' +'Mor' +'Monetary_' +'Miss' +'Mexiko_' +'Mexico_' +'Men' +'Medien_' +'Mal' +'Live_' +'Landwirtschaft_' +'Königreich_' +'Kultur' +'Kopf_' +'Je' +'Irland_' +'Internationalen_' +'Hotel' +'Hong_' +'Hoch' +'Hause_' +'Han' +'HIV_' +'HA' +'Geld' +'Formen_' +'Fahr' +'Every_' +'Einklang_' +'EC_' +'Dar' +'DI' +'Click_' +'Cha' +'Ce' +'Bus' +'Bra' +'Bi' +'Atom' +'Arten_' +'Angriff_' +'Abend_' +'95_' +'64_' +'1989_' +'', _' +'”._' +'ст' +'а' +'ür' +'ß' +'zuvor_' +'wi' +'weg' +'wages_' +'w' +'verfolgen_' 
+'umzusetzen_' +'trend_' +'tre' +'tly_' +'ti_' +'tel_' +'teilen_' +'summit_' +'significantly_' +'sets_' +'sektor_' +'scha' +'sagt_' +'sa' +'rr' +'rin_' +'reservation_' +'reported_' +'rely_' +'rejected_' +'recognize_' +'rechts' +'rasch_' +'qua' +'prime_' +'pri' +'plants_' +'pictures_' +'persons_' +'peaceful_' +'par' +'ou_' +'ou' +'opposite_' +'op' +'obviously_' +'nu' +'north_' +'ni' +'ng' +'nes_' +'nder_' +'nationaler_' +'nar' +'named_' +'moved_' +'mm_' +'mer_' +'manchmal_' +'machte_' +'ll' +'lessons_' +'learning_' +'krise_' +'ki_' +'initial_' +'igung_' +'iger_' +'ied_' +'hn_' +'helping_' +'hei' +'guaranteed_' +'gesprochen_' +'gender_' +'genannt_' +'gelten_' +'geleistet_' +'formal_' +'fisheries_' +'finanziellen_' +'finanzielle_' +'figure_' +'fat' +'extended_' +'extend_' +'explain_' +'experts_' +'enen_' +'dy_' +'durchaus_' +'drug_' +'dra' +'do' +'diseases_' +'deutsche_' +'cuisine_' +'courses_' +'couple_' +'cor' +'contrary_' +'constitutional_' +'commitments_' +'charges_' +'cast_' +'capable_' +'candidates_' +'bound_' +'beachten_' +'ban_' +'balanced_' +'außerordentlich_' +'argue_' +'appeal_' +'anzu' +'ang' +'allowing_' +'alliance_' +'allgemeine_' +'ages_' +'absolute_' +'abhängig_' +'Wähler_' +'Wegen_' +'Verhalten_' +'Umwelt' +'Transport_' +'Tradition_' +'Städte_' +'Stadt' +'Sol' +'Si' +'Schäden_' +'Schule_' +'SS_' +'Rezeption_' +'Report_' +'Q' +'Perhaps_' +'Paul_' +'Pa' +'PHP_' +'Niveau_' +'Ni' +'Nahen_' +'NE' +'Mu' +'Mitgliedschaft_' +'Militär' +'Merkel_' +'Mat' +'Located_' +'Lin' +'Leistung_' +'Las_' +'LI' +'Kong_' +'Klein' +'Kern' +'Jetzt_' +'Instrumente_' +'Hälfte_' +'Generation_' +'Gegen' +'Flug' +'Finanzkrise_' +'Far' +'Familien_' +'Erde_' +'Du_' +'Daten' +'Chancen_' +'Cameron_' +'Berichts_' +'BMW_' +'Auto' +'Ausweitung_' +'Ausbildung_' +'Aufenthalt_' +'Anstrengungen_' +'Anforderungen_' +'Am' +'Altstadt_' +'AR' +'25' +'15' +'11' +' — _' +' –, _' +'ы' +'é' +'è' +'Öl_' +'Ägypten_' +'yourself_' +'wine_' +'wiederum_' +'wenigen_' +'welt' +'weight_' +'vulnerable_' +'voice_' +'verlassen_' +'verantwortlich_' +'vast_' +'urgent_' +'ual_' +'te' +'tan' +'tahun_' +'supports_' +'studies_' +'structures_' +'ss' +'species_' +'south_' +'solve_' +'smoking_' +'sitting_' +'sion_' +'sierung_' +'ships_' +'sharing_' +'severe_' +'session_' +'select_' +'seien_' +'season_' +'schlagen_' +'remove_' +'relative_' +'recommend_' +'recession_' +'reaching_' +'race_' +'pu' +'provision_' +'proved_' +'prospects_' +'promotion_' +'promise_' +'practices_' +'positions_' +'photos_' +'photo_' +'pension_' +'owned_' +'out' +'organisations_' +'nisse_' +'names_' +'nahme_' +'mutual_' +'mountain_' +'minority_' +'micro' +'memory_' +'love_' +'ln_' +'lesen_' +'langfristige_' +'la' +'kitchen_' +'ker' +'kar' +'ir' +'improvements_' +'images_' +'hy' +'hundreds_' +'honourable_' +'gre' +'garantiert_' +'führenden_' +'fällt_' +'funktionieren_' +'frühen_' +'founded_' +'fighting_' +'felt_' +'eye_' +'exists_' +'exercise_' +'ethnic_' +'essentially_' +'equality_' +'entschieden_' +'entscheidender_' +'enk' +'elegant_' +'einzigen_' +'einge' +'dritten_' +'dinner_' +'defend_' +'defence_' +'currencies_' +'criticism_' +'crises_' +'compatible_' +'closely_' +'cha' +'budgetary_' +'bt_' +'ble' +'berücksichtigen_' +'automatically_' +'austerity_' +'arrangements_' +'arabischen_' +'anderes_' +'and' +'amp' +'ag_' +'Wunsch_' +'Worte_' +'West' +'Washington_' +'Wahl' +'Wachstums' +'Voraussetzungen_' +'Vielzahl_' +'Verwaltungs' +'Untersuchung_' +'Treffen_' +'Times_' +'Teil' +'Süd' +'Sweden_' +'Steuern_' +'Stand_' +'Sprache_' +'Sohn_' +'Serbia_' +'See_' +'Schlüssel' 
+'Safety_' +'Rome_' +'Regulation_' +'Rechtsvorschriften_' +'Phase_' +'PT_' +'Ost' +'NT' +'Monate_' +'Mindest' +'MO' +'Liberalisierung_' +'Krisen' +'Kontakt_' +'Kind_' +'Kar' +'Ju' +'Jean_' +'Iraqi_' +'Inseln_' +'Größe_' +'Grund' +'Grenze_' +'Gleichzeitig_' +'Gi' +'Gesundheits' +'Friedens' +'Erhöhung_' +'El' +'Ei' +'Durchführung_' +'Drittens_' +'De_' +'David_' +'Dann_' +'Buch_' +'Bel' +'Bank' +'Bal' +'Bahn_' +'Aspekte_' +'Anwendungen_' +'Anspruch_' +'Angst_' +'Anerkennung_' +'Al' +'Agreement_' +'65_' +'39_' +'з' +'überprüfen_' +'ón_' +'í' +'á' +'   . – _' +'za_' +'wor' +'wirksam_' +'weak_' +'war' +'wagen_' +'vieler_' +'verstanden_' +'usw_' +'us' +'universal_' +'unabhängig_' +'uf' +'tz' +'turns_' +'tung_' +'tourist_' +'touch_' +'tive_' +'tis' +'testing_' +'surrounding_' +'sugar_' +'subsidies_' +'stre' +'sti' +'ssi' +'sonst_' +'soft_' +'societies_' +'serves_' +'ser' +'schreiben_' +'schneller_' +'scheme_' +'russische_' +'run' +'rit' +'richten_' +'representative_' +'remained_' +'reich_' +'recognise_' +'rechts_' +'ra' +'prüfen_' +'productive_' +'pra' +'po' +'pick_' +'peoples_' +'pen' +'payment_' +'participate_' +'parliaments_' +'parents_' +'over' +'origin_' +'organizations_' +'onen_' +'ogen_' +'occur_' +'musste_' +'mu' +'motion_' +'mode_' +'mo' +'maximum_' +'manner_' +'mandate_' +'mal' +'lä' +'losses_' +'lokalen_' +'lity_' +'lesson_' +'legt_' +'leaving_' +'le' +'kurzem_' +'killed_' +'kept_' +'jener_' +'ized_' +'ionen_' +'institution_' +'ig' +'ici' +'ia' +'hostels_' +'hit_' +'gung_' +'griechischen_' +'gla' +'gestimmt_' +'gegenwärtigen_' +'focused_' +'father_' +'factors_' +'extreme_' +'expense_' +'expenditure_' +'exit_' +'enorme_' +'emergency_' +'element_' +'einfache_' +'ehen_' +'east_' +'drugs_' +'discovered_' +'differences_' +'destruction_' +'demokratische_' +'dank_' +'da' +'cu' +'coordination_' +'consideration_' +'confirmed_' +'command_' +'choices_' +'ce' +'buildings_' +'brings_' +'bility_' +'bi' +'bestätigen_' +'belief_' +'bath_' +'außen_' +'ausreichend_' +'ausdrücklich_' +'aufzu' +'aufnehmen_' +'attractive_' +'arrival_' +'ard_' +'apart_' +'aims_' +'ably_' +'abe_' +'Ziel' +'Zeitraum_' +'Weltbank_' +'War' +'Vom_' +'Verwaltung_' +'Venezuela_' +'Var' +'Umfang_' +'Tour_' +'Tor' +'To' +'Thank_' +'Stu' +'Stil_' +'Stellung_' +'Sat' +'Santa_' +'Sanktionen_' +'Sand' +'Saddam_' +'SQL_' +'Rück' +'Russlands_' +'Roman_' +'Roma_' +'Richtlinien_' +'Rede_' +'Rechts_' +'Quelle_' +'Prozesses_' +'Pen' +'Patienten_' +'Other_' +'Option_' +'Mas' +'Mar' +'Luft' +'Leit' +'Leistungs' +'Lebanon_' +'Kyoto_' +'Krankheit_' +'Konflikt_' +'Klimawandel_' +'Kinder' +'Kan' +'Jeder_' +'Hel' +'Hauptstadt_' +'Ha' +'Gründe_' +'Green_' +'Gespräche_' +'Gemeinsamen_' +'Gebieten_' +'Führer_' +'Fähigkeit_' +'Funktionen_' +'Fonds_' +'Find_' +'Faktoren_' +'FR_' +'Emissionen_' +'Ein' +'Each_' +'Dadurch_' +'Charakter_' +'Brüssel_' +'Berichterstatterin_' +'Beide_' +'Begriff_' +'Beach_' +'BA' +'Auf' +'Administration_' +'80' +'23' +'21' +'1' +'.“ _' +' (' +'ären_' +'äre_' +'äge_' +'Übereinstimmung_' +'zählen_' +'zuge' +'yesterday_' +'wirtschaftlich_' +'wichtigste_' +'westlichen_' +'weiteres_' +'wachsende_' +'vous_' +'verstärken_' +'verfolgt_' +'van' +'unter' +'uni' +'ungs' +'und' +'umfassenden_' +'um' +'tzen_' +'trotzdem_' +'tor' +'talking_' +'systeme_' +'surrounded_' +'supposed_' +'super' +'succeed_' +'substances_' +'su' +'stru' +'spielt_' +'sovereignty_' +'sor' +'sit' +'sell_' +'seitens_' +'schools_' +'ru' +'rte' +'rs' +'rly_' +'rental_' +'remote_' +'reicht_' +'referred_' +'records_' +'recognition_' +'radio_' +'quick_' +'properly_' 
+'producers_' +'processes_' +'prevention_' +'pp' +'patients_' +'pan' +'packages_' +'pa' +'ot_' +'osi' +'ordinary_' +'müssten_' +'myself_' +'mittlerweile_' +'miss' +'militärischen_' +'mas' +'markt_' +'malaria_' +'losing_' +'looked_' +'ler' +'len' +'legitimate_' +'langsam_' +'l' +'kulturelle_' +'kt' +'konkrete_' +'keinerlei_' +'keiner_' +'intellectual_' +'informiert_' +'informed_' +'imposed_' +'impose_' +'import_' +'immigrants_' +'imagine_' +'households_' +'her' +'har' +'hand' +'han_' +'guide_' +'gruppe_' +'grow_' +'golf_' +'ght_' +'geschützt_' +'geb' +'gap_' +'ft' +'früher_' +'freiheit_' +'flows_' +'flat_' +'fit_' +'fell_' +'explanation_' +'ex_' +'ets_' +'est' +'eri' +'ended_' +'ek_' +'eight_' +'economics_' +'dunia_' +'division_' +'discover_' +'devices_' +'device_' +'detail_' +'derzeitigen_' +'depends_' +'del' +'definition_' +'deeply_' +'cycle_' +'cri' +'covered_' +'consultation_' +'conducted_' +'concluded_' +'compensation_' +'colleague_' +'coal_' +'cies_' +'cars_' +'bringing_' +'born_' +'bor' +'bon' +'blocks_' +'block_' +'bildet_' +'beziehen_' +'bezeichnet_' +'bestimmen_' +'beschlossen_' +'bemüht_' +'beigetragen_' +'beaches_' +'ban' +'ball_' +'back' +'ba' +'ausgesetzt_' +'attempts_' +'ati' +'at' +'assume_' +'asking_' +'arguments_' +'appeared_' +'andererseits_' +'an' +'allgemeinen_' +'allgemein_' +'al' +'ai' +'ahmen_' +'agency_' +'Wohlstand_' +'Will_' +'Widerstand_' +'Villa_' +'Very_' +'VO' +'Trek_' +'Ton' +'Tod_' +'Test' +'Ta' +'Streit' +'Straßen' +'Standard' +'Sprachen_' +'Speicher' +'Skype_' +'Sieg_' +'Sa' +'Rückgang_' +'Risiko' +'Regelung_' +'Real' +'Que' +'Pu' +'Produkt' +'Problemen_' +'Praxis_' +'Partnerschaft_' +'Ordnung_' +'OS_' +'OR_' +'Not' +'Nobel_' +'Nevertheless_' +'Media_' +'Mann_' +'Macht' +'MI' +'Leider_' +'Lei' +'Lebens_' +'Kriterien_' +'Kommunikation_' +'Kombination_' +'Karte_' +'Inhalt_' +'Industrie' +'Identität_' +'IT' +'Hä' +'Hostel_' +'Handels_' +'Geschäftsordnung_' +'Geldpolitik_' +'Geb' +'Fra' +'Foto_' +'Foreign_' +'Forderungen_' +'Februar_' +'Fax_' +'Experten_' +'Entwurf_' +'Entwicklungs' +'End' +'Ed' +'ER' +'Download_' +'Direkt' +'Dimension_' +'DE' +'Control_' +'Bos' +'Balkan_' +'Austrian_' +'Aussicht_' +'Aufgaben_' +'Arm' +'Analyse_' +'Allgemeinen_' +'Ale' +'Ala' +'Absch' +'AN' +'192' +'™ _' +'– _' +'м' +'zurückge' +'zero_' +'work' +'weit' +'vorge' +'virtually_' +'village_' +'ur' +'unterschiedliche_' +'unternommen_' +'unmittelbar_' +'tut_' +'ts' +'trifft_' +'trans' +'title_' +'temporary_' +'telephone_' +'substantial_' +'stance_' +'square_' +'sprach_' +'sports_' +'spa_' +'sovereign_' +'sized_' +'sieben_' +'sicherlich_' +'sha' +'sensitive_' +'senior_' +'schönen_' +'sage_' +'returns_' +'represented_' +'relaxing_' +'registered_' +'reflects_' +'referendum_' +'reden_' +'rag' +'quantitative_' +'profits_' +'producing_' +'print_' +'pi' +'perfectly_' +'pan_' +'overcome_' +'ord' +'or' +'onto_' +'olo' +'nts_' +'ngs_' +'newly_' +'nan' +'mussten_' +'multi' +'mor' +'ministers_' +'meist_' +'match_' +'marketing_' +'macroeconomic_' +'länder_' +'lten_' +'lovely_' +'lim' +'launched_' +'kraft_' +'klare_' +'kla' +'ki' +'keeping_' +'itu' +'isierung_' +'ise' +'ip' +'instance_' +'install_' +'inequality_' +'il' +'identify_' +'ian' +'hot_' +'ho' +'hinweisen_' +'heiten_' +'head' +'hardly_' +'groß_' +'globalization_' +'gli' +'gewählt_' +'gewisse_' +'gestalten_' +'ged_' +'ge' +'furniture_' +'formed_' +'forget_' +'flow_' +'fel' +'federal_' +'farming_' +'et' +'erstellt_' +'ernst_' +'erle' +'ergeben_' +'erfahren_' +'entschlossen_' +'enabling_' +'emphasise_' +'elle_' +'ek' +'ehr' +'edi' +'ear' 
+'distributed_' +'disputes_' +'destroyed_' +'deserves_' +'demanding_' +'decide_' +'dealing_' +'crew_' +'contribute_' +'continuing_' +'concrete_' +'comment_' +'combined_' +'combating_' +'cohesion_' +'cards_' +'button_' +'bul' +'broader_' +'briefly_' +'boom_' +'blood_' +'bezahlen_' +'bewegen_' +'bee' +'background_' +'auto' +'ausgaben_' +'aufge' +'atau_' +'argument_' +'ara_' +'ar' +'angeboten_' +'ancient_' +'ana_' +'am' +'aktiv_' +'afternoon_' +'ae' +'ada_' +'ad' +'ach' +'accounts_' +'accompanied_' +'accessible_' +'Zusammenbruch_' +'Zentralbanken_' +'Ze' +'Wo' +'Wirkung_' +'Verlust_' +'Unternehmens' +'Texte_' +'TI' +'Studie_' +'Sprach' +'Sport_' +'Spar' +'Sonder' +'Selbst' +'Sein_' +'School_' +'Schluss_' +'Schaden_' +'Runde_' +'Reform' +'Priorität_' +'Politik' +'Over_' +'Nord' +'Nach' +'Musik' +'Menschenrechts' +'Menge_' +'Madrid_' +'MA' +'Li' +'Law_' +'Lateinamerika_' +'Kredit' +'Kon' +'Justiz_' +'IN' +'Hinweis_' +'Hill_' +'Grundrechte_' +'Grunde_' +'Grad_' +'Good_' +'Gerichtshof_' +'Gemeinschafts' +'Enjoy_' +'Earth_' +'EL' +'Deutsche_' +'Dass_' +'Dan' +'DA' +'Cre' +'Con' +'Cho' +'Charta_' +'Cap' +'CO' +'Bus_' +'Budget_' +'Book_' +'Bon' +'Beihilfen_' +'Bay_' +'BR' +'Ausnahme_' +'Armee_' +'Apple_' +'Antrag_' +'Anreize_' +'Akteure_' +'Airbus_' +'AG_' +'@_' +': “_' +'44_' +'42_' +'1945_' +'194' +'190' +'188' +'12' +' €_' +'“' +'де' +'д' +'übernommen_' +'ör' +'zurück' +'zufrieden_' +'zeit' +'ysteme_' +'yo' +'wünschen_' +'wirtschaft_' +'wind_' +'wie' +'werk_' +'vorschlagen_' +'vorher_' +'vorgelegt_' +'vor' +'verändert_' +'verdient_' +'verbessert_' +'usual_' +'usi' +'una_' +'umfasst_' +'ue' +'täglich_' +'tting_' +'tten_' +'transfers_' +'ther_' +'th' +'tera' +'technische_' +'survive_' +'super_' +'sun' +'suite_' +'stone_' +'statements_' +'spring_' +'sold_' +'sobald_' +'sit_' +'secret_' +'seats_' +'schwe' +'runs_' +'roads_' +'rein_' +'regardless_' +'refugee_' +'recognized_' +'rch' +'rat_' +'rat' +'railway_' +'rage_' +'purposes_' +'protecting_' +'promised_' +'processing_' +'primarily_' +'precise_' +'politisches_' +'platform_' +'permitted_' +'paragraph_' +'organization_' +'offizielle_' +'occasions_' +'ob' +'nötig_' +'nten_' +'nte_' +'nta' +'notwendigen_' +'normalerweise_' +'nord' +'nit' +'niemand_' +'nge_' +'nearby_' +'ndo_' +'naturally_' +'na' +'museums_' +'mostly_' +'mini_' +'mini' +'metro_' +'metres_' +'menschlichen_' +'mechanisms_' +'luxurious_' +'liquidity_' +'leisure_' +'learned_' +'lay_' +'latter_' +'lage_' +'kh' +'journalists_' +'itt_' +'issued_' +'involve_' +'initially_' +'incentives_' +'ina_' +'impressive_' +'implications_' +'ik' +'id_' +'höheren_' +'höchsten_' +'ht' +'household_' +'hostel_' +'hoher_' +'hn' +'hip_' +'hinzu' +'heits' +'guter_' +'gun' +'gu' +'go' +'gewährleistet_' +'gewa' +'gespielt_' +'gelungen_' +'gel_' +'gegründet_' +'gegenwärtige_' +'gefallen_' +'garantieren_' +'gained_' +'führung_' +'fun_' +'forum_' +'fordert_' +'for' +'finding_' +'finanziert_' +'fied_' +'feststellen_' +'festgelegt_' +'feed_' +'fantastic_' +'existence_' +'exclusively_' +'excessive_' +'erwiesen_' +'erleben_' +'erklärte_' +'ering_' +'erfolgreichen_' +'erfolg' +'engineering_' +'endorse_' +'end' +'electronic_' +'electricity_' +'einzusetzen_' +'einsetzen_' +'eingerichtet_' +'eingehen_' +'effectiveness_' +'dé' +'dri' +'diversity_' +'disaster_' +'determine_' +'danke_' +'correct_' +'convenient_' +'communications_' +'coast_' +'club_' +'cho' +'chief_' +'chen' +'centuries_' +'cation_' +'category_' +'bur' +'brachte_' +'booking_' +'bla' +'bitten_' +'besseren_' +'bedroom_' +'availability_' +'aufzunehmen_' +'ationen_' 
+'army_' +'ari' +'appreciate_' +'apa' +'ante_' +'anbieten_' +'ama' +'ai_' +'agencies_' +'af' +'addressing_' +'ace' +'aba' +'ab' +']]_' +'Zweiten_' +'Zeit' +'Z_' +'YORK_' +'Within_' +'Willen_' +'Wieder' +'White_' +'Wahrheit_' +'WI' +'Verteidigung_' +'Vereinbarungen_' +'Verbrechen_' +'Val' +'Ur' +'Unterschiede_' +'Trans' +'Those_' +'Test_' +'Tau' +'Taiwan_' +'Tages_' +'TNG_' +'Städten_' +'Studien_' +'Spiele_' +'Son' +'Sommer_' +'Sind_' +'Sin' +'She' +'Sha' +'Seine_' +'Schul' +'Sarkozy_' +'SP' +'Regimes_' +'RA' +'Prä' +'Problems_' +'Prinzipien_' +'Premierminister_' +'Preis' +'Plaza_' +'Pin' +'Perspektive_' +'Page_' +'PS' +'Opposition_' +'ON' +'Nor_' +'Nein_' +'Neben' +'Muslims_' +'Multi' +'Meiner_' +'Meanwhile_' +'Location_' +'Libanon_' +'Lassen_' +'Lang' +'Kriegs' +'Kredite_' +'Kor' +'Kontroll' +'Konsens_' +'Klimaanlage_' +'Kirk_' +'King_' +'Kenntnis_' +'Jedes_' +'JavaScript_' +'Jahrzehnten_' +'Jahres' +'Island_' +'Iranian_' +'Ideen_' +'IC' +'Holz' +'Gefahren_' +'Finanzierungs' +'Fest' +'Facilities_' +'FI' +'Executive_' +'Erwartungen_' +'Erd' +'Entwicklungsländer_' +'Einzel' +'Einwohner_' +'Eigen' +'Doha_' +'Des' +'Depression_' +'Data_' +'Cuba_' +'Chile_' +'Bä' +'Bri' +'Ber' +'Bas' +'Ban' +'BE' +'AP' +'AM' +'> _' +'800_' +'52_' +'1991_' +'10' +'05_' +'.” _' +'."_' +') ' +' € _' +' > _' +'қа' +'я' +'к' +'в' +'б' +'č_' +'übertragen_' +'überrascht_' +'Österreich_' +'zugleich_' +'zo' +'zer' +'zentralen_' +'yr' +'winter_' +'widespread_' +'wesentlichen_' +'welcher_' +'weiter' +'wars_' +'warning_' +'wage_' +'volle_' +'vic' +'verwende_' +'verurteilt_' +'vehicles_' +'vehicle_' +'uss_' +'update_' +'unit_' +'unde' +'u' +'tä' +'troops_' +'tro' +'traditionelle_' +'tourism_' +'tori' +'tisch_' +'tend_' +'temp' +'technischen_' +'taste_' +'tal' +'swa' +'sustainability_' +'surplus_' +'sur' +'strengthening_' +'store_' +'sterben_' +'sport_' +'spoke_' +'spectrum_' +'sought_' +'solcher_' +'smus_' +'sme' +'situations_' +'sing' +'signs_' +'sel' +'schwierigen_' +'sar' +'sam' +'ré' +'rung_' +'rum' +'rti' +'rio' +'rin' +'rf' +'returned_' +'resource_' +'resolved_' +'replaced_' +'rei' +'regular_' +'raten_' +'push_' +'ption_' +'propose_' +'proc' +'prior_' +'preis' +'pleasure_' +'planned_' +'pie' +'phi' +'persönlich_' +'personally_' +'ought_' +'ora' +'opposed_' +'opinions_' +'ome' +'ok' +'offenen_' +'of' +'oe' +'nte' +'niemals_' +'ngs' +'nen' +'müsste_' +'möglichst_' +'mö' +'motor' +'mont' +'mon' +'mix_' +'mental_' +'meant_' +'mat' +'ma' +'luxury_' +'lp' +'li' +'letzter_' +'law' +'künftige_' +'kulturellen_' +'kostenlos_' +'ko_' +'knows_' +'kleiner_' +'kamen_' +'jeweils_' +'jam' +'intensive_' +'im' +'ile_' +'ie' +'ichen_' +'hrt_' +'hof_' +'hel' +'gruppen_' +'grundlegenden_' +'greenhouse_' +'greatly_' +'gra' +'gone_' +'glo' +'gewissen_' +'gan' +'führende_' +'fruit_' +'freue_' +'fi' +'fairly_' +'eyes_' +'essen_' +'erscheinen_' +'ersch' +'erg' +'erforderlichen_' +'era' +'enormen_' +'eng_' +'enabled_' +'elite_' +'electoral_' +'durchzuführen_' +'drei' +'dishes_' +'dining_' +'dienen_' +'derart_' +'depend_' +'definiert_' +'ct_' +'convinced_' +'condition_' +'cing_' +'chl' +'cher' +'cas' +'ca' +'burg_' +'budgets_' +'break' +'black_' +'bie' +'beschäftigt_' +'beobachten_' +'bela' +'bekämpfen_' +'basiert_' +'ay_' +'author_' +'ange' +'ambitious_' +'ale' +'ain_' +'afrikanischen_' +'aff' +'adoption_' +'acts_' +'acceptable_' +'absence_' +'abgeschlossen_' +'[[_' +'Zug' +'Ziffer_' +'Zer' +'Zentral' +'Wirklichkeit_' +'Win' +'Weitere_' +'Wandel_' +'WLAN_' +'Völker_' +'Vorbereitung_' +'Volkes_' +'Vienna_' +'Verkehrs' +'Verkehr_' +'Us' 
+'Umfeld_' +'Tele' +'Teilnahme_' +'Tar' +'Swedish_' +'Summit_' +'Suche_' +'Strukturen_' +'Straf' +'Stellen_' +'Stadtzentrum_' +'Sport' +'Spitze_' +'Spiel' +'Special_' +'Sollte_' +'Sel' +'Science_' +'Sche' +'Samsung_' +'Saint_' +'Saa' +'Rou' +'Rest' +'Research_' +'Rahmen' +'Protokoll_' +'Pay' +'Pan' +'Paket_' +'Optionen_' +'ON_' +'Nutzer_' +'Mon' +'Mitteln_' +'Menschen' +'Maß_' +'Martin_' +'Mac' +'MEPs_' +'Los_' +'LONDON_' +'Körper_' +'Kräfte_' +'Konvent_' +'Kommunikations' +'Kil' +'Kandidaten_' +'Java_' +'Japans_' +'Gu' +'GmbH_' +'General' +'Gefühl_' +'Gedanken_' +'Gebiete_' +'Gas' +'Garten_' +'Fähigkeiten_' +'For' +'Firstly_' +'Fach' +'Export' +'Ex' +'Erklärungen_' +'Eisenbahn' +'ES' +'EA' +'Druck' +'Dor' +'Der' +'DS' +'Czech_' +'Ci' +'Chris_' +'Chinesen_' +'Check_' +'Che' +'CIA_' +'CH' +'Bor' +'Beweis_' +'Beschäftigungs' +'Bei' +'Ball' +'Bade' +'BI' +'Atlantic_' +'Argentinien_' +'Apartment_' +'Angelegenheit_' +'Amerikaner_' +'Aktien' +'Ag' +'Absicht_' +'Ab' +'AC' +'?' +'47_' +'16' +'06_' +'03_' +'00' +'...' +',' +'!' +'і' +'п' +'ни' +'н' +'л' +'ück' +'übrigen_' +'öl' +'öffentlich_' +'ía_' +'än' +'Überprüfung_' +'Ü' +'zweifellos_' +'zentrum_' +'zahl' +'ystem_' +'xi' +'wirken_' +'welt_' +'wall_' +'waiting_' +'wait_' +'wachsenden_' +'wa_' +'vorgesehenen_' +'vorgeschlagenen_' +'visitors_' +'vir' +'verstärkt_' +'verleihen_' +'urs_' +'unmöglich_' +'unity_' +'unfortunately_' +'ug' +'uer' +'uel' +'typical_' +'tt' +'tit' +'tionen_' +'threats_' +'threatened_' +'tet_' +'teile_' +'sustained_' +'sur_' +'suitable_' +'submit_' +'strongly_' +'strength_' +'stimulus_' +'steuer' +'ster' +'steigern_' +'stattfinden_' +'sozial' +'son' +'sky_' +'ski' +'sinnvoll_' +'sight_' +'settlement_' +'sen' +'selbst' +'seite_' +'sei' +'seat_' +'schriftlich_' +'schlecht_' +'scher_' +'scheinen_' +'rou' +'rk' +'ride_' +'rge' +'reply_' +'rep' +'rend' +'remaining_' +'relief_' +'reliable_' +'regimes_' +'reb' +'reagieren_' +'raising_' +'quarter_' +'prove_' +'prospect_' +'proposing_' +'prison_' +'praktisch_' +'possibly_' +'plenty_' +'pattern_' +'outdoor_' +'operate_' +'ola' +'obtain_' +'nsi' +'nommen_' +'nom' +'nk_' +'neighbors_' +'necessarily_' +'nda' +'mögen_' +'mp' +'movements_' +'monitor_' +'mm' +'meter_' +'med_' +'managing_' +'maintenance_' +'lor' +'loo' +'lokale_' +'lly_' +'liked_' +'lier' +'lie_' +'leu' +'length_' +'legitimacy_' +'lad' +'kti' +'kostet_' +'konzentriert_' +'ite' +'islands_' +'invest_' +'interface_' +'ini' +'ingly_' +'immt_' +'ih' +'iel' +'identified_' +'hundred_' +'hr' +'hoffen_' +'hing_' +'highlight_' +'hi' +'heißen_' +'hearing_' +'harus_' +'han' +'halt_' +'globalisation_' +'gerichtet_' +'geraten_' +'generated_' +'gelegt_' +'gehalten_' +'freundlich_' +'frei' +'foot_' +'fonds_' +'festgestellt_' +'false_' +'falling_' +'experienced_' +'estate_' +'eröffnet_' +'erwähnt_' +'erstellen_' +'erfolgt_' +'ered_' +'entscheidenden_' +'entscheidende_' +'enten_' +'ent' +'enhance_' +'ene' +'ending_' +'enables_' +'eg' +'edited_' +'ed' +'ect' +'drop_' +'drink_' +'dramatic_' +'didn_' +'demonstrated_' +'delivered_' +'delay_' +'dal' +'customer_' +'ction_' +'credibility_' +'counter' +'controlled_' +'contracts_' +'considering_' +'confirm_' +'conclusions_' +'client_' +'cit' +'church_' +'chosen_' +'chain_' +'browser_' +'bo' +'bin' +'bezahlt_' +'betroffen_' +'besuchen_' +'besonderen_' +'besitzt_' +'beschränkt_' +'bequem_' +'behalten_' +'begonnen_' +'bedeuten_' +'bas' +'bahwa_' +'aut_' +'ausgesprochen_' +'audio_' +'ative_' +'association_' +'argued_' +'approximately_' +'applies_' +'applicable_' +'ano' +'alt' +'allies_' +'alb' 
+'akzeptiert_' +'aka' +'aircraft_' +'afford_' +'advantages_' +'adults_' +'adjustment_' +'absch' +']]._' +'Zahlen_' +'Yu' +'Währungsunion_' +'Women_' +'Without_' +'Wien_' +'Webseite_' +'Web' +'Vorsitzenden_' +'Vorschlägen_' +'Voraussetzung_' +'Verhandlungs' +'Verg' +'Verbreitung_' +'Verbraucher' +'Unternehmens_' +'USD_' +'USB_' +'UR' +'Tro' +'Then_' +'Thailand_' +'Termin' +'Ter' +'Technik_' +'Tal' +'Tages' +'TE' +'Symbol_' +'Sub' +'Start_' +'Sitzung_' +'Sim' +'Schutz' +'Schau' +'Reise_' +'Rechnung_' +'Ratsvorsitz_' +'Pool_' +'Player_' +'Plan' +'Partei' +'Par' +'Palästinenser_' +'Palace_' +'Pact_' +'PPE_' +'Ob' +'OECD_' +'Nr_' +'Note_' +'Nos_' +'Nigeria_' +'Nacht' +'Märkten_' +'Myanmar_' +'Munich_' +'Mittelpunkt_' +'Mitgliedern_' +'Mission_' +'Ministerpräsident_' +'Minderheiten_' +'Micro' +'Market_' +'Mag' +'Len' +'Laufe_' +'Labour_' +'Kunst' +'Kooperation_' +'Kontinent_' +'Kim_' +'Israels_' +'Indian_' +'Hussein_' +'Hostelsclub_' +'Handeln_' +'Hamburg_' +'Hal' +'Gründung_' +'Glaubwürdigkeit_' +'Gesch' +'Geh' +'Frühstücksbuffet_' +'Frei' +'Fi' +'Fernsehen_' +'Fal' +'Faktor_' +'Express_' +'Environment_' +'Entwicklungen_' +'Energy_' +'Eltern_' +'Due_' +'Dienste_' +'Die' +'Dep' +'Dem_' +'Datenbank_' +'Cyprus_' +'Cor' +'Company_' +'Cold_' +'Charter_' +'Charles_' +'Char' +'CI' +'CAN' +'Buch' +'Brussels_' +'Blog_' +'Bild' +'Bewertung_' +'Belarus_' +'Before_' +'Bedenken_' +'Back' +'Audio_' +'Association_' +'Arbeitskräfte_' +'Alt' +'Aktionen_' +'Adresse_' +'Abgeordnete_' +'AT' +'3D_' +'37_' +'24' +'20' +'193' +'191' +'09_' +'03' +'/ _' +'/' +') - _' +'() _' +'"' +' -- _' +'” (_' +'н_' +'е_' +'überzeugen_' +'Öffnung_' +'zusätzlichen_' +'zi' +'yn' +'yes_' +'ye_' +'wächst_' +'wood_' +'wonderful_' +'wishes_' +'wireless_' +'weist_' +'weather_' +'wave_' +'wahr' +'vorzu' +'vorgesehen_' +'visible_' +'vis_' +'verändern_' +'verteidigen_' +'vert' +'verlangt_' +'verhindert_' +'ven' +'uren_' +'una' +'uk' +'tzung_' +'tur' +'trouble_' +'trip' +'trillion_' +'treaty_' +'totally_' +'top' +'tele' +'teil_' +'tas_' +'symbol_' +'swi' +'sum_' +'successfully_' +'streets_' +'strategischen_' +'strategische_' +'ste' +'stag' +'sso' +'spricht_' +'spot_' +'somewhat_' +'solely_' +'sodass_' +'sichern_' +'scho' +'schlägt_' +'schi' +'sch' +'sbe' +'sauna_' +'sale_' +'rta' +'roughly_' +'rischen_' +'rg_' +'restore_' +'residential_' +'rescue_' +'removed_' +'religion_' +'regularly_' +'reflection_' +'ree' +'rechte_' +'reaction_' +'rare_' +'qualität_' +'preparation_' +'preise_' +'populist_' +'pollution_' +'pli' +'pleasant_' +'pit' +'pin' +'personnel_' +'perform_' +'patent_' +'passiert_' +'passed_' +'outstanding_' +'ott' +'orm' +'op_' +'ology_' +'olin' +'ol_' +'od' +'noise_' +'nke' +'nights_' +'nicht' +'ndi' +'muslimischen_' +'mpe' +'motor_' +'momentan_' +'ml' +'mindestens_' +'meaningfully_' +'mba' +'markt' +'manche_' +'maintaining_' +'lla' +'lived_' +'linie_' +'liches_' +'liberalisation_' +'let' +'lar' +'lan_' +'kur' +'kontrollieren_' +'kom' +'kin' +'ken' +'karte_' +'jenen_' +'jemand_' +'jegliche_' +'japanische_' +'ita' +'iso' +'installed_' +'ins' +'ingen_' +'inform_' +'inevitable_' +'index_' +'inden_' +'incomes_' +'imbalances_' +'ien' +'ida' +'hä' +'humanity_' +'http_' +'hoo' +'hingegen_' +'hi_' +'heavy_' +'hardware_' +'guarantees_' +'grammatically_' +'gradually_' +'gr' +'gezogen_' +'gestärkt_' +'genügend_' +'genutzt_' +'generations_' +'gene' +'gem' +'gefordert_' +'gan_' +'fördert_' +'free' +'frage_' +'forest_' +'flo' +'firstly_' +'fest' +'fer' +'fen' +'female_' +'fehlt_' +'fees_' +'fears_' +'fe' +'ez_' +'explained_' +'excess_' 
+'exception_' +'ex' +'estimated_' +'es' +'erreichbar_' +'erinnert_' +'erf' +'er' +'entstanden_' +'entlang_' +'entitled_' +'enst' +'eni' +'ender_' +'ende' +'ences_' +'en' +'eliminate_' +'ektor_' +'einerseits_' +'ei' +'egen_' +'echte_' +'dro' +'driver_' +'driven_' +'door_' +'discussing_' +'discrimination_' +'dis' +'direkten_' +'diplomacy_' +'dimension_' +'dent' +'deiner_' +'declared_' +'daraus_' +'dapat_' +'dans_' +'cru' +'cou' +'copy_' +'convergence_' +'conventional_' +'contextually_' +'containing_' +'contained_' +'cons' +'connections_' +'conce' +'computers_' +'col' +'co' +'classic_' +'ckt_' +'civilian_' +'chten_' +'charming_' +'channels_' +'cash_' +'capitalism_' +'cad' +'blue_' +'bislang_' +'bildung_' +'bewältigen_' +'betreffen_' +'beschleunigen_' +'ben' +'bel' +'bei' +'beginnt_' +'bef' +'barriers_' +'bal' +'baby_' +'ba_' +'ausgezeichneten_' +'aufzubauen_' +'aufgezahlt_' +'attitude_' +'assure_' +'asi_' +'arbeits' +'appointed_' +'amounts_' +'alter_' +'alle' +'album_' +'ala' +'ag' +'affects_' +'adi' +'acknowledge_' +'achieving_' +'acce' +'abuse_' +'Zustand_' +'Zeichen_' +'Ya' +'Wohl' +'Wird_' +'Wiki' +'Wettbewerbs' +'Wa' +'Volkswirtschaft_' +'Veränderung_' +'Verträge_' +'Verf' +'Verbot_' +'Verbesserungen_' +'Up' +'Universität_' +'Under_' +'Tun' +'Ti' +'The' +'Süden_' +'Sy' +'Sur' +'Stärke_' +'Straßen_' +'Stellungnahme_' +'Stability_' +'Staats_' +'Spe' +'Slo' +'Ski' +'Sitz_' +'Schulen_' +'SM' +'SA_' +'Rund' +'Rom' +'Regional_' +'Regierungschefs_' +'Ratschlag_' +'Quellen_' +'Puerto_' +'Programms_' +'Prinzip_' +'Pra' +'Port' +'Para' +'Paper_' +'Palestine_' +'PL' +'Os' +'Orten_' +'Open' +'OK_' +'Norden_' +'Non_' +'Netz_' +'Nacht_' +'NO' +'Mont' +'Ministers_' +'Mel' +'Mehr_' +'May' +'Man' +'Malaysia_' +'Lord_' +'Likewise_' +'Like_' +'Lebensmittel' +'LE' +'LA' +'Konsum_' +'Komfort_' +'Klima_' +'KDE_' +'Jahrzehnt_' +'Islands_' +'Innen' +'Herstellung_' +'Hersteller_' +'Herrschaft_' +'Grün' +'Gro' +'Gesetzgebung_' +'Gerichts' +'Geist_' +'Gegner_' +'Ge' +'Gast' +'Gang_' +'GE' +'Fotos_' +'Foto' +'Flüchtlinge_' +'Finland_' +'Finde_' +'Falls_' +'Est' +'Einwanderung_' +'Einstellung_' +'EC' +'Does_' +'Dienstleistungs' +'Dia' +'Demokraten_' +'Deb' +'Datei' +'DO' +'DC_' +'Class_' +'Canada_' +'C5_' +'Botschaft_' +'Beim_' +'BO' +'Avi' +'Auto_' +'Aufstieg_' +'As' +'Arbeitsmarkt_' +'Arbeiter_' +'Amendments_' +'Alter_' +'Ai' +'Agency_' +'Adobe_' +'Action_' +'Abschließend_' +'AU' +'://_' +'700_' +'49_' +'250_' +'1979_' +'1970_' +'196' +'186' +'08_' +'07_' +'.“_' +'-/_' +'," _' +'): _' +') (_' +'(' +'' ' +''' +' ….. 
_' +' )._' +'š' +'überlassen_' +'ös' +'öffnen_' +'ès_' +'ätzen_' +'äng' +'ähnlich_' +'Übersetzung_' +'Übersch' +'Ökonomie_' +'Ö' +'zy' +'zustimmen_' +'zusammenge' +'zusammen' +'zugrunde_' +'zuerst_' +'zeichen_' +'ys' +'yp' +'yer_' +'ws_' +'worst_' +'wonder_' +'wollten_' +'windows_' +'willkommen_' +'wesentliche_' +'warten_' +'wal' +'wahren_' +'vors' +'visited_' +'vis' +'versions_' +'verschärft_' +'verehrte_' +'vere' +'va_' +'ux_' +'ute' +'ut' +'ust' +'use' +'ursprünglichen_' +'ursprünglich_' +'uan_' +'twenty_' +'tti' +'tter_' +'travel' +'towns_' +'topic_' +'time' +'tersebut_' +'teachers_' +'taxpayers_' +'tag_' +'ta' +'sz' +'substance_' +'stun' +'stocks_' +'stem_' +'staying_' +'stayed_' +'statistics_' +'stadt_' +'sse' +'speakers_' +'spe' +'soil_' +'sla' +'sis_' +'sicherheit_' +'shed_' +'shares_' +'serviert_' +'sensible_' +'seg' +'see' +'schä' +'savings_' +'saved_' +'satellite_' +'sal' +'ry' +'rund' +'rst' +'rre' +'rot' +'roll_' +'ris' +'richtlinie_' +'richtigen_' +'revolutionary_' +'retten_' +'retirement_' +'reserve_' +'remind_' +'religiösen_' +'reject_' +'ref' +'reasonable_' +'ram' +'que' +'prä' +'prepare_' +'praktische_' +'pr' +'ports_' +'porta' +'politically_' +'phenomenon_' +'pem' +'pass' +'parallel_' +'par_' +'pal' +'oy' +'operational_' +'oma' +'offices_' +'offene_' +'obstacles_' +'näher_' +'nz' +'ntr' +'nto' +'nt' +'notwendige_' +'noted_' +'nochmals_' +'nc' +'nahm_' +'nachhaltig_' +'my' +'mut' +'mother_' +'mixed_' +'mittel' +'mic' +'messen_' +'marks_' +'mand' +'manager_' +'machines_' +'ländlichen_' +'ländern_' +'ls' +'listen_' +'lin_' +'ließen_' +'li_' +'lei' +'lea' +'lawyers_' +'langfristig_' +'lam' +'kurze_' +'kov' +'kor' +'knew_' +'kinds_' +'kel' +'jungen_' +'jpg_' +'joined_' +'items_' +'interessiert_' +'intention_' +'input_' +'innovations_' +'indicated_' +'inde' +'inc' +'in' +'ili' +'igt_' +'ified_' +'ideological_' +'ich' +'ible_' +'höchste_' +'hren_' +'hole_' +'hm' +'hilft_' +'hier' +'hervor_' +'heraus_' +'heil' +'heavily_' +'ham_' +'gäbe_' +'gro' +'gets_' +'gesellschaft_' +'ges' +'gera' +'ger' +'genauso_' +'früheren_' +'frag' +'ford_' +'flights_' +'fla' +'finde_' +'ffer' +'fertig_' +'failures_' +'expand_' +'ese' +'escape_' +'ern' +'erheblich_' +'erb' +'epi' +'entwickelte_' +'ents_' +'ente_' +'enhanced_' +'engagement_' +'engage_' +'encouraging_' +'emi' +'emerged_' +'ema' +'eite_' +'einzu' +'einzigartigen_' +'einfachen_' +'eigener_' +'ef' +'echten_' +'eat_' +'drawn_' +'down' +'disc' +'died_' +'democracies_' +'dee' +'court_' +'corner_' +'convert_' +'contributions_' +'consequence_' +'conditioning_' +'components_' +'collection_' +'cold_' +'coalition_' +'ci_' +'ci' +'careful_' +'cal' +'cable_' +'bri' +'brand_' +'betreiben_' +'besitzen_' +'berg' +'beinahe_' +'behaupten_' +'behandeln_' +'begrenzt_' +'bathrooms_' +'attractions_' +'ator_' +'ates_' +'applying_' +'angesprochen_' +'angen_' +'amenities_' +'alen_' +'ah' +'affairs_' +'advertising_' +'adequate_' +'abroad_' +'Zugriff_' +'Zug_' +'Zu' +'Yes_' +'Währungsfonds_' +'Wor' +'Wirtschaftspolitik_' +'Who_' +'Where_' +'Weltkrieg_' +'Wall_' +'Videos_' +'Verh' +'Ve' +'Valencia_' +'Ursachen_' +'UN' +'Truppen_' +'Todes' +'Tiere_' +'Though_' +'Tests_' +'Terroristen_' +'Telefon' +'Tab' +'TER' +'System' +'Stimm' +'Stein' +'Stan' +'Sony_' +'Sho' +'Set_' +'Scho' +'Ruhe_' +'Rest_' +'Reise' +'Regulierung_' +'Regelungen_' +'Ratspräsidentschaft_' +'Rats' +'RD' +'Prüfung_' +'Produkt_' +'Polizei_' +'Polish_' +'Place_' +'Pla' +'Peter_' +'Palestinians_' +'Pala' +'Or_' +'Ol' +'OS' +'Nas' +'My' +'Moscow_' +'Moo' +'Moment_' +'Mittelmeer_' +'Meeres' +'Medien' 
+'Mass' +'Marktes_' +'Mad' +'MS_' +'MP' +'Licht_' +'Kurz_' +'Kr' +'Konsum' +'Konsequenzen_' +'Konjunktur' +'Komponenten_' +'Kilometer_' +'Ker' +'Ken' +'Keine_' +'Katastrophe_' +'Kap' +'Joseph_' +'Institution_' +'Insbesondere_' +'Inn_' +'Immobilien' +'Ihres_' +'IA' +'Ho' +'Having_' +'Haushalte_' +'Hauptbahnhof_' +'Gut' +'Golden_' +'Glück_' +'Gleichgewicht_' +'Gipfel_' +'Gew' +'Gemeinden_' +'Gegenstand_' +'GNU_' +'Fälle_' +'Fu' +'Freunde_' +'Freuen_' +'Fla' +'Fisch' +'Firma_' +'Fer' +'Fenster_' +'FA' +'Einheit_' +'Einhaltung_' +'ES_' +'Dutch_' +'Deutschlands_' +'Details_' +'Definition_' +'Day_' +'Dam' +'Cu' +'Common_' +'Comm' +'College_' +'Cam' +'CE_' +'CE' +'CD' +'Burg' +'Bulgaria_' +'Boden_' +'Bla' +'Bis' +'Betriebs' +'Besides_' +'Beschluss_' +'Berücksichtigung_' +'Berufs' +'Bedarf_' +'Bars_' +'Banken' +'Ba' +'Auge_' +'Auftrag_' +'Aufst' +'Aufnahme_' +'Are_' +'Arbeiten_' +'Ar' +'Ant' +'Anpassung_' +'Angela_' +'Angaben_' +'Anbetracht_' +'Amtszeit_' +'Ambiente_' +'All' +'AS' +'A3' +':' +'85_' +'60' +'34_' +'26' +'1993_' +'1992_' +'02' +'-' +') – _' +'({{_' +'  _' +'’._' +'у_' +'ж' +'во' +'ün' +'übertr' +'überaus_' +'ña_' +'ätze_' +'änder_' +'älteren_' +'Übergang_' +'Ärzte_' +'´_' +' – _' +'zuständig_' +'zin' +'ziemlich_' +'zieht_' +'zer_' +'zar' +'ystems_' +'wissenschaftlichen_' +'whereas_' +'western_' +'wel' +'wei' +'wachsen_' +'vorstellen_' +'vorlegen_' +'voller_' +'ving_' +'vieles_' +'vi' +'vern' +'verfügbar_' +'verbindet_' +'urge_' +'unterzeichnet_' +'unt' +'une' +'undoubtedly_' +'uncertainty_' +'unab' +'umzu' +'ult' +'uh' +'ug_' +'uf_' +'uen_' +'tätig_' +'twice_' +'tri' +'trends_' +'tours_' +'tough_' +'tle' +'tische_' +'tin' +'thy_' +'terrorists_' +'telah_' +'tea_' +'tackle_' +'syn' +'swe' +'surface_' +'subsequent_' +'su_' +'storage_' +'stimmt_' +'stic_' +'stein_' +'spaces_' +'soweit_' +'solid_' +'sm' +'sion' +'sh' +'se' +'schweren_' +'sche' +'san' +'rück' +'rte_' +'route_' +'robust_' +'rm' +'rkt_' +'rip' +'ringen_' +'rig' +'revenue_' +'reu' +'respected_' +'rent_' +'renewable_' +'rem' +'regeln_' +'reduzieren_' +'raw_' +'ratio_' +'rap' +'ral_' +'qualified_' +'puts_' +'pun' +'pt_' +'providers_' +'promises_' +'profile_' +'produktion_' +'pred' +'potentially_' +'possibilities_' +'pose_' +'pointed_' +'piel_' +'pic' +'ph' +'periods_' +'percentage_' +'percent_' +'passengers_' +'passenger_' +'para' +'oz' +'ow_' +'our' +'organised_' +'ordered_' +'ona' +'occurred_' +'obligation_' +'object_' +'nötigen_' +'nst' +'nsch' +'novel_' +'none_' +'nk' +'nine_' +'nic' +'ney_' +'newspaper_' +'neu' +'nder' +'nces_' +'ms' +'mountains_' +'mn' +'miles_' +'mes' +'mere_' +'menschliche_' +'medicines_' +'meals_' +'marquis_' +'lug' +'los' +'logical_' +'locations_' +'lobby_' +'ller_' +'lit' +'leaves_' +'layer_' +'lac' +'ky_' +'krieg_' +'klein_' +'kit' +'ize_' +'ists_' +'iron_' +'integriert_' +'informieren_' +'ine' +'incentive_' +'ij' +'ignored_' +'ifi' +'if' +'ics_' +'hs_' +'hot' +'hoped_' +'herrscht_' +'heim' +'happens_' +'handle_' +'haften_' +'grown_' +'griechische_' +'gri' +'grateful_' +'grad' +'gg' +'geöffnet_' +'gestaltet_' +'gering_' +'genannte_' +'gegen' +'gedr' +'gardens_' +'gang_' +'fying_' +'fu' +'fri' +'fourth_' +'foundation_' +'finanzieren_' +'financed_' +'feeling_' +'facto_' +'exciting_' +'everywhere_' +'eur_' +'eta' +'esta' +'erte' +'erscheint_' +'ero' +'erm' +'erlauben_' +'erk' +'erfüllt_' +'erfordern_' +'entw' +'enr' +'engine_' +'engaged_' +'enb' +'ely_' +'ell' +'eli' +'ein' +'eht_' +'eh' +'educational_' +'eco' +'eau_' +'eastern_' +'earth_' +'earned_' +'ea' +'don' +'distinguish_' +'difficulty_' 
+'dient_' +'di' +'describes_' +'deflation_' +'dedicated_' +'de' +'dates_' +'creative_' +'cre' +'constantly_' +'consistent_' +'clubs_' +'cleaning_' +'cla' +'ción_' +'citizen_' +'chal' +'centres_' +'causing_' +'castle_' +'cas_' +'bs' +'breath' +'boot_' +'boot' +'blind_' +'bild_' +'bilateral_' +'betroffenen_' +'betonen_' +'beschäftigen_' +'beschränken_' +'besch' +'berühmten_' +'beraten_' +'behörde_' +'begins_' +'beendet_' +'bedeutenden_' +'bea' +'bau_' +'backed_' +'awareness_' +'ausschuss_' +'art' +'ari_' +'architecture_' +'anzunehmen_' +'ani' +'angemessene_' +'angegebenen_' +'anc' +'anb' +'amongst_' +'alp' +'aks' +'air' +'affordable_' +'affect_' +'admit_' +'ade_' +'adalah_' +'accepting_' +']]' +'Zwischen_' +'Zweck_' +'Zwar_' +'Zusammenhalt_' +'Zivil' +'Zinssätze_' +'Zimmern_' +'Zei' +'Year_' +'Wirtschafts' +'William_' +'Wesentlichen_' +'Werk_' +'Weiß' +'Vol' +'Vision_' +'Vice_' +'Verd' +'Vel' +'Va' +'VI' +'Trotzdem_' +'Titel_' +'Thanks_' +'Terror' +'Terrasse_' +'Technology_' +'TO' +'Swiss_' +'Support_' +'Suite_' +'Such' +'Steuer_' +'Statt_' +'Staff_' +'Souveränität_' +'Sogar_' +'Signal_' +'Sierra_' +'Serie_' +'Sektoren_' +'Schwellenländer_' +'Schiff_' +'Sc' +'Sar' +'Resolution_' +'Referendum_' +'Ram' +'Rad' +'Profi' +'Produkten_' +'Poker_' +'Past' +'Pap' +'Pakete_' +'PRO' +'PE' +'Organe_' +'Oder_' +'Ob_' +'Nicht' +'News_' +'Mädchen_' +'Mubarak_' +'Mou' +'Module_' +'Mitgliedsstaaten_' +'Met' +'Menü_' +'Menschheit_' +'Maria_' +'Mac_' +'Life_' +'Lern' +'Küsten' +'Kur' +'Kra' +'Koordinierung_' +'Konvents_' +'Konflikte_' +'Kommunismus_' +'Komm' +'Kluft_' +'Kla' +'Kapital_' +'Kampagne_' +'Ji' +'Jerusalem_' +'Jede_' +'Is' +'IM' +'Haus' +'Hand' +'Haftung_' +'Großteil_' +'Gri' +'Grenz' +'Gewinne_' +'Geschäfte_' +'Georgia_' +'Garden_' +'GA' +'Fuß_' +'Form' +'Fischerei' +'Film' +'Ferner_' +'Federation_' +'Fair' +'Entscheidungsträger_' +'Entscheidungs' +'Ents' +'Effizienz_' +'EN' +'Dynamik_' +'Distribution_' +'Din' +'Digital_' +'Desk_' +'Delegation_' +'Del' +'Darum_' +'Cro' +'Colo' +'Climate_' +'Chairman_' +'Cat' +'Card_' +'Captain_' +'Call' +'CF' +'CC' +'Bug' +'Blue_' +'Blair_' +'Black_' +'Bewältigung_' +'Bevölkerungs' +'Beschlüsse_' +'Berg' +'Bereitschaft_' +'Bemerkungen_' +'Bemerkung_' +'Beiträge_' +'Beh' +'Baltic_' +'Balkans_' +'Außenpolitik_' +'Authority_' +'Arabia_' +'Any_' +'Anst' +'Anleger_' +'Anlagen_' +'Agrar' +'Agentur_' +'AI' +'; ' +'98_' +'97_' +'79' +'63_' +'59_' +'55_' +'41_' +'38_' +'237' +'2020_' +'182' +'.  
_' +'...._' +'''_' +'''' +' !_' +'‘_' +'ра' +'но' +'ме' +'ма' +'ла' +'й' +'übers' +'ú' +'ände_' +'zugunsten_' +'zierung_' +'zieren_' +'zeug' +'yt' +'ye' +'yar' +'wunder' +'wu' +'woman_' +'wirksame_' +'wir' +'wife_' +'wiederholen_' +'weitgehend_' +'waters_' +'vorliegenden_' +'vorbei_' +'volume_' +'vollständige_' +'vollen_' +'violent_' +'vin' +'verursacht_' +'versetzt_' +'verpflichten_' +'verk' +'veri' +'verabschiedet_' +'variable_' +'unterstütze_' +'untergraben_' +'ungsp' +'underway_' +'understood_' +'unbea' +'ums' +'umgeben_' +'ular_' +'uk_' +'tte' +'trip_' +'tions' +'tief_' +'terror_' +'tennis_' +'tea' +'tat' +'stärkere_' +'stä' +'stylish_' +'stu' +'stellung_' +'speichern_' +'sou' +'solches_' +'smooth_' +'sleep_' +'showed_' +'shape_' +'securities_' +'secular_' +'schöne_' +'schu' +'schrieb_' +'schnelle_' +'schie' +'sake_' +'sah_' +'ruling_' +'ruhig_' +'rl' +'rich' +'ric' +'rg' +'resulted_' +'restructuring_' +'rest' +'ress_' +'respects_' +'respective_' +'resistance_' +'requirement_' +'renowned_' +'regionaler_' +'regarded_' +'reflected_' +'reduziert_' +'red' +'recorded_' +'reconstruction_' +'recommendations_' +'realen_' +'rc' +'raph' +'rant' +'ranging_' +'rain' +'rah' +'purchases_' +'ps' +'präsentiert_' +'proportion_' +'profound_' +'produktiv' +'produces_' +'previously_' +'preis_' +'practically_' +'positiv_' +'pol' +'plenary_' +'ple' +'plays_' +'piel' +'persistent_' +'peri' +'pea' +'pays_' +'password_' +'passieren_' +'partnerships_' +'palästinensischen_' +'ow' +'overlooking_' +'ous' +'ote' +'orts_' +'organized_' +'organisiert_' +'organisationen_' +'organis' +'ordnung_' +'opponents_' +'ond' +'odu' +'observers_' +'oa' +'nz_' +'ny_' +'nse' +'notice_' +'notably_' +'nnen_' +'nn' +'nken_' +'niveau_' +'niedrigen_' +'new' +'ner' +'neighbouring_' +'nch' +'nb' +'nab' +'mpo' +'mount_' +'modest_' +'mod' +'mil' +'mi' +'mend' +'marked_' +'mani' +'male_' +'ly' +'lounge_' +'lots_' +'logic_' +'lle' +'lität_' +'lin' +'liefern_' +'leg' +'langfristigen_' +'landscape_' +'lands_' +'konkreten_' +'komp' +'kin_' +'keineswegs_' +'ka' +'jährigen_' +'justified_' +'ji_' +'japanischen_' +'isn_' +'ions' +'involvement_' +'introducing_' +'installiert_' +'ino_' +'informal_' +'ines_' +'illi' +'id' +'ichts' +'iche_' +'ica' +'hör' +'hr_' +'home' +'hl_' +'hl' +'hilfreich_' +'herzlich_' +'hed_' +'harm_' +'happening_' +'größerer_' +'großem_' +'groß' +'graphics_' +'gh' +'gestiegen_' +'gestern_' +'geschieht_' +'geringere' +'gemein' +'gele' +'geeignet_' +'gains_' +'furnished_' +'freuen_' +'frequently_' +'fort_' +'folgende_' +'floors_' +'finanzierung_' +'fill_' +'fewer_' +'feld_' +'fate_' +'fashion_' +'fare_' +'faith_' +'failing_' +'fach' +'explore_' +'existieren_' +'exi' +'evaluation_' +'euros_' +'esti' +'erstmals_' +'erst' +'ers' +'error_' +'erhielt_' +'ergriffen_' +'entsprechen_' +'enterprise_' +'enf' +'enc' +'ements_' +'einzig_' +'einzelne_' +'einheitlichen_' +'edge_' +'economically_' +'ebene_' +'eas' +'durchführen_' +'druck_' +'drivers_' +'dor_' +'dnung_' +'diverse_' +'dite' +'diskutieren_' +'disco' +'discipline_' +'directed_' +'dig' +'dien' +'destination_' +'designs_' +'demonstrate_' +'demokratischer_' +'define_' +'decisive_' +'deals_' +'dead_' +'dea' +'dam' +'cti' +'ct' +'creates_' +'cosy_' +'cop' +'contributing_' +'constitute_' +'conse' +'commodity_' +'com' +'colours_' +'collaboration_' +'clo' +'cin' +'chtlich_' +'certainty_' +'cameras_' +'bs_' +'broken_' +'brief_' +'blu' +'beteiligt_' +'bet' +'besorgt_' +'bers' +'bericht_' +'bemühen_' +'bell' +'bekommt_' +'beinhaltet_' +'behaviour_' +'beha' +'begun_' +'begr' +'begegnen_' 
+'bedrooms_' +'bed' +'baru_' +'ay' +'avoided_' +'ausgezeichnete_' +'ausger' +'ausgehen_' +'ausgegeben_' +'aufmerksam_' +'assi' +'aspect_' +'aside_' +'asiatischen_' +'arrested_' +'array_' +'aro' +'arkt' +'are' +'arc' +'ara' +'apparent_' +'ap_' +'ant' +'ank' +'angel' +'amo' +'ament' +'ambi' +'altung_' +'ail' +'agi' +'aggressive_' +'adapt_' +'abkommen_' +'Zuständigkeit_' +'Zusammen' +'Zivilgesellschaft_' +'Zins' +'Zeitung_' +'Wikitravel_' +'Whi' +'Whether_' +'Welche_' +'Wald' +'Wachstums_' +'Vorsch' +'Vorbe' +'View_' +'Vier' +'Verständnis_' +'Vater_' +'Van_' +'Update_' +'Untersuchungen_' +'Universitäten_' +'Ums' +'Transport' +'Tat' +'TH' +'Switzerland_' +'Super' +'Stunde_' +'Studio_' +'Strukturreformen_' +'Strukturfonds_' +'Struktur_' +'Strom' +'Strategy_' +'Strasbourg_' +'Steigerung_' +'Spezialitäten_' +'Sowjetunion_' +'Similarly_' +'Siege' +'Show_' +'Set' +'Sep' +'Selbstverständlich_' +'Sec' +'Sea' +'Schweden_' +'Schlussfolgerungen_' +'Schicksal_' +'Schi' +'Schatten_' +'Sau' +'SO' +'Rückkehr_' +'Ruh' +'Romania_' +'Rob' +'Road_' +'River_' +'Richard_' +'Renten' +'Ren' +'Religion_' +'Regierungskonferenz_' +'Regi' +'Red' +'Raum' +'Projekt' +'Premier_' +'Point_' +'PSE_' +'PL_' +'Nu' +'Nonetheless_' +'Nie' +'Nero_' +'Nazi_' +'National' +'Nachrichten_' +'Nachricht_' +'Monopol' +'Mol' +'Mitgliedstaat_' +'Minute_' +'Mikro' +'Methoden_' +'Mein_' +'Maßnahme_' +'Massen' +'ME' +'MB_' +'League_' +'Laut_' +'LabVIEW_' +'Küste_' +'Künstler_' +'König_' +'Kä' +'Kulturen_' +'Krisen_' +'Kit' +'Kirche_' +'Kategorie_' +'Kas' +'Karten_' +'Kampf' +'Jones_' +'Je_' +'Ins' +'Innenstadt_' +'Indiens_' +'Index_' +'IR' +'IL' +'IG' +'IF' +'Has' +'Har' +'Hai' +'HTML_' +'Grundsatz_' +'Glauben_' +'Gerechtigkeit_' +'Gegens' +'Gas_' +'GS' +'GI' +'Fun' +'Früh' +'Format_' +'Fo' +'Flugzeuge_' +'Finnish_' +'Festlegung_' +'Fat' +'Errichtung_' +'Erm' +'Entw' +'Einbeziehung_' +'Ehe' +'Effekt_' +'Education_' +'ET' +'EP' +'Dritte_' +'Donald_' +'Diskussionen_' +'Diskriminierung_' +'Disk' +'Dis' +'Demokratien_' +'Clo' +'Chávez_' +'Cas' +'Cal' +'Cab' +'Budgets_' +'Budapest_' +'Box_' +'Block' +'Bill_' +'Betr' +'Besucher_' +'Besch' +'Belgium_' +'Beijing_' +'Be' +'Bahnhof_' +'B5_' +'Außen' +'Ausnahme' +'Arbeitgeber_' +'Anlass_' +'Anders_' +'An' +'Alm' +'Alles_' +'Alexander_' +'Abe_' +'Abe' +'AB' +'78_' +'7' +'28' +'2030_' +'1990er_' +'189' +'183' +'… _' +'• _' +'я_' +'ы_' +'ч' +'те' +'п_' +'не' +'ž' +'üsse_' +'üge_' +'üche_' +'überwinden_' +'überprüft_' +'ön' +'ó_' +'ét' +'ég' +'Ökonomien_' +'· _' +' ' +'{{_' +'zählt_' +'zurückzu' +'zunehmende_' +'wünsche_' +'wre' +'wohnen_' +'wohn' +'wohl' +'widerspr' +'wider' +'werte_' +'wer' +'wenden_' +'welcoming_' +'weiterge' +'web' +'water' +'wart' +'ware_' +'vorgenommen_' +'voraus_' +'virus_' +'verspricht_' +'verkaufen_' +'vent' +'veni' +'vel' +'valid_' +'ution' +'uss' +'usa' +'upgrade_' +'unlike_' +'university_' +'universities_' +'ungsm' +'umfangreiche_' +'ultra' +'ud' +'uch_' +'twin_' +'twe' +'tude_' +'tru' +'treat_' +'tobacco_' +'threshold_' +'tha' +'ters_' +'teilnehmen_' +'tech_' +'teaching_' +'targeted_' +'tap' +'tabl' +'supporters_' +'supplies_' +'sul' +'suites_' +'suggested_' +'successes_' +'studio_' +'string_' +'stories_' +'stor' +'stops_' +'sser' +'spüren_' +'spezifische_' +'spezielle_' +'solange_' +'sofern_' +'sma' +'sichtig' +'showing_' +'shortage_' +'shock_' +'sheet_' +'sharp_' +'sexual_' +'setzte_' +'serving_' +'senken_' +'selten_' +'seeks_' +'schön_' +'schnellen_' +'sb' +'sala' +'rä' +'ruhigen_' +'rob' +'rib' +'resolutions_' +'requested_' +'representation_' +'reporting_' +'replace_' 
+'repeatedly_' +'repeated_' +'repeat_' +'remarks_' +'reli' +'rein' +'regulated_' +'regret_' +'registration_' +'refused_' +'recommended_' +'rechtlichen_' +'recall_' +'rd' +'rb' +'rational_' +'ration_' +'rapporteurs_' +'ran' +'quo_' +'q_' +'q' +'proposes_' +'produziert_' +'privileged_' +'privacy_' +'premi' +'prefer_' +'ppe' +'populations_' +'pon' +'plu' +'phon' +'peak_' +'patterns_' +'parks_' +'oth' +'ose_' +'oro' +'orders_' +'opt_' +'ong_' +'oli' +'olen_' +'oh' +'offiziell_' +'ock' +'occasion_' +'nunmehr_' +'notion_' +'noti' +'not' +'nnte' +'nds_' +'natürlichen_' +'natur' +'nan_' +'namen_' +'mü' +'multiple_' +'multilateral_' +'mpa' +'movie_' +'moralische_' +'mon_' +'moments_' +'mo_' +'minimal_' +'mine_' +'meters_' +'merk' +'medi' +'meal_' +'mbi' +'mber' +'mas_' +'maritime_' +'mann_' +'man' +'magnificent_' +'lte_' +'losigkeit_' +'literature_' +'lis' +'lebt_' +'lebens' +'lat' +'kriege' +'kop' +'konfrontiert_' +'klicken_' +'killing_' +'key' +'kernel_' +'kern_' +'kation_' +'junge_' +'joining_' +'jahr_' +'itte' +'italienischen_' +'istischen_' +'islamischen_' +'is' +'ious_' +'ior' +'involving_' +'invested_' +'inva' +'intentions_' +'intelligence_' +'insufficient_' +'ino' +'inis' +'inf' +'indischen_' +'indi' +'impr' +'implies_' +'ils_' +'igkeiten_' +'ight_' +'idi' +'ica_' +'hängt_' +'humans_' +'hoping_' +'homes_' +'holds_' +'hit' +'hinter' +'hingewiesen_' +'hero' +'hergestellt_' +'helps_' +'hat' +'handel_' +'halbe' +'größeres_' +'grant_' +'grand' +'gor' +'gle' +'glass_' +'gewähren_' +'gesetz' +'gesellschaften_' +'gese' +'geplant_' +'geltenden_' +'gefährdet_' +'gefährden_' +'gebunden_' +'gal' +'fühlen_' +'fy_' +'fte_' +'fs_' +'forthcoming_' +'foot' +'follows_' +'fleet_' +'fitness_' +'fication_' +'fall' +'factor_' +'extraordinary_' +'extra' +'exclusion_' +'eva' +'etz' +'ete_' +'estr' +'esse' +'esi' +'ese_' +'erweisen_' +'erlangen_' +'erheblichen_' +'erhebliche_' +'equity_' +'entscheidend_' +'entgegen_' +'endo' +'emerge_' +'elt_' +'ela' +'eist' +'eintr' +'einfacher_' +'eilung_' +'eil' +'ehe' +'effektiv_' +'eer' +'eda' +'ea_' +'dus' +'dt_' +'drawing_' +'dos_' +'dominant_' +'dl' +'dit' +'disk_' +'direkte_' +'devoted_' +'det' +'derer_' +'dependent_' +'departure_' +'dep' +'den' +'delicious_' +'dealt_' +'cur' +'ctive_' +'corresponding_' +'copyright_' +'converted_' +'contents_' +'conta' +'constant_' +'considerably_' +'conservative_' +'concentrate_' +'con' +'composed_' +'component_' +'comply_' +'comp' +'colour' +'class' +'claimed_' +'cker_' +'cker' +'cht' +'chemical_' +'chel' +'charm' +'caught_' +'capabilities_' +'can' +'calendar_' +'bä' +'bun' +'buchen_' +'bubble_' +'bridge_' +'breiten_' +'breite_' +'boo' +'boa' +'bo_' +'blieb_' +'birth_' +'bild' +'betreffenden_' +'bestehende_' +'beseitigen_' +'beschließen_' +'bekannten_' +'bek' +'beige' +'befassen_' +'beauty_' +'beantworten_' +'bat' +'basieren_' +'badly_' +'autumn_' +'autonomy_' +'auswählen_' +'auswirken_' +'ausländische_' +'ause' +'aufrechtzuerhalten_' +'atz_' +'atz' +'attempted_' +'ath' +'articles_' +'arten_' +'arrived_' +'armen_' +'arise_' +'arian_' +'arge' +'anzuzeigen_' +'anywhere_' +'anf' +'ami' +'amerikanischer_' +'alo' +'alcohol_' +'ak_' +'agr' +'adopting_' +'acting_' +'aci' +'abwe' +']' +'Zusammens' +'Zo' +'Yo' +'Währungen_' +'Would_' +'Work_' +'Wladimir_' +'Winter_' +'Wild' +'Wikicars_' +'Wi_' +'Wel' +'Vorstellung_' +'Vorausschau_' +'Vielmehr_' +'Vielen_' +'Video' +'Verteidigungs' +'Versuche_' +'Vermi' +'Verm' +'Verlauf_' +'Verfolgung_' +'Verein' +'Valley_' +'VE_' +'Use_' +'Uns_' +'Umgang_' +'Treaties_' +'Tochter_' +'Tip_' +'Tim' +'Tier' 
+'Ticket_' +'Texas_' +'Syn' +'Sudan_' +'Stä' +'Stattdessen_' +'Start' +'Spannungen_' +'Sor' +'Sonnen' +'Sid' +'Shanghai_' +'Several_' +'Serbien_' +'Sei' +'Schweiz_' +'Schuld' +'Sauna_' +'SD' +'Roman' +'Republicans_' +'Rec' +'Punkten_' +'Presse' +'Preisen_' +'Pop' +'Politikern_' +'Pläne_' +'Planung_' +'PP' +'PI' +'Otherwise_' +'Opera_' +'Once_' +'Ober' +'Noch_' +'Niemand_' +'Natur' +'Nat' +'Mur' +'Mos' +'Morgen_' +'Mobile_' +'Migranten_' +'Messe' +'Mensch_' +'Mechanismen_' +'Marketing_' +'Mark_' +'Mario_' +'Mari' +'Manhattan_' +'Mangel_' +'Mala' +'Luc' +'Links_' +'Libya_' +'Lea' +'LateRooms_' +'Ladies_' +'Lab' +'LL' +'LE_' +'Können_' +'Kurz' +'Kurs_' +'Kunden' +'Kuba_' +'Konf' +'Klasse_' +'Keynes_' +'Key' +'Kauf_' +'Kat' +'Kal' +'KO' +'Journalisten_' +'Ja_' +'Interventionen_' +'Internationale_' +'Insgesamt_' +'Ing' +'Infrastruktur' +'Info' +'IS_' +'ISO_' +'Hu' +'Helsinki_' +'Hauses_' +'Handelss' +'Ham' +'Haft' +'Guests_' +'Großen_' +'Grab' +'Ges' +'Gentoo_' +'Generationen_' +'Gelder_' +'Gef' +'Garantie_' +'Ga' +'GB_' +'Führungs' +'Funds_' +'Frank_' +'Formular_' +'Florida_' +'Fischerei_' +'Fi_' +'Feld_' +'Farb' +'Europa' +'Empire_' +'Empfehlungen_' +'Emissions' +'Em' +'Elemente_' +'Element_' +'Einst' +'Eg' +'Dorf_' +'Disc' +'Deutschen_' +'Des_' +'Demo' +'Declaration_' +'Darin_' +'DER_' +'Cra' +'Constitutional_' +'Chu' +'CON' +'Bud' +'Branchen_' +'Bl' +'Bestandteil_' +'Bes' +'Beitritts' +'Beispielsweise_' +'Band' +'Ave' +'Ava' +'Australia_' +'Ausführungen_' +'Ausführung_' +'Arc' +'Arbeitslosen' +'Anschluss_' +'Ann' +'Anlage_' +'Anlage' +'Andernfalls_' +'Alli' +'Alle' +'Aktion_' +'Airlines_' +'Afrikas_' +'Abhängigkeit_' +'ASEAN_' +'AL' +'72_' +'600_' +'6' +'50' +'22' +'200' +'1994_' +'1971_' +'1967_' +'187' +'184' +'120_' +'1000_' +'04_' +'01' +'. „_' +''' (_' +'$_' +'’, _' +'қ' +'да' +'ł' +'ütte' +'üh' +'übersch' +'überdenken_' +'üb' +'öffentlicher_' +'äten_' +'änge_' +'ähnliche_' +'ão_' +'» _' +'  ' +'}} **{{_' +'zweit' +'zwei' +'zunehmenden_' +'zun' +'zugänglich_' +'zuges' +'zione_' +'zentren_' +'youth_' +'yma' +'yi' +'yard_' +'wonach_' +'wis' +'wirkt_' +'wirkliche_' +'whilst_' +'whi' +'wesen_' +'we' +'wan' +'wake_' +'voran' +'voluntary_' +'vollkommen_' +'vier' +'vielmehr_' +'verteilt_' +'verringert_' +'vermutlich_' +'vermitteln_' +'ury_' +'urgently_' +'urf_' +'unte' +'unsch' +'uno' +'united_' +'ungss' +'ungs_' +'undermine_' +'unange' +'umwelt' +'umso_' +'ultimate_' +'uct' +'uchen_' +'uche_' +'tzt_' +'typically_' +'ture_' +'tritt_' +'trial_' +'trag' +'tik_' +'tier' +'ticket_' +'territories_' +'tern' +'tely_' +'teilt_' +'tall' +'tal_' +'tag' +'süd' +'sé' +'sätze_' +'surprised_' +'surely_' +'sure' +'stützen_' +'ständigen_' +'struck_' +'strike_' +'stri' +'strategie_' +'sto' +'stie' +'stell' +'steel_' +'starts_' +'stammen_' +'stal' +'staatlicher_' +'staaten_' +'sst_' +'spoken_' +'specified_' +'sp' +'sozialer_' +'sons_' +'some' +'soldiers_' +'sna' +'sn' +'ska' +'shop_' +'sho' +'shadow_' +'senden_' +'scu' +'sci' +'schr' +'schla' +'schemes_' +'schaden_' +'sauber_' +'san_' +'rweise_' +'rop' +'rooted_' +'ron' +'rle' +'ris_' +'rightly_' +'rie_' +'ria' +'revealed_' +'reta' +'ret' +'reputation_' +'repair_' +'renoviert_' +'ren' +'reg' +'rede' +'recover_' +'recommendation_' +'rechten_' +'reas' +'reactions_' +'rea' +'rating_' +'radi' +'qui_' +'qui' +'question' +'quest_' +'quel' +'qu' +'punkte_' +'psycho' +'prose' +'prop' +'promoted_' +'proceedings_' +'pris' +'presents_' +'posed_' +'poorest_' +'poly' +'play' +'pho' +'pf' +'perubahan_' +'pert' +'permanently_' +'path' +'participants_' +'pace_' +'ously_' 
+'os' +'orte' +'organ' +'optional_' +'optimal_' +'oppose_' +'opi' +'oo' +'ona_' +'off' +'ode' +'occupation_' +'obtained_' +'objects_' +'ntu' +'nti' +'nswerte' +'normalen_' +'niedriger_' +'nger' +'neun_' +'netz_' +'nent_' +'ndes_' +'narrow_' +'nachhaltigen_' +'nachge' +'müsse_' +'mächtig' +'mä' +'myth' +'modell_' +'mitten_' +'mittels_' +'mistake_' +'missing_' +'minorities_' +'mid' +'messages_' +'mess' +'mes_' +'menye' +'menu' +'menjadi_' +'mela' +'mein' +'mea' +'manufacturers_' +'manchen_' +'makers_' +'lung_' +'lunch_' +'loved_' +'logy_' +'lm' +'lling_' +'lla_' +'lk' +'lists_' +'linguistic_' +'ling' +'light' +'ließ_' +'lieb' +'lev' +'lets_' +'lem' +'leistung_' +'lee' +'lde' +'lau' +'lasting_' +'last' +'lake_' +'kümmern_' +'kämpfen_' +'kä' +'kurzfristig_' +'kurzer_' +'ksi' +'kosten' +'kontrolliert_' +'kon' +'komme_' +'km' +'klär' +'kita_' +'ket' +'ke' +'kan' +'justify_' +'jemals_' +'jederzeit_' +'iter' +'irre' +'ique_' +'interessen_' +'intend_' +'instability_' +'ink' +'indicate_' +'inder' +'inadequate_' +'impression_' +'impa' +'imagin' +'ika' +'ignore_' +'ign' +'iges_' +'iesen_' +'ide' +'ichten_' +'icher' +'ib' +'holidays_' +'hohem_' +'hinder' +'hielt_' +'heritage_' +'herauszu' +'hence_' +'hau' +'handling_' +'haft_' +'gy_' +'gy' +'gui' +'granting_' +'gn' +'gh_' +'gewiss_' +'getragen_' +'geschrieben_' +'geringe_' +'genetic_' +'gelöst_' +'gefa' +'geboren_' +'gallery_' +'gaben_' +'förder' +'functioning_' +'ften_' +'friend_' +'freundliche_' +'franc' +'fossil_' +'fore' +'fon' +'focusing_' +'flying_' +'fly_' +'flug' +'flu' +'filled_' +'fic' +'favor_' +'farm_' +'familiar_' +'exploitation_' +'expert_' +'exceptional_' +'ewa' +'evi' +'eure_' +'etzen_' +'essi' +'erlassen_' +'erkennt_' +'erkannt_' +'eric' +'eo' +'entsteht_' +'entf' +'enge' +'enforcement_' +'ends_' +'empfangen_' +'els_' +'eit' +'eins_' +'einh' +'eingegangen_' +'eingebracht_' +'eignet_' +'eigenes_' +'egal_' +'een_' +'educated_' +'ede' +'economist_' +'echen_' +'duties_' +'dul' +'dte' +'dritte_' +'doctors_' +'doctor_' +'displayed_' +'disasters_' +'ding' +'dialog_' +'des' +'derzeitige_' +'dens' +'demo' +'demi' +'delighted_' +'defeat_' +'decorated_' +'debian_' +'deadline_' +'dd' +'davor_' +'dat' +'dar' +'dahin_' +'cted_' +'creditors_' +'covering_' +'countless_' +'correctly_' +'coordinated_' +'cooperate_' +'compact_' +'codes_' +'cle' +'clause_' +'classes_' +'chef_' +'checking_' +'channel_' +'chances_' +'centr' +'centers_' +'cast' +'cart' +'career_' +'cam' +'buses_' +'bug_' +'bright_' +'bond_' +'bol' +'bit' +'bisa_' +'bis' +'bill_' +'bilateralen_' +'bi_' +'bewirken_' +'bewahren_' +'besar_' +'beruhen_' +'bert' +'bel_' +'behoben_' +'behauptet_' +'beg' +'beantwortet_' +'bauen_' +'basi' +'bahn' +'aver' +'ausländischen_' +'ausgedrückt_' +'aufgeb' +'auff' +'aub' +'ate' +'assen_' +'assa' +'artists_' +'artistic_' +'appropriations_' +'annehmen_' +'angewiesen_' +'anerkannt_' +'anda' +'amp_' +'ambition_' +'alts' +'aktive_' +'ak' +'agen' +'aged_' +'ado_' +'adjustments_' +'actor_' +'achen_' +'acc' +'Zwei' +'Zuge_' +'Zen' +'Zahlungen_' +'Wit' +'Werke_' +'Wal' +'WE' +'Vorstellungen_' +'Vorsitz_' +'Vors' +'Vorlage_' +'Vorgehen_' +'Viertel_' +'Vertretern_' +'Verteidigungspolitik_' +'Verst' +'Using_' +'Unterkunft_' +'Ukrainian_' +'Tunisia_' +'Tru' +'Traum_' +'Transfer' +'Traditionen_' +'Town_' +'Tonnen_' +'Tok' +'Teilnehmer_' +'Tausende_' +'Taliban_' +'TO_' +'Sunday_' +'Straßburg_' +'Stra' +'Stimmung_' +'Steuerzahler_' +'Star' +'Staatsanleihen_' +'Soldaten_' +'Ska' +'Simbabwe_' +'Schätzungen_' +'Schnell' +'Schlüssel_' +'Schlag' +'Schie' +'Satz_' +'SU' +'SG' 
+'Russians_' +'Rot' +'Roll' +'Rock_' +'Regional' +'Ree' +'Rand_' +'Radio_' +'Qualitäts' +'Professor_' +'Power_' +'Portuguese_' +'Politiken_' +'Playa_' +'Planeten_' +'Pl' +'Pis' +'Philosophie_' +'Pf' +'Personal' +'Performance_' +'Pas' +'Partnership_' +'Partnerschaften_' +'PA' +'Orte_' +'Orient' +'Ok' +'Offenheit_' +'Notes_' +'Northern_' +'Normen_' +'Nizza_' +'Nation_' +'NGOs_' +'NG' +'NCC_' +'Mot' +'Mobil' +'Mittelmeer' +'Mitglieds' +'Missbrauch_' +'Minutes_' +'Mess' +'Menschenhandel_' +'Mei' +'Mau' +'Mark' +'Mallorca_' +'Mach' +'Maastricht_' +'Lö' +'Luft_' +'Ltd_' +'Lounge_' +'Lockerung_' +'Lizenz' +'Lim' +'Liberal' +'Legitimität_' +'Lee_' +'Landwirte_' +'Lake_' +'Lag' +'LO' +'Kritiker_' +'Korean_' +'Klimawandels_' +'Kir' +'Ki' +'Kara' +'Jugend' +'Jahrzehnte_' +'Invasion_' +'Internet' +'Indonesien_' +'Indonesia_' +'Indem_' +'Imp' +'Image_' +'Il_' +'Höchst' +'Hostels_' +'Hor' +'Hit' +'Hin' +'Hilfs' +'Hezbollah_' +'Have_' +'Hall_' +'Hafen_' +'Gua' +'Gil' +'Gewicht_' +'Geschwindigkeit_' +'Germans_' +'Gerichte_' +'Gericht_' +'Gegend_' +'Gard' +'Ganz_' +'Gal' +'GH' +'Friendly_' +'Freude_' +'Freiheiten_' +'Fre' +'Franc' +'Ford_' +'Flüchtlings' +'Fluss_' +'Fischer_' +'Finanzmärkte_' +'Fin' +'Fil' +'Festival_' +'Ferienwohnung_' +'Feld' +'FO' +'Erstellung_' +'Entschließungsantrag_' +'Ele' +'Einsch' +'Einkommens' +'Einer_' +'Eigenschaften_' +'EL_' +'Dun' +'Drogen_' +'Dreh' +'Dokument_' +'Di_' +'Description_' +'Democracy_' +'Dec' +'Dauer_' +'DS9_' +'Cons' +'Conf' +'Com' +'Cit' +'Church_' +'Christ_' +'Chamber_' +'Canon_' +'Camp' +'Cambridge_' +'CS' +'Bürgerinnen_' +'Bürger' +'Börse_' +'Bulgarien_' +'Briten_' +'Blut' +'Blo' +'Ble' +'Betrieb_' +'Beteiligung_' +'Besitz' +'Bereitstellung_' +'Beratung_' +'Begleit' +'Bedürfnisse_' +'Bed' +'Be_' +'Bauern_' +'BS' +'BB' +'Außenminister_' +'Aussch' +'Ausl' +'Assad_' +'Armen_' +'Ari' +'Arch' +'Anzeichen_' +'Anti_' +'Annehmlichkeiten_' +'Anl' +'Anhänger_' +'Angebote_' +'Andere_' +'Ana' +'Amb' +'Ak' +'Air' +'Af' +'Acc' +'Abwe' +'Abr' +'Abd' +'Ab_' +'AV' +'92_' +'68_' +'51_' +'43_' +'40' +'32' +'31' +'19th_' +'02_' +'" ' +' -' +' ,,_' +' ), _' +'ң_' +'та' +'й_' +'и_' +'ды_' +'ε' +'übernimmt_' +'ökonomischen_' +'éa' +'ätig' +'ändische' +'ägen_' +'án_' +'ßt_' +'Überzeugung_' +'Überwachungs' +'Übers' +'Überleben_' +'Übergangs' +'Überblick_' +'zusammenarbeiten_' +'zug_' +'zuf' +'zub' +'zerstört_' +'zerstören_' +'zel' +'zeiten_' +'zahl_' +'younger_' +'wr' +'worrying_' +'win' +'wiederholt_' +'wichtiges_' +'welchen_' +'weiten_' +'watch_' +'wasser' +'warn' +'wahr_' +'wahl_' +'vorauss' +'voices_' +'vit' +'viertel_' +'viable_' +'verwaltung_' +'verr' +'verkehr_' +'verhalten_' +'verboten_' +'vely_' +'var' +'ute_' +'urn' +'untersuchen_' +'unterscheiden_' +'unta' +'unc' +'unan' +'unabhängigen_' +'umge' +'umb' +'uit' +'uar' +'uan' +'türkischen_' +'tsche' +'tsch' +'tro_' +'trib' +'treiben_' +'trees_' +'transportation_' +'transitional_' +'transferred_' +'tranquil' +'tragic_' +'tracks_' +'tra_' +'tour_' +'topics_' +'tone_' +'ton' +'tlich_' +'tion' +'ting' +'till_' +'tig' +'tifi' +'tie' +'tickets_' +'tic' +'tes' +'territorial_' +'terra' +'teils_' +'tehen_' +'taxi_' +'tables_' +'systemen_' +'systematic_' +'sy' +'switch_' +'sustain_' +'surf' +'supplementary_' +'summe' +'suited_' +'sucht_' +'subs' +'strict_' +'stream_' +'strange_' +'stood_' +'sting_' +'stimme_' +'steigenden_' +'stei' +'stars_' +'stammt_' +'stad' +'staat_' +'spät_' +'spiel_' +'spezifischen_' +'socio' +'sli' +'sis' +'significance_' +'sicheren_' +'sicher' +'shocks_' +'shel' +'settlements_' +'seri' +'selben_' +'sehe_' 
+'seh' +'seconds_' +'schutz_' +'schrittweise_' +'schme' +'schm' +'satz_' +'sation_' +'sand' +'safer_' +'sad' +'sab' +'rv' +'rut' +'rum_' +'row_' +'roots_' +'rme' +'risk' +'rie' +'rian_' +'ri' +'rhetoric_' +'residence_' +'rese' +'reproduction_' +'rene' +'remo' +'remarkable_' +'refers_' +'reductions_' +'recognised_' +'rechtliche_' +'rec' +'realistic_' +'realer_' +'react_' +'re' +'rbe' +'rau' +'rati' +'rail_' +'ragen_' +'racism_' +'quent' +'purchasing_' +'pur' +'prozess_' +'provider_' +'proven_' +'progressive_' +'professor_' +'prisoners_' +'pride_' +'predicted_' +'praktischen_' +'ppi' +'potenzielle_' +'pledge' +'plan' +'pipeline_' +'pioneer' +'pil' +'photograph' +'phones_' +'persönliche_' +'personenbezogene' +'perfekt_' +'pen_' +'painful_' +'pain_' +'overn' +'ov_' +'ost' +'ort' +'originally_' +'organiz' +'ore' +'optimale_' +'ommen_' +'omi' +'om_' +'oll' +'og' +'oftmals_' +'offen' +'odi' +'nü' +'nter' +'notes_' +'northern_' +'nj' +'nischen_' +'nische_' +'niedrig_' +'nieder' +'ngi' +'negativen_' +'nci' +'nche' +'natürliche' +'mögliche_' +'märkten_' +'murder_' +'mou' +'motiv' +'mona' +'mol' +'modify_' +'moderate' +'mode' +'mittleren_' +'minor_' +'minds_' +'min' +'migrants_' +'mi_' +'mete' +'mereka_' +'memper' +'mel' +'medicine_' +'meat_' +'measured_' +'mail' +'magic_' +'lös' +'lum' +'llo' +'lled_' +'lig' +'lift_' +'lieber_' +'lid' +'library_' +'lenken_' +'leidet_' +'leich' +'legte_' +'legally_' +'leak' +'lbe' +'lav' +'lateral' +'langem_' +'lang' +'lai' +'kürze' +'künftigen_' +'könne_' +'kö' +'ktor' +'korrekt_' +'klaren_' +'kill_' +'kesehatan_' +'kas' +'kana' +'jüngste_' +'jedenfalls_' +'iz' +'iv' +'ition_' +'itali' +'isation_' +'ional_' +'involves_' +'invite_' +'investigation_' +'intervene_' +'inten' +'institutionellen_' +'inl' +'incredible_' +'incon' +'immense_' +'illegalen_' +'illegale_' +'illa' +'ility_' +'iel_' +'ideale_' +'hyper' +'hundert_' +'hopes_' +'hood_' +'holders_' +'histori' +'hinzufügen_' +'hinge' +'hierbei_' +'hes' +'hervorragende_' +'heraus' +'hera' +'hee' +'heads_' +'handels' +'halt' +'hal' +'hab' +'günstig_' +'gültige' +'gä' +'gte_' +'größtenteils_' +'grundsätzlich_' +'gramm' +'governing_' +'gos' +'gleichermaßen_' +'gkeit_' +'gi_' +'gesunde' +'gesch' +'genuinely_' +'geni' +'gelingt_' +'gefährliche_' +'gee' +'gebiet_' +'gate' +'fällen_' +'fur' +'fundamental' +'fuels_' +'fro' +'freely_' +'fre' +'fortsetzen_' +'forschung_' +'forests_' +'football_' +'folge' +'flee' +'flash_' +'flag_' +'fire' +'finish_' +'fin' +'figur' +'ffe' +'festgelegten_' +'fanden_' +'fam' +'fails_' +'fahrt_' +'expenses_' +'expanded_' +'exp' +'examine_' +'exa' +'eti' +'esch' +'erzeugt_' +'erwähnen_' +'erreichte_' +'erne' +'erl' +'erfolgreiche_' +'entre' +'enthalt_' +'enth' +'entertainment_' +'entering_' +'ensch' +'ens' +'ener_' +'encouraged_' +'ena' +'employed_' +'emergence_' +'ement_' +'embrace_' +'em' +'eller_' +'elektronischen_' +'electric_' +'eld_' +'eite' +'einzuführen_' +'einverstanden_' +'eintreten_' +'einr' +'einl' +'einheitliche_' +'eingerichtete_' +'ego' +'egen' +'efe' +'ec' +'easing_' +'dw' +'durchzusetzen_' +'dung_' +'dun' +'ds' +'drängen_' +'dream_' +'dot' +'dor' +'doors_' +'distinction_' +'dispute_' +'disposal_' +'diskutiert_' +'dise' +'directory_' +'director_' +'dim' +'dieselbe_' +'dictatorship_' +'determination_' +'deswegen_' +'destroy_' +'dependence_' +'departments_' +'department_' +'demonstrations_' +'definitely_' +'dating_' +'dark_' +'cz' +'cut' +'critique_' +'credits_' +'crat' +'cra' +'count_' +'corporations_' +'conventions_' +'convenience_' +'contributed_' +'contextual_' +'constructive_' 
+'considerations_' +'conduct_' +'cond' +'compete_' +'coherent_' +'codecision_' +'chtig' +'chte' +'chau' +'characterized_' +'cel' +'cc' +'catch_' +'carrying_' +'busy_' +'bre_' +'bought_' +'bot_' +'boards_' +'black' +'bio' +'binding_' +'bigger_' +'bezieht_' +'beträgt_' +'bestätigt_' +'bes_' +'bere' +'bera' +'bene' +'bele' +'beings_' +'begründet_' +'beglückwünschen_' +'begl' +'beeinflussen_' +'bedroh' +'bedeutende_' +'barer_' +'barely_' +'band_' +'backing_' +'az' +'aw' +'authoritarian_' +'ausb' +'auftreten_' +'aufgeführt_' +'atte' +'att_' +'att' +'ato_' +'asi' +'arte' +'aria' +'argumentieren_' +'apparently_' +'api' +'ap' +'ants_' +'ano_' +'anhaltenden_' +'angebracht_' +'ands_' +'anderswo_' +'alternatives_' +'alongside_' +'ali' +'aktuell_' +'ahm' +'aga' +'adverse_' +'adds_' +'adaptation_' +'acknowledged_' +'ack' +'accused_' +'acceptance_' +'Zwecke_' +'Zusätzlich_' +'Zusatz' +'Zimmerservice_' +'Zielen_' +'Zerstörung_' +'You' +'Wirksamkeit_' +'Wir' +'Werten_' +'Werkzeuge_' +'Wellness_' +'Welcome_' +'Weiter' +'We' +'Wander' +'Wall' +'Wahrscheinlichkeit_' +'WHO_' +'Vorbild_' +'Virus_' +'Vir' +'Vic' +'Verz' +'Versprechen_' +'Verringerung_' +'Vermögenswerte_' +'Verkauf_' +'Verhaltens' +'Vereinigte_' +'Vene' +'Urteil_' +'Umge' +'UE' +'Tätigkeit_' +'Typ_' +'True_' +'Tool_' +'Tibet_' +'Three_' +'Ten' +'Temple_' +'Tee' +'Teams_' +'Südkorea_' +'Sus' +'Sul' +'Structural_' +'Stre' +'Still_' +'Stahl' +'Spo' +'Spielen_' +'Spezial' +'Spa' +'Sound_' +'Sky' +'Situationen_' +'Sit' +'Sir' +'Siehe_' +'Sicherheitspolitik_' +'Service' +'Section_' +'Scotland_' +'Schwäche_' +'Schwerpunkt_' +'Schreiben_' +'Schon_' +'Schli' +'Schle' +'Schiff' +'Scheitern_' +'Schalt' +'Sach' +'SE_' +'Rä' +'Ruf_' +'Rub' +'Rom_' +'Ret' +'Republican_' +'Rechtsstaatlichkeit_' +'Rechten_' +'Rechnungs' +'Rat' +'Rand' +'RO' +'RM' +'Qui' +'Prozess' +'Projekts_' +'Procedure_' +'Presse_' +'Potenzial_' +'Pos' +'Pol' +'Pflicht_' +'Passwort_' +'Parlaments' +'Pakt_' +'PRI' +'PR' +'PO' +'PD' +'Out_' +'Orl' +'Organization_' +'Offen' +'Off' +'Od' +'Nichtraucherzimmer_' +'Netzwerk_' +'Net' +'NI' +'Modern_' +'Mittel' +'Mir_' +'Mini' +'Minderheit_' +'Min' +'Mil' +'Mechanismus_' +'Mc' +'Maschinen_' +'Martin' +'Mar_' +'Mandat_' +'Manchmal_' +'Mak' +'Mai' +'MPEG_' +'MP3_' +'Löhne_' +'Long_' +'Lie' +'Leitung_' +'Lehr' +'Later_' +'Lac' +'LIN' +'Kru' +'Konvention_' +'Konto_' +'Kons' +'Kongress_' +'Kompromiss' +'Kompetenzen_' +'Komp' +'Kernel_' +'Kauf' +'Kai' +'KORE_' +'KE' +'KA' +'Jung' +'Jak' +'Jackson_' +'Jack' +'Internal_' +'Instruments_' +'Innovationen_' +'Inf' +'IP' +'ION_' +'ING_' +'ID' +'Hinzu' +'Herz' +'Henry_' +'Heil' +'Harvard_' +'HI' +'Gästen_' +'Gott_' +'Gor' +'Gleich' +'Glas' +'Gipfeltreffen_' +'Gh' +'Gewerkschaften_' +'Gener' +'Gem' +'Gel' +'Gehminuten_' +'Gebrauch_' +'Gar' +'Fuß' +'Fur' +'Friedens_' +'Fremd' +'Free' +'Frankreichs_' +'Frank' +'Framework_' +'Flughäfen_' +'Flash_' +'Firefox_' +'Feier' +'Fehl' +'Fast_' +'Farben_' +'Fahrzeuge_' +'Fa' +'Exp' +'Esta' +'Er' +'Energies' +'Elite_' +'Einwanderer_' +'Einstellungen_' +'Einnahmen_' +'Einb' +'Egyptian_' +'Edition_' +'EX' +'ER_' +'EM' +'EIB_' +'EI' +'EG_' +'ED' +'Dy' +'Dublin_' +'Drogen' +'Drive' +'Dritten_' +'Drittel_' +'Don_' +'Deswegen_' +'Department_' +'Denmark_' +'Dek' +'Debatten_' +'Cur' +'Cruz_' +'Country_' +'Cou' +'Colombia_' +'Col' +'Client_' +'Chef_' +'Chal' +'Ch' +'Card' +'Can' +'California_' +'Burma_' +'Bu' +'Brok_' +'Bro' +'Br' +'Beweise_' +'Beurteilung_' +'Bett' +'Berichten_' +'Beobachter_' +'Befürworter_' +'Befugnisse_' +'Beachtung_' +'Bea' +'Based_' +'BU' +'BC_' +'Ausz' 
+'Ausstattung_' +'Ausst' +'Ausr' +'Aus' +'At' +'Asyl' +'Aspekten_' +'Architektur_' +'Arafat_' +'Ansichten_' +'Ans' +'Anonymous_' +'Anliegen_' +'Ange' +'Andererseits_' +'Allein_' +'Act' +'Ac' +'Abschnitt_' +'Absatz_' +'Abf' +'AD' +'66_' +'62_' +'54_' +'46_' +'35' +'29' +'1985_' +'1980s_' +'180_' +'17' +'.) _' +'. - _' +'. &#_' +''', _' +' ?_' +' ; _' +' +_' +' + _' +' "' +'…"..._' +'” ' +'’' +'ül' +'üg' +'ü' +'év' +'éc' +'äußern_' +'äts' +'ät' +'ähl' +'äh' +'Übrigen_' +'Übertragung_' +'Übernachtung_' +'Übereinkommen_' +'Überdies_' +'Ära_' +'zweitens_' +'zuzu' +'zusch' +'zung_' +'zulassen_' +'zug' +'zu' +'ziel_' +'zie' +'zeigte_' +'zb' +'zahlungen_' +'yu' +'you' +'ym' +'yield_' +'yc' +'yan' +'würdigen_' +'wü' +'worthy_' +'wonder' +'wise_' +'willingness_' +'werke_' +'werfen_' +'welcomes_' +'weis' +'wards_' +'ward_' +'waffen_' +'vulnerability_' +'vorhandenen_' +'vollständigen_' +'viv' +'vil' +'vielfältige' +'vid' +'vessels_' +'veröffentlichte_' +'verä' +'verst' +'versorgung_' +'versichern_' +'versch' +'verlor_' +'verkauft_' +'verg' +'verbreitet_' +'venture_' +'ved_' +'vas' +'vacation_' +'uung_' +'uta' +'unterstützte_' +'unterge' +'unsustainable_' +'unions_' +'unin' +'ungsf' +'ung' +'unf' +'unentgeltlich_' +'underlying_' +'underground_' +'umfassen_' +'uis' +'ui' +'ugs' +'uga' +'ues_' +'ude' +'uch' +'tätigen_' +'täglichen_' +'tw' +'ture' +'trick' +'tras' +'trained_' +'traditions_' +'trace' +'tr' +'tolerance_' +'tip' +'thumbnail_' +'throw_' +'texts_' +'teuer_' +'tested_' +'termin' +'tene' +'tendency_' +'techniques_' +'tec' +'tau' +'tari' +'tab' +'sämtliche_' +'sympathy_' +'sver' +'survival_' +'surveillance_' +'suppliers_' +'supervision_' +'superior_' +'succ' +'subsequently_' +'strategi' +'straightforward_' +'str' +'stored_' +'steigt_' +'starten_' +'stand' +'stan_' +'stake_' +'stabiliz' +'stab' +'ssu' +'ssion_' +'sr' +'späten_' +'spl' +'spiegelt_' +'spiegel' +'speziell_' +'sole_' +'ske' +'situ' +'silence_' +'sil' +'side' +'sicherstellen_' +'shot_' +'sf' +'settings_' +'seperti_' +'sens' +'sending_' +'selling_' +'sekarang_' +'sek' +'screens_' +'score_' +'scientists_' +'schätzen_' +'schwer' +'schutz' +'schlimmsten_' +'schlechten_' +'schaften_' +'sama_' +'sai' +'rücken_' +'rü' +'rze' +'ruktur' +'rp' +'rose_' +'ront' +'rken_' +'rium_' +'rif' +'rier' +'rid' +'rh' +'rgi' +'revenues_' +'reve' +'restricted_' +'responses_' +'respect' +'reserved_' +'rer' +'render' +'religiöse_' +'rel' +'reich' +'regulate_' +'regionen_' +'reforme' +'recruit' +'receiving_' +'rece' +'reagiert_' +'read' +'rder' +'rans' +'rai' +'rage' +'queries_' +'qualifi' +'qi' +'pue' +'publicly_' +'proximity_' +'province_' +'promising_' +'prominent_' +'professionals_' +'producer_' +'problem' +'prinzip_' +'preserve_' +'preferred_' +'port' +'politi' +'poli' +'planen_' +'pillar_' +'performed_' +'perceived_' +'pe' +'paying_' +'past' +'parameters_' +'pala' +'pack' +'pa_' +'ov' +'orn' +'orit' +'ores_' +'order' +'ora_' +'opti' +'onne' +'onia_' +'ong' +'omm' +'oleh_' +'oke' +'ogen' +'og_' +'officers_' +'offens' +'offenbar_' +'occupied_' +'observed_' +'observations_' +'obliged_' +'nützlich_' +'nutz' +'nung_' +'nowadays_' +'novel' +'noticed_' +'nominal_' +'nne' +'nn_' +'nh' +'ngan_' +'net' +'neo' +'negotiate_' +'ndung_' +'ncy_' +'nati' +'nant_' +'nal' +'nachfrage_' +'märkte_' +'mächtigen_' +'má' +'mus' +'mot' +'mono' +'moderate_' +'mobility_' +'mmer_' +'minder' +'mier' +'met' +'merc' +'mente_' +'menc' +'mena' +'men' +'mema' +'melt' +'mehr' +'medieval_' +'maßen_' +'max_' +'mate' +'mam' +'machten_' +'lösung_' +'läge_' +'lut' +'lon_' +'logi' +'load_' 
+'llte_' +'listings_' +'lip' +'life' +'lic' +'leichter_' +'leib' +'legacy_' +'leb' +'lautet_' +'laufenden_' +'langer_' +'lah' +'lag' +'künftig_' +'kü' +'ktion_' +'kritisiert_' +'kri' +'kostenlose_' +'kontextuellen_' +'kontextuell_' +'konst' +'konse' +'komplexe_' +'kommunistischen_' +'komm' +'kne' +'kli' +'kleineren_' +'klarer_' +'kette_' +'kes_' +'kers_' +'kennt_' +'kem' +'kel_' +'keen_' +'kat' +'jährige_' +'jara' +'jar' +'ivi' +'ively_' +'iti' +'israelischen_' +'isi' +'isen_' +'iri' +'iranische_' +'ira' +'investiert_' +'interpretation_' +'internationales_' +'intends_' +'intelligent_' +'instructions_' +'instantly_' +'inspired_' +'inspection_' +'initiated_' +'inhabitants_' +'inflows_' +'indicators_' +'inclusion_' +'importantly_' +'ille' +'ill' +'ification_' +'ideology_' +'ic' +'hältnis' +'hydro' +'hor' +'honour_' +'honest_' +'hoff' +'hire_' +'hinzufugen_' +'hilfe_' +'hidden_' +'hes_' +'hervorragenden_' +'hervorheben_' +'hervorgehoben_' +'herunterladen_' +'herunter' +'heat_' +'healthy_' +'hd' +'hast_' +'hart_' +'harmful_' +'harder_' +'hamm' +'halte' +'hack' +'guided_' +'gua' +'gründen_' +'gru' +'grati' +'grammatisch_' +'governmental_' +'gne_' +'gis' +'gier' +'ghe' +'gewählten_' +'gewonnen_' +'gewicht' +'get' +'gesto' +'gest' +'geschw' +'gerü' +'geprüft_' +'gep' +'gens' +'generous_' +'generell_' +'gend' +'gelang_' +'gefördert_' +'gefragt_' +'gebildet_' +'gear' +'gat' +'gaining_' +'ful' +'fueled_' +'friedlichen_' +'frequency_' +'fran' +'frame_' +'fra' +'foundations_' +'foster_' +'fordere_' +'forcing_' +'fli' +'fle' +'flaw' +'flags_' +'fix_' +'firmly_' +'finanz' +'films_' +'fie' +'festival_' +'ferner_' +'fee_' +'fans_' +'falsche_' +'facts_' +'facilitate_' +'extremists_' +'expressing_' +'explo' +'experiences_' +'expenditures_' +'expe' +'expanding_' +'executive_' +'ew' +'evident_' +'everybody_' +'estimates_' +'ess' +'erzeugen_' +'ery_' +'erwä' +'erweitern_' +'ersetzen_' +'erkunden_' +'erhöhte' +'erholen_' +'erforderliche_' +'erd' +'erarbeitet_' +'eo_' +'environmentally_' +'entst' +'entdeckst_' +'ensures_' +'ensi' +'enn' +'enjoying_' +'enjoyed_' +'ening_' +'englische' +'enger_' +'enge_' +'enemy_' +'endet_' +'endes_' +'emotional_' +'eme' +'embark' +'eingel' +'eingehalten_' +'ege' +'ega' +'effektive_' +'echt' +'ebe' +'eb' +'eate' +'earn_' +'ean' +'durchs' +'dungen_' +'dru' +'drives_' +'dramatically_' +'dol' +'dm' +'disa' +'dil' +'dignity_' +'digit' +'die' +'df' +'desktop_' +'desk_' +'designer_' +'deserve_' +'description_' +'describe_' +'ders_' +'dern_' +'derl' +'derartigen_' +'depth_' +'depending_' +'denk' +'dem' +'def' +'das' +'dak' +'customs_' +'cua' +'creat' +'courts_' +'count' +'corrupt_' +'cope_' +'conveniently_' +'consult' +'constraints_' +'connect_' +'confronted_' +'confrontation_' +'confront_' +'concentration_' +'complicated_' +'comparison_' +'communism_' +'commit_' +'combine_' +'collect_' +'closing_' +'clarity_' +'civilians_' +'cit_' +'circle_' +'chtung_' +'chne' +'chant' +'chan' +'challenging_' +'cepti' +'cell_' +'cand' +'camp_' +'bö' +'broadcast' +'brechen_' +'brauch' +'bot' +'bon_' +'blog_' +'blind' +'billig' +'bevölkerung_' +'betrieben_' +'bes' +'bert_' +'berge' +'berat' +'belegt_' +'beitr' +'beibehalten_' +'behe' +'beenden_' +'bau' +'band' +'ball' +'bail' +'bai' +'bags_' +'bagi_' +'bag' +'bad' +'backs_' +'awa' +'authors_' +'aut' +'ausschusses_' +'aussch' +'ausgeschlossen_' +'ausgeführt_' +'ausführliche' +'ausführ' +'aufgegeben_' +'aufgefordert_' +'asso' +'ass_' +'arti' +'arrest_' +'aris' +'arg' +'arbitrary_' +'appearance_' +'app' +'antr' +'annt' +'anlagen_' +'anischen_' +'angs_' 
+'angewandt_' +'angest' +'anger_' +'angepasst_' +'anerkennen_' +'anerkannte' +'ander' +'andel' +'all' +'alism_' +'alisierung_' +'akt' +'aki' +'ake' +'aine' +'aft_' +'advances_' +'adv' +'achtet_' +'accurate_' +'abzusch' +'abst' +'abi' +'aben_' +'abandon_' +'Zucker' +'Zone_' +'Zimmer' +'Zimbabwe_' +'Ye' +'Wr' +'Wort' +'Wissenschaft' +'Widerstands' +'Wi' +'Werbung_' +'Wende' +'Weg' +'Wechselkurs_' +'Webseiten_' +'Watson_' +'Water' +'Ware_' +'Wahr' +'Waffen' +'WASHINGTON_' +'WAR' +'Vorgehensweise_' +'Voll' +'Vi' +'Vert' +'Verluste_' +'Verl' +'Verge' +'Veranstaltungen_' +'Veranstaltung_' +'Vera' +'Ver' +'VER' +'User_' +'Up_' +'Umweltschutz_' +'US' +'UM' +'Transa' +'Trag' +'Tourismus_' +'Todesstrafe_' +'Thu' +'Tha' +'Textil' +'Temperaturen_' +'Tas' +'Tarifa_' +'Tan' +'TE_' +'Sä' +'Subventionen_' +'Str' +'Stor' +'Stockholm_' +'Stell' +'Stau' +'Stat' +'Sports_' +'Spielraum_' +'Spi' +'Space_' +'Sound' +'Sol_' +'Sof' +'Sign' +'Siemens_' +'Sicherheitsrat_' +'Shar' +'Serb' +'Ser' +'Senkung_' +'Sen' +'Sektors_' +'Schwer' +'Schwellenländern_' +'Schwei' +'Schw' +'Schu' +'Schlusselwort_' +'Schlusselphrase_' +'Schloss_' +'Rücks' +'Russ' +'Rumänien_' +'Rules_' +'Rubrik_' +'Rose' +'Right_' +'Ric' +'Rela' +'Rei' +'Ref' +'Rather_' +'RT' +'Provence_' +'Project_' +'Prize_' +'Praktiken_' +'Positionen_' +'Porto_' +'Pod' +'Plus_' +'Plat' +'Phänomen_' +'Pers' +'Pen_' +'Peace_' +'Park' +'Papa' +'Pana' +'Palma_' +'Palm' +'Pal' +'Pad' +'PC' +'PARIS_' +'Original' +'On' +'OP' +'OF_' +'Null' +'Notenbank_' +'Nordkorea_' +'Nord_' +'Niederlage_' +'Netto' +'Nationalismus_' +'Nar' +'Mutter_' +'Mut_' +'Muslime_' +'Morocco_' +'Monitor' +'Modelle_' +'Milosevic_' +'Mid' +'Metro_' +'Meter_' +'Medizin_' +'Material_' +'Marina_' +'Mani' +'Manager_' +'Manage' +'Mana' +'Malaria_' +'Mah' +'Magi' +'MM' +'Ly' +'Logik_' +'Lit' +'Listings_' +'Link_' +'Light' +'Liefer' +'Lib' +'Let' +'Lernen_' +'Leitlinien_' +'Lehren_' +'Laun' +'Lastly_' +'Laeken_' +'Kürze_' +'Kö' +'Kopenhagen_' +'Komplex' +'Kle' +'Kie' +'Kata' +'Kapazitäten_' +'Kandidat' +'Kanada_' +'Jun' +'Jer' +'Iran' +'Irak' +'Interessen' +'Install' +'Industrieländern_' +'Indi' +'Import' +'Immobilien_' +'IBM_' +'IB' +'Hunger_' +'Hunde' +'Hongkong_' +'Hol' +'Hitler_' +'Histori' +'Hei' +'Haushaltsdefizit' +'Haupts' +'Halb' +'Had' +'HD' +'Güter_' +'Gulf_' +'Guide_' +'Grünbuch_' +'Große_' +'Großbritanniens_' +'Growth_' +'Griechenlands_' +'Governments_' +'Golf' +'Gleichstellung_' +'Gla' +'Get_' +'Gestaltung_' +'Gesicht_' +'Genu' +'Gene' +'Geber' +'Games_' +'GP' +'Funktions' +'Fro' +'Friday_' +'Fri' +'Freunden_' +'Freizügigkeit_' +'Fraktionen_' +'Fortunately_' +'Force_' +'Food_' +'Fol' +'Florence_' +'Fle' +'Fischler_' +'Fisch_' +'Finnland_' +'Finanzsystem_' +'Finanzs' +'Finanzen_' +'Fernseh' +'FT' +'FS' +'FE' +'Export_' +'Existenz_' +'Exchange_' +'Eta' +'Este' +'Erwachsene_' +'Ep' +'Entwicklungsp' +'Einfluss' +'Eigentum_' +'ED_' +'Dä' +'Durchsetzung_' +'Dort_' +'Dokumente_' +'Dim' +'Dienst_' +'Det' +'Design' +'Democratic_' +'Def' +'Dea' +'De' +'Davos_' +'Darfur_' +'Daniel_' +'Cy' +'Creati' +'Content_' +'Consumer_' +'Comp' +'Communist_' +'Clubs_' +'Cla' +'Civil_' +'Chan' +'Casino_' +'Café_' +'CT' +'CA_' +'Bürokratie_' +'Bun' +'Bucht_' +'Bol' +'Black' +'Billionen_' +'Bez' +'Bewohner_' +'Betten_' +'Berlusconi_' +'Berg_' +'Belgien_' +'Bele' +'Beifall_' +'Beha' +'Bedürfnissen_' +'Bearbeitung_' +'Bay' +'Baum' +'Base_' +'Band_' +'Bahn' +'Az' +'Außen_' +'Autonomie_' +'Automobil' +'Ausmaß_' +'Ausgangspunkt_' +'Aufforderung_' +'Ass' +'Argument_' +'Antworten_' +'Ansätze_' +'Anleihen_' +'Angriffe_' +'Angl' 
+'Andr' +'And' +'Ami' +'Alters' +'Alta' +'Ali' +'Album_' +'Aktien_' +'Ah' +'Agrarpolitik_' +'Age_' +'Ablehnung_' +'Ablauf_' +'AS_' +'AC_' +'? ' +'96_' +'88_' +'86_' +'67_' +'61_' +'57_' +'53' +'33' +'27' +'236' +'1990s_' +'1960_' +'185' +'04' +'001' +',/_' +', ‘_' +'+ _' +'%-_' +'%' +'#_' +'! ' +' ("_' +'�' +'“ – _' +'–_' +'ш' +'по' +'м_' +'ли' +'ко' +'ка' +'ер' +'ва' +'ар' +'üstung_' +'ührung_' +'übrigens_' +'überwiegend_' +'überw' +'übernahm_' +'österreichischen_' +'öffentlich' +'ô' +'í_' +'ë' +'è_' +'ç' +'ätz' +'ärmsten_' +'änden_' +'ält_' +'ähr' +'äd' +'Übernahme_' +'°_' +' –, _' +'}}) _' +'| _' +'{_' +'zö' +'zust' +'zukommen_' +'zivil' +'zins' +'ziert_' +'zi_' +'zes_' +'zentral_' +'zen' +'zahlt_' +'yields_' +'yea_' +'xo' +'xe' +'xa' +'wort_' +'worried_' +'worker_' +'witness_' +'wissenschaftliche_' +'winning_' +'will' +'wild_' +'widmen_' +'whereby_' +'wh' +'weshalb_' +'werken_' +'welchem_' +'weite_' +'weigh' +'weapon_' +'weaker_' +'was' +'warme' +'wander' +'walt' +'walls_' +'wahlen_' +'vs' +'vorzunehmen_' +'vorsieht_' +'vorschlag_' +'vorn' +'vore' +'voran_' +'vollst' +'voll' +'volatility_' +'villages_' +'via' +'vez_' +'verz' +'versuch' +'verse' +'verschiedener_' +'verschaffen_' +'verme' +'verliert_' +'verletzt_' +'verhe' +'verh' +'vergeben_' +'verge' +'verfügbaren_' +'verd' +'verbr' +'verbinden_' +'vegeta' +'vari' +'valuable_' +'vak' +'uto' +'uti' +'urs' +'uri' +'urf' +'updated_' +'unzureichend_' +'unw' +'unusual_' +'unterwegs_' +'untern' +'unprecedented_' +'unpa' +'ungew' +'ungen' +'undi' +'unders' +'undergo' +'unden_' +'unau' +'un' +'umr' +'ump' +'uli' +'ukan_' +'ui_' +'uge' +'ucht_' +'ucht' +'ually_' +'ua_' +'ua' +'u0027s_' +'türkische_' +'tät' +'tänd' +'tube_' +'tter' +'très_' +'träge' +'triumph' +'treaties_' +'travellers_' +'transformation_' +'trafficking_' +'tow' +'tors_' +'tons_' +'toll_' +'tol' +'tl' +'tionally_' +'tional_' +'tim' +'til' +'thumb_' +'ths_' +'thousand_' +'thirds_' +'thir' +'thi' +'theo' +'tensions_' +'tens_' +'tellung_' +'teams_' +'taxation_' +'tax' +'tas' +'tana' +'sus_' +'surroundings_' +'surprising_' +'suddenly_' +'substantially_' +'stü' +'studied_' +'student_' +'stoppen_' +'stoff' +'stimulate_' +'stimmung' +'stil' +'stic' +'stelle_' +'steigende_' +'stehenden_' +'steer' +'sted_' +'stattfindet_' +'stages_' +'stabilize_' +'sta_' +'sste' +'ssa' +'spra' +'spor' +'spiel' +'spi' +'spect' +'specifications_' +'speaker_' +'spart' +'sparen_' +'spar' +'spanischen_' +'sow' +'sounds_' +'sorgt_' +'som' +'sol' +'slowdown_' +'slopes_' +'slight_' +'slave' +'skin_' +'sixt' +'simultaneously_' +'sieren_' +'sichere_' +'si' +'shrink' +'sharply_' +'sex' +'ses' +'servers_' +'sent' +'semi' +'segment' +'sec' +'sd' +'schönsten_' +'schwieriger_' +'schwerer_' +'schwach_' +'scht_' +'schri' +'schlu' +'schlimm' +'schlicht_' +'schl' +'schau' +'scene_' +'sav' +'saubere_' +'sat' +'sant' +'sand_' +'rz' +'rufen_' +'rse' +'rsch' +'rov' +'routes_' +'ros' +'rom' +'roll' +'roles_' +'rma' +'rku' +'reverse_' +'retain_' +'resse' +'res' +'requiring_' +'requests_' +'rent' +'rena' +'rek' +'reinforcing_' +'reine_' +'reiben_' +'regulat' +'regelung_' +'refuse_' +'referring_' +'rechtzeitig_' +'reali' +'rasche' +'rar' +'racing_' +'quin' +'quil' +'quantities_' +'qualitative_' +'pä' +'put' +'pursued_' +'punkt' +'proud_' +'protest' +'prote' +'profession' +'private' +'prim' +'preventing_' +'prevented_' +'prevailing_' +'pressures_' +'presenting_' +'predict_' +'ppl' +'ppen_' +'pou' +'posts_' +'positiven_' +'pools_' +'polnischen_' +'polls_' +'pm_' +'plötzlich_' +'ple_' +'platforms_' +'planung_' +'plant' +'pick' 
+'pflicht' +'pet_' +'pers' +'permit_' +'periphery_' +'pens' +'pel_' +'patio_' +'passing_' +'pas_' +'pas' +'partition_' +'part' +'palästinensische_' +'pakete_' +'pag' +'oß_' +'owners_' +'overseas_' +'oto' +'ossen_' +'ose' +'ories_' +'orientierte' +'oriented_' +'org' +'orc' +'ops_' +'operated_' +'opera' +'ony_' +'ono' +'ole' +'ois' +'oi' +'oga' +'officially_' +'od_' +'nos_' +'normale_' +'non' +'nobody_' +'nnung_' +'nni' +'nin' +'nig' +'nem' +'nell' +'neighbourhood_' +'negotiation_' +'nau' +'native_' +'nai' +'nahmen_' +'m²_' +'museum_' +'moderner_' +'mobilis' +'mmi' +'mme_' +'missions_' +'missile_' +'miss_' +'minu' +'milk_' +'militärischer_' +'merit' +'menja' +'mengen' +'mene' +'medizinischen_' +'mediat' +'meaningful_' +'mb' +'mati' +'massiven_' +'maschine_' +'mart' +'mar_' +'manu' +'mano' +'mals_' +'m2_' +'lässig' +'lur' +'lter_' +'lter' +'lst' +'lowest_' +'low' +'losen_' +'loi' +'logische' +'llt_' +'llers_' +'llen' +'linear_' +'lie' +'lichkeiten_' +'leng' +'leiden_' +'legend' +'lament' +'lalu_' +'lak' +'lacks_' +'lab' +'kund' +'kun' +'kredit' +'konzipiert_' +'kontra' +'konf' +'kommerziellen_' +'klima' +'kek' +'kehren_' +'kata' +'kat_' +'kandidat' +'juga_' +'judicial_' +'journey_' +'jointly_' +'jer' +'jahr' +'itu_' +'ith' +'isten_' +'ising_' +'isiert_' +'isch' +'isa' +'irgend' +'ione' +'investing_' +'interven' +'internen_' +'interessante_' +'institut' +'inso' +'insist_' +'inos_' +'inn' +'ining_' +'incidents_' +'inci' +'impos' +'ime_' +'iklim_' +'ika_' +'igra' +'iger' +'iew' +'ierungen_' +'iegen_' +'ieben_' +'hur' +'hung_' +'human' +'htig' +'hte' +'hro' +'host' +'hom' +'hochwertige' +'hochge' +'hlt_' +'hinaus' +'hill_' +'heu' +'herstell' +'hen' +'harten_' +'harmonisation_' +'happiness_' +'hall_' +'hair_' +'hai' +'guard_' +'gt' +'gründe' +'grew_' +'grenzen_' +'greifen_' +'gratulieren_' +'gran' +'grab' +'gon' +'gol' +'glaub' +'gk' +'geäußert_' +'gewinn' +'gewachsen_' +'getötet_' +'gestr' +'gestartet_' +'gesellschaftliche_' +'geschäft' +'geräumige' +'gerät_' +'gers_' +'geringer_' +'gepr' +'genes_' +'generate_' +'gemeinschaftlichen_' +'gemeinsamer_' +'geli' +'gelassen_' +'gek' +'geistigen_' +'gegangen_' +'gefährlich_' +'gefolgt_' +'gefe' +'gebühr' +'geboten_' +'gd' +'gari' +'gam' +'führer_' +'führe' +'fte' +'frische' +'fried' +'fragment' +'forums_' +'forth_' +'formation_' +'forge_' +'folder_' +'fläche_' +'fiskal' +'fine' +'finds_' +'finances_' +'fft_' +'ffic' +'ffi' +'fei' +'feet_' +'featuring_' +'fault_' +'fan' +'ey' +'expos' +'exporting_' +'exhibit' +'execution_' +'exe' +'excuse_' +'exceptions_' +'exc' +'exact_' +'everyday_' +'eun' +'eth' +'eten' +'essen' +'esca' +'erzielte_' +'ersi' +'ersetzt_' +'erse' +'err' +'erode' +'ernst' +'erlebt_' +'erhoben_' +'erh' +'ergänzen_' +'erer_' +'erende' +'erei' +'ere' +'enze' +'entr' +'entdecken_' +'ense' +'enha' +'energie_' +'enemies_' +'endgültigen_' +'empfängt_' +'empfehlen_' +'emp' +'emo' +'emission' +'embedded_' +'elli' +'elegante_' +'electrical_' +'ekonomi_' +'einzusch' +'einstellen_' +'einst_' +'einrichtungen_' +'einkommen_' +'eingeschränkt_' +'eingesch' +'eindeutige_' +'egte' +'effizienter_' +'ees_' +'eba' +'dürften_' +'dynamism_' +'durchge' +'dur' +'dramatisch_' +'dr' +'downturn_' +'dorthin_' +'domin' +'dle' +'dism' +'discovery_' +'disagree_' +'disabled_' +'direkt' +'dige' +'differ' +'dieselben_' +'dienste_' +'dic' +'deutliche_' +'deuten_' +'desto_' +'denkt_' +'deni' +'demanded_' +'delivery_' +'defining_' +'defi' +'deckt_' +'deck' +'decent_' +'deb' +'dau' +'darstell' +'cycli' +'cyber' +'curs' +'cro' +'credit' +'credible_' +'covers_' +'costly_' 
+'controlling_' +'contract' +'consolidation_' +'consists_' +'conscious' +'conc' +'como_' +'committees_' +'comme' +'colors_' +'cm_' +'clock_' +'cler' +'cken' +'cke_' +'cia_' +'chts' +'chs' +'chri' +'ched_' +'characters_' +'chaos_' +'champion' +'chair_' +'cet' +'ceme' +'cara_' +'capita_' +'calm_' +'büro' +'bus' +'bury_' +'bungen_' +'broadly_' +'brit' +'bric' +'breed' +'bracht_' +'boundaries_' +'boosting_' +'boat_' +'blow' +'bing_' +'binde' +'big' +'bha' +'bezahl' +'bez' +'bewiesen_' +'beweisen_' +'betont_' +'best' +'besonderer_' +'benutzer' +'benefited_' +'believes_' +'bekannte_' +'bekam_' +'behindert_' +'bedroht_' +'beding' +'bedeutsame' +'be' +'batt' +'bah' +'award_' +'avi' +'avec_' +'ava' +'aux_' +'auszuüben_' +'aussehen_' +'ausgewogene' +'ausgew' +'ausgeb' +'auseinander_' +'aufw' +'auftr' +'aufb' +'attraktive_' +'attract_' +'attended_' +'atr' +'asymmetri' +'ast' +'aspirations_' +'ash' +'ase_' +'artificial_' +'arn' +'arme_' +'arm' +'argues_' +'ard' +'archive_' +'arat' +'appl' +'apo' +'anzuwenden_' +'anzupassen_' +'anwenden_' +'answers_' +'ansehen_' +'anonym' +'anis' +'angesehen_' +'angemessen_' +'angeb' +'andes_' +'amt_' +'ame_' +'allzu_' +'allerg' +'alität_' +'alis' +'ales_' +'ald' +'aku' +'aj' +'airline_' +'ahn' +'ahme' +'aha' +'agieren_' +'afrika_' +'affecting_' +'aer' +'advised_' +'advise' +'adult_' +'ada' +'actors_' +'acted_' +'accounting_' +'accelerating_' +'aca' +'abgesehen_' +'abgelehnt_' +'abg' +'abend' +'aan_' +']], [[_' +'] ' +'Zust' +'Zus' +'Zur' +'Zugleich_' +'Youth_' +'Yas' +'XP_' +'XI' +'Wäscheservice_' +'Wärme' +'Wä' +'Wunder' +'Wissenschaftler_' +'Wirtschaftsp' +'Wirtschaftsf' +'Wirtschafts_' +'Wireless_' +'Wiederherstellung_' +'Wie' +'Widerspruch_' +'Wichtigkeit_' +'Wettbewerbs_' +'Wesen_' +'Werbe' +'Wer' +'Wen' +'Weit' +'Wein_' +'Wei' +'Wea' +'WC_' +'Vorschlags_' +'Vork' +'Vorhaben_' +'Vorf' +'Vollbeschäftigung_' +'Vertrags' +'Verteilung_' +'Versorgung_' +'Verletzung_' +'Verle' +'Verla' +'Verk' +'VI_' +'Urlaubs' +'Unternehmer_' +'Unterkünfte_' +'Unt' +'Typ' +'Twitter_' +'Trend_' +'Tot' +'Top' +'Thursday_' +'Third_' +'Thai_' +'Telefon_' +'Take_' +'Tagung_' +'Systemen_' +'Sun' +'Summe_' +'Suites_' +'Sturz_' +'Studenten_' +'Strom_' +'Strand' +'Stoffe_' +'Stabilitäts_' +'Spring_' +'Spitzen' +'Solange_' +'Smoking_' +'Situated_' +'Sie' +'Sicherheits_' +'Sicher' +'Shuttle_' +'Should_' +'Shop_' +'Sevilla_' +'Seba' +'Schrift' +'Schmerz' +'Schlag_' +'Schl' +'Schei' +'Salz' +'Salo' +'Sala' +'Sai' +'Russen_' +'Ross' +'Rit' +'Ris' +'Rhetorik_' +'Rettungs' +'Rese' +'Reg' +'Real_' +'Reagan_' +'Ratifizierung_' +'Rap' +'Radio' +'RER_' +'Provinz_' +'Programm' +'Prognosen_' +'Produzenten_' +'Produktivität_' +'Press_' +'Presidents_' +'Post_' +'Pont' +'Poll' +'Pole' +'Poker' +'Plenum_' +'Piazza_' +'Patten_' +'Patri' +'Pat' +'Papier' +'Panorama' +'Pale' +'Pak' +'Out' +'Oh' +'OC' +'Nummer_' +'Nuklear' +'None_' +'Nielson_' +'Nichtraucherzonen_' +'Next_' +'Netzwerk' +'Net_' +'Nei' +'Natura_' +'NS' +'NC' +'München_' +'Mul' +'Much_' +'Moskau' +'Mona' +'Moment' +'Modernisierung_' +'Modell' +'Mio_' +'Mini_' +'Millennium_' +'Mill' +'Migration_' +'Mic' +'Meinungs' +'Med' +'Mart' +'Marktwirtschaft_' +'Marg' +'Mao_' +'Mail' +'MD' +'Lü' +'Lärm' +'Lobby' +'Liu_' +'Libyen_' +'Les_' +'Lehrer_' +'Legal_' +'Lebensmittel_' +'Lau' +'Lap' +'Language_' +'Labor_' +'LU' +'LS_' +'LG' +'Kriminalität_' +'Kriege_' +'Kremlin_' +'Kontrollen_' +'Kontext_' +'Kont' +'Konkurrenz_' +'Kollegin_' +'Klaus_' +'Kl' +'Key_' +'Kern_' +'Kenntnisse_' +'Kart' +'Karriere_' +'Kann_' +'KI' +'Jü' +'Justiz' +'Jur' +'Juan_' +'Jordanien_' 
+'Jordan_' +'Jahrestag' +'Investition_' +'Institut_' +'Ingenieur' +'Industry_' +'Industrieländer_' +'Inc_' +'Ill' +'Id' +'IK' +'Hände_' +'Hypotheken' +'Human' +'Hour_' +'Holiday_' +'Hohe_' +'Hir' +'Hind' +'Herzlich' +'Heraus' +'Haut' +'Hau' +'Hart' +'Harmonisierung_' +'Haar' +'HO' +'HD_' +'Güter' +'Guan' +'Grundsätze_' +'Grundlagen_' +'Gross' +'Governance_' +'Gottes_' +'Gold' +'Global' +'Gestatten_' +'Geschlechter' +'Gen' +'Gemeinschaften_' +'Gefängnis_' +'Gebi' +'Gay_' +'Gates_' +'Gat' +'Gast_' +'Gara' +'GN' +'Führungsrolle_' +'Fü' +'Freizeit' +'Fr' +'Fou' +'Flu' +'Fire' +'Ferienwohnungen_' +'Ferien' +'Feinde_' +'Farm' +'Family_' +'Fallout_' +'FL' +'FC_' +'Excellent_' +'Everything_' +'Europol_' +'Etwa' +'Et' +'Essen_' +'Esp' +'Erwerbs' +'Erne' +'Erk' +'Erfolge_' +'Engine_' +'Energien_' +'Empf' +'Einschätzung_' +'Einschränkung_' +'Eingabe' +'Effektivität_' +'Economy_' +'Ebola_' +'Eb' +'EP_' +'Doppel' +'Dom' +'Dol' +'District_' +'Disp' +'Diplomatie_' +'Deutsch_' +'Deposit_' +'Denken_' +'Defence_' +'Deep_' +'Danach_' +'DL' +'DD' +'DA_' +'Culture_' +'Cru' +'Corb' +'Consensus_' +'Commissioners_' +'Come_' +'Code' +'Coa' +'Churchill_' +'Children_' +'Chat' +'Chancellor_' +'Catal' +'Castle_' +'Cast' +'Cand' +'CP' +'CM' +'Büro' +'Böse' +'Börsen' +'Butt' +'Bru' +'Brand' +'Boy' +'Bord_' +'Boot_' +'Bolkestein_' +'Blin' +'Binnen' +'Bilanz' +'Big_' +'Bie' +'Bibliothek_' +'Bewegungen_' +'Bew' +'Beu' +'Betracht_' +'Beteiligten_' +'Bet' +'Best' +'Besondere' +'Besitz_' +'Benjamin_' +'Bene' +'Ben_' +'Begründung_' +'Beendigung_' +'Bee' +'Bedrohungen_' +'Bedien' +'Beamten_' +'Basel_' +'Barón_' +'Badezimmer_' +'Back_' +'BRICS_' +'Avenue_' +'Automati' +'Ausgangs' +'Ausgabe_' +'Ausflüge_' +'Augen' +'Aufzug_' +'Aufnahme' +'Ast' +'Assembly_' +'Arte' +'Argumente_' +'Arabs_' +'Aqua' +'Ap' +'Anwendungs' +'Anträge_' +'Allianz_' +'Alex' +'Aktivität_' +'Aktionäre_' +'Act_' +'Absichten_' +'Abschaffung_' +'Abg' +'Abendessen_' +'AGE' +'AAA_' +'= _' +'900_' +'90' +'56_' +'2050_' +'1986_' +'1950_' +'195' +'181' +'176' +'.&_' +'. 
“_' +'+' +'))._' +'", "_' +' {{_' +' ,_' +' &_' +' $ _' +'”' +'“-_' +'“ (_' +'א' +'ө' +'ұ' +'с_' +'ры' +'ны' +'нд' +'в_' +'še' +'üssen_' +'üssel' +'ühren_' +'überschuss_' +'überlegen_' +'überf' +'öm' +'én' +'él' +'äus' +'äufe' +'äst' +'äss' +'ändert_' +'ämpf' +'äl' +'Überlegungen_' +'Überl' +'Überg' +'Är' +'Änderungs' +'zzi' +'zwingen_' +'zweiter_' +'zweifel' +'zutiefst_' +'zuständigen_' +'zus' +'zurückzuführen_' +'zurückf' +'zunehmen_' +'zul' +'zugute_' +'zue' +'zt_' +'zog_' +'zige' +'ziele_' +'zial' +'zentraler_' +'würdig_' +'worry_' +'wood' +'withdrawal_' +'wirklichen_' +'wherever_' +'whenever_' +'wheel_' +'wettbewerbsfähige' +'westliche_' +'wesens_' +'werde' +'wenigsten_' +'wealthy_' +'waves_' +'warrant' +'wann_' +'wand' +'wai' +'wahrscheinliche' +'wachstum_' +'wachs' +'vorzulegen_' +'vorw' +'vorn_' +'vorgeschlagene_' +'vorbe' +'volatile_' +'vist' +'visa_' +'virtue_' +'virtual_' +'villa_' +'videos_' +'victim_' +'vest' +'verwiesen_' +'verwei' +'verwe' +'verwandelt_' +'verwandeln_' +'verwa' +'vertr' +'versteht_' +'versetzen_' +'versa' +'vermittelt_' +'verkehrs_' +'verifi' +'verf' +'verbieten_' +'verbesserte_' +'verband_' +'vera' +'vene' +'varia' +'val_' +'val' +'utz_' +'uted_' +'uste' +'ust_' +'usage_' +'ure' +'uranium_' +'ura' +'upaya_' +'unterstützten_' +'unterst' +'unterschiedlich_' +'unterbreitet_' +'unstable_' +'uns' +'unre' +'unn' +'universe_' +'ungsbe' +'ungsa' +'ungeachtet_' +'unemployed_' +'undenen_' +'unclear_' +'uncertain_' +'unce' +'unabhängige_' +'umgekehrt_' +'umgehen_' +'umen_' +'ull' +'ulat' +'ula_' +'uj' +'uck' +'uchs_' +'uc' +'ubi' +'tätigkeit_' +'tyr' +'typisch_' +'twentieth_' +'tut' +'turno' +'tun' +'tum_' +'tua' +'tta' +'tsunami_' +'träger_' +'tragedy_' +'traf_' +'toxi' +'tot' +'tom' +'tn' +'tliche' +'tigen_' +'tige' +'tien' +'tiefer_' +'tie_' +'threaten_' +'thin' +'thes' +'thermal_' +'there' +'theme_' +'thek' +'theater_' +'tf' +'terrible_' +'terr' +'teri' +'terhadap_' +'tent' +'temperature_' +'tells_' +'teiln' +'teil' +'teh' +'technologischen_' +'techno' +'teacher_' +'tatsächlichen_' +'taten_' +'taste' +'südlichen_' +'sus' +'surprise_' +'supp' +'suggestions_' +'suggestion_' +'sufficiently_' +'succeeded_' +'suc' +'stärksten_' +'stunning_' +'strukturelle_' +'strictly_' +'strengthened_' +'streit' +'stores_' +'stopp' +'stimulat' +'sth' +'steiger' +'steady_' +'station' +'startet_' +'star' +'ssp' +'ssl' +'ssel' +'ssch' +'split_' +'spielte_' +'specialities_' +'sowjetischen_' +'sonn' +'songs_' +'solch_' +'soa' +'smo' +'slow' +'sle' +'sko' +'sk' +'sitze' +'sische' +'sinken_' +'sin_' +'shipping_' +'shing_' +'sher' +'sheer_' +'shareholders_' +'sey_' +'setz' +'sett' +'servi' +'sene' +'sem' +'sely_' +'seits_' +'seit' +'seemingly_' +'seeing_' +'sed' +'sebagai_' +'seas' +'script_' +'schwere_' +'schwachen_' +'schuld' +'schlimmer_' +'schiff_' +'schien_' +'scher' +'sau' +'sarge' +'sanit' +'saf' +'sache' +'räume_' +'rus' +'ruins_' +'ruck' +'rub' +'rts_' +'rtet_' +'roof_' +'rolle_' +'rock_' +'roc' +'rnen_' +'rmo' +'rkte' +'river_' +'riv' +'ring' +'rik_' +'rien' +'rieg' +'restored_' +'responsi' +'reso' +'resist_' +'resign' +'researchers_' +'repression_' +'renewed_' +'relevante' +'reiten_' +'reite' +'reif' +'reichsten_' +'rei_' +'referen' +'redu' +'reco' +'reck' +'realise_' +'real' +'rchi' +'rauch' +'rated_' +'rami' +'radical' +'rab' +'quit' +'quer' +'quelle_' +'pushing_' +'pus' +'pursuing_' +'pure_' +'pto' +'präg' +'provo' +'propaganda_' +'projekte_' +'prohibited_' +'programming_' +'produzieren_' +'proceed_' +'privatis' +'printed_' +'principal_' +'prevail_' +'prev' +'preserv' +'pres' 
+'preference_' +'preferable_' +'prefer' +'precious_' +'prec' +'popula' +'poorer_' +'politician_' +'pola' +'plä' +'plug_' +'plat' +'plane_' +'pla' +'pilot_' +'pig' +'pieces_' +'physi' +'phy' +'perm' +'performing_' +'penalty_' +'pemerintah_' +'ped' +'peacefully_' +'parteien_' +'parlamentarischen_' +'param' +'pane' +'paket_' +'pak' +'pain' +'overwhelming_' +'overview_' +'overs' +'overr' +'ours_' +'ots_' +'oten_' +'ost_' +'oss' +'osa' +'orthodox' +'orn_' +'orge' +'organic_' +'ore_' +'ordnungsgemäß_' +'ordnete' +'oran' +'ont' +'oned_' +'one' +'onder' +'omis' +'om' +'oliti' +'oldest_' +'ohn' +'ohl_' +'ograph' +'officer_' +'occurs_' +'obe' +'nächster_' +'nze' +'num' +'ntly_' +'nth' +'nso' +'nr' +'nowhere_' +'normally_' +'nm' +'nissen_' +'nga' +'neig' +'nehme_' +'neck' +'ndel' +'nd' +'nationalism_' +'national' +'nat' +'nahe' +'nah' +'nachzu' +'nachh' +'nachdrücklich_' +'multimedia_' +'mul' +'mouth_' +'mounting_' +'mos' +'mood_' +'monopoly_' +'modification_' +'modes_' +'mk' +'mittelalterliche' +'mite' +'mist' +'missbrauch' +'mild' +'mie' +'mehrs' +'mega' +'mee' +'medizinische' +'measur' +'mble_' +'maß' +'maybe_' +'maximi' +'mant' +'manage' +'mana' +'mala' +'mailing_' +'mage' +'mach' +'lö' +'lé' +'lusi' +'loyalty_' +'loyal' +'lowering_' +'lou' +'loose_' +'log' +'locker' +'loan_' +'llig' +'literally_' +'linken_' +'line' +'like' +'liefert_' +'lick' +'lichsten_' +'lich' +'liation_' +'lgen' +'letter_' +'leiten_' +'legitime' +'lays_' +'laste' +'lass' +'lant' +'lance_' +'labelling_' +'kurs' +'ktur' +'ktive' +'kte_' +'ks' +'kritische_' +'konsum' +'konstruktive_' +'komplette' +'kommuni' +'kol' +'kno' +'kni' +'knapp_' +'kleinere_' +'klar' +'kit_' +'kilo' +'kehrt_' +'keb' +'kau' +'jä' +'jum' +'jo_' +'jetzigen_' +'jet' +'ject' +'jan_' +'jah' +'izing_' +'its' +'iter_' +'iss' +'israelische_' +'isolation_' +'ishing_' +'ish' +'ised_' +'ironi' +'iro_' +'irischen_' +'invi' +'investieren_' +'intr' +'internasional_' +'interm' +'intensi' +'intense_' +'installieren_' +'insta' +'innocent_' +'initiat' +'ingung_' +'infolge_' +'info_' +'inflation' +'infected_' +'iner' +'indoor_' +'individuellen_' +'individuelle_' +'individuali' +'indig' +'indicates_' +'ind_' +'incorporated_' +'inad' +'implied_' +'imperative_' +'imper' +'imo' +'imme' +'iller' +'ile' +'iko' +'igen' +'ically_' +'höchst_' +'hy_' +'hs' +'hren' +'hou' +'holes_' +'hob' +'hne' +'hly_' +'hinnehmen_' +'hide_' +'hide' +'heutige_' +'hervorgebracht_' +'herum' +'herself_' +'herausge' +'hem' +'hegemony_' +'headquarters_' +'harte' +'harsh_' +'halben_' +'hal_' +'günstige_' +'gues' +'grünen_' +'grund' +'grosse_' +'gross_' +'grand_' +'graf' +'grade_' +'governed_' +'golden_' +'gnen_' +'gm' +'gio' +'gil' +'ggl' +'gewählte_' +'getr' +'gespeichert_' +'gespa' +'geräte_' +'geringen_' +'geregelt_' +'genießt_' +'geniessen_' +'geneti' +'generi' +'geholfen_' +'geführten_' +'gefährlichen_' +'geeigneten_' +'gedacht_' +'gebi' +'gas' +'ganze' +'fünf' +'führten_' +'füg' +'fü' +'fähr' +'funded_' +'functional_' +'fs' +'freundliches_' +'freige' +'freier_' +'fraud_' +'fragt_' +'formuliert_' +'format' +'forderte_' +'focuses_' +'flicht' +'flexib' +'fits_' +'finished_' +'finanzieller_' +'fina' +'fin_' +'fil' +'fifth_' +'festzustellen_' +'festen_' +'fertig' +'ferr' +'fern_' +'feelings_' +'fan_' +'fak' +'fait' +'fahr' +'factory_' +'fac' +'extremen_' +'extrem_' +'explicitly_' +'experiment_' +'existed_' +'exce' +'ewi' +'ever' +'eut' +'europäisches_' +'etzung_' +'ett' +'etic_' +'ethnischen_' +'ethical_' +'essa' +'esc' +'erwerben_' +'erste' +'ernsthafte_' +'ernsthaft_' +'erleichtern_' +'erhältlich_' 
+'erhielten_' +'erfreut_' +'erbe' +'erba' +'erati' +'episode_' +'eous_' +'enve' +'entwurf_' +'enthaltenen_' +'entfernen_' +'entdeckt_' +'entar' +'enta' +'engen_' +'empty_' +'employers_' +'empire_' +'emb' +'elte' +'elo' +'eliminating_' +'elf' +'elect' +'ekt' +'einziges_' +'einschl' +'einladende' +'eingestellt_' +'eing' +'eine' +'eind' +'eig' +'eib' +'ehemalige_' +'egung_' +'egt_' +'egi' +'effektiven_' +'edition_' +'ech' +'ebo' +'ease_' +'dynami' +'dscha' +'dry_' +'druck' +'drew_' +'downtown_' +'doubts_' +'donor_' +'domain_' +'disp' +'disorder_' +'disk' +'discharge_' +'disastrous_' +'direct' +'diplomats_' +'dine' +'dienst_' +'dian' +'diam' +'diagnose' +'detaillierte' +'desirable_' +'desi' +'deri' +'depart' +'deny_' +'denselben_' +'denied_' +'demonstrates_' +'demnächst_' +'deli' +'delegate' +'dein' +'dei_' +'degradation_' +'defin' +'deemed_' +'decrease_' +'deco' +'declining_' +'debts_' +'debating_' +'daz' +'dauert_' +'dauerhafte_' +'dasselbe_' +'dangers_' +'dance_' +'damaged_' +'cyclical_' +'cuti' +'ctions_' +'crowd' +'crack' +'countryside_' +'cos' +'cook' +'conte' +'constructed_' +'connect' +'confusion_' +'confirmation_' +'configuration_' +'confident_' +'condemned_' +'comprises_' +'compliance_' +'complement' +'commande' +'coloni' +'collaborat' +'coincide' +'codi' +'coc' +'clip' +'clarify_' +'cks_' +'cio_' +'cio' +'cian' +'cia' +'choosing_' +'chet_' +'cheaper_' +'char' +'centrally_' +'cells_' +'celebrate_' +'ced_' +'catalog' +'capitalist_' +'cancelled_' +'campaigns_' +'by' +'buying_' +'brutale' +'brutal_' +'bru' +'bro' +'breit' +'breakdown_' +'brain_' +'bra' +'boy' +'bou' +'bold_' +'bod' +'boasts_' +'blo' +'blic' +'bles_' +'blank' +'bitter' +'bisherigen_' +'bir' +'biodiversity_' +'bill' +'bic' +'bewegt_' +'betreu' +'betre' +'besuchte_' +'beste' +'beschä' +'beschreibt_' +'beri' +'berechtig' +'berechnet_' +'bemerkenswert_' +'beliebte' +'beizutragen_' +'begrüßt_' +'begab_' +'beeinträchtigt_' +'bedürf' +'bedingten_' +'bedienen_' +'bedauer' +'beda' +'bec' +'bearing_' +'beantragen_' +'battery_' +'bankers_' +'bahn_' +'bab' +'aß' +'az_' +'awarded_' +'aute' +'ausste' +'ausgestattete' +'ausgesch' +'ausgerichtet_' +'ausgelöst_' +'aufweist_' +'aufs' +'aufn' +'aufh' +'aufgerufen_' +'aufgeh' +'aufbauen_' +'audience_' +'attraction_' +'attend_' +'atten' +'attempting_' +'ator' +'ath_' +'atas_' +'assurance_' +'assuming_' +'assumed_' +'assist_' +'assigned_' +'asiatische_' +'ars_' +'arrogant_' +'arro' +'arin' +'argumentiert_' +'approve_' +'approaches_' +'appreciated_' +'appr' +'appe' +'anzeigen_' +'anwe' +'anste' +'anscheinend_' +'annimmt_' +'anne' +'anlage_' +'anhand_' +'angefangen_' +'angef' +'ane_' +'ane' +'amm' +'amend_' +'ama_' +'allocated_' +'allgemeinem_' +'ality_' +'alist' +'alismus_' +'aktivieren_' +'airports_' +'ahl' +'aggregate_' +'ager' +'agents_' +'age' +'ado' +'adequately_' +'aden_' +'addresses_' +'addict' +'activists_' +'achievements_' +'achievement_' +'accus' +'accelerate_' +'abzielt_' +'abt' +'abstimmen_' +'absolut_' +'abrupt' +'abr' +'abo' +'abhängen_' +'abgew' +'abgestimmt_' +'abgest' +'abf' +'abd' +'abandoned_' +'aa' +'Zuständigkeiten_' +'Zugeständnisse_' +'Zugangs' +'Zoll' +'Zeitalter_' +'Zeichen' +'Yugoslavia_' +'Yan' +'Word_' +'Wo_' +'Westens_' +'Wes' +'Weltk' +'Weil_' +'Websites_' +'Watch_' +'Wart' +'Waren' +'Warcraft_' +'Vorz' +'Vorsitzende_' +'Volksgesundheit_' +'Voi' +'Vista_' +'Veröffentlichung_' +'Versionen_' +'Verschl' +'Versammlung_' +'Verkehrsmittel_' +'Verfügbarkeit_' +'Verfahrens' +'Verbündeten_' +'Vac' +'Until_' +'Unterzeichnung_' +'Unterstütz' +'Unterhaltungs' +'Ungarn_' 
+'Unf' +'Umweltfragen_' +'Umf' +'UT' +'UNG_' +'UB' +'Tö' +'Turn' +'Turk' +'Turb' +'Tschetschenien_' +'Ts' +'Trends_' +'Tourism_' +'Tour' +'Torre' +'Todesfälle_' +'Thomas_' +'Theorie_' +'Tempo_' +'TR' +'TB_' +'Syrian_' +'Swi' +'Swa' +'Störungen_' +'Studium_' +'Struktur' +'Strei' +'Stream' +'Storage_' +'Starfleet_' +'Standort_' +'Spur' +'Spieler' +'Speisen_' +'Sowohl_' +'Southern_' +'Sonne_' +'Sommer' +'Sollten_' +'Smith_' +'Singapore_' +'Sicherheitsa' +'Shin' +'Serbi' +'Select_' +'Sekunde_' +'Scott_' +'Sco' +'Schüler_' +'Schwarz' +'Schulz_' +'Schr' +'Schne' +'Schm' +'Schlafzimmer_' +'Schlaf' +'Sad' +'Sachen_' +'SPA' +'SER' +'Rü' +'Roosevelt_' +'Ronald_' +'Roh' +'Richter_' +'Rhein' +'Rettung_' +'Respekt_' +'Resi' +'Reservierung_' +'Rent' +'Renaissance_' +'Rena' +'Remo' +'Reduzierung_' +'Reaktionen_' +'Ratspräsident_' +'Railway_' +'Rahmens_' +'Rahmenbedingungen_' +'Raf' +'Rab' +'ROM_' +'RG' +'Quick' +'Question_' +'Queen_' +'Qua' +'Qaeda_' +'Pul' +'Prozesse_' +'Protocol_' +'Proteste_' +'Projekten_' +'Privatisierung_' +'Prague_' +'Pr' +'Port_' +'Por' +'Play' +'Pierre_' +'Pha' +'Pflanzen' +'Pfa' +'Peking_' +'Partnern_' +'Paradi' +'Panel_' +'Pack_' +'Pac' +'PS_' +'Over' +'Orts' +'Organ' +'Opti' +'Oper' +'Olympic_' +'Oc' +'OU' +'OT' +'OD' +'Nä' +'Nutz' +'Nor' +'Nokia_' +'Nikon_' +'Niederlanden_' +'Niederlande_' +'Nicolas_' +'Nichts' +'New' +'Never_' +'Napoleon_' +'Nachhaltigkeit_' +'NT_' +'NICHT_' +'ND' +'Mö' +'Muster_' +'Mugabe_' +'Mozilla_' +'Motto_' +'Motor' +'Motiv' +'Moral' +'Monte' +'Mond' +'Modus_' +'Mode_' +'Mitt' +'Milliarde_' +'Mili' +'Michel' +'Metro' +'Merk' +'Ment' +'Mengen_' +'Medic' +'Media' +'Maßstab_' +'Marx_' +'Mars' +'Marokko_' +'Marine_' +'Marc' +'Luxembourg_' +'Love' +'Lou' +'Log' +'List_' +'Liebe_' +'Library_' +'Less' +'Lese' +'Leicht' +'Lehre_' +'Late' +'Lange' +'Lama_' +'Lam' +'LA_' +'Ky' +'Kreuz' +'Kreditkarte_' +'Krediten_' +'Krebs' +'Kranken' +'Kosten' +'Konzept' +'Konvergenz_' +'Kontakt' +'Kongo_' +'Konflikten_' +'Kommissionspräsident_' +'Kommissions' +'Kol' +'Kohlen' +'Koalition_' +'Klar' +'Kei' +'Katastrophen_' +'Katalog_' +'Karls' +'Kamera_' +'Kalten_' +'Jewish_' +'Jesus_' +'Jere' +'Jen' +'Jan_' +'Jacques_' +'Jac' +'Isa' +'Irr' +'Ironi' +'Intervention_' +'Internetzugang_' +'Intergovernmental_' +'Intel_' +'Installer_' +'Innerhalb_' +'Inha' +'Inflationsrate_' +'Immo' +'Images_' +'Ima' +'Identifi' +'Ibiza_' +'IE' +'IC_' +'Hü' +'Hy' +'Hungary_' +'Hum' +'Hub' +'Hot' +'Host' +'Hon' +'Hollande_' +'Hilfen_' +'Hi' +'Herb' +'Heizung_' +'Hea' +'Hauptver' +'Hass' +'Hardware_' +'Harbour_' +'Handlungs' +'HT' +'HS' +'Gö' +'Gäste' +'Gun' +'Griff_' +'Graf' +'Gläubiger_' +'Gläubige' +'Glo' +'Gewiss' +'Geschäft_' +'Geräte_' +'Gepäckraum_' +'Georgien_' +'Geo' +'Gender_' +'Gegenstände_' +'Gefolge_' +'Garantien_' +'Gan' +'Galileo_' +'Freund_' +'Freund' +'Freiheits' +'Frauen' +'Franzosen_' +'Francisco_' +'Forscher_' +'Flugzeug' +'Flor' +'Flo' +'Fleisch_' +'Five_' +'File_' +'Fett' +'Festplatte_' +'Fern' +'Feed' +'Fas' +'Farbe_' +'Fang' +'Fahrrad' +'Extra' +'Exporte_' +'Explorer_' +'Ever' +'Euros_' +'Erz' +'Erst_' +'Erst' +'Erreichung_' +'Erinnerung_' +'Ergebnissen_' +'Englisch_' +'Energiequellen_' +'Energieeffizienz_' +'Enc' +'Emi' +'Elevator_' +'Einsatz' +'Eines_' +'Effekte_' +'Eco' +'EU' +'ECH' +'EB' +'Dubai_' +'Dro' +'Drittländern_' +'Dos' +'Domin' +'Display_' +'Director_' +'Dir' +'Dingen_' +'Dilemma_' +'Diesel' +'Diagnose' +'Desktop_' +'Den' +'Dem' +'Defizit' +'Dau' +'Darstellung_' +'Danish_' +'DAT' +'Custom' +'Crespo_' +'Create_' +'Count' +'Corporation_' +'Consider_' +'Computers' 
+'Computer' +'Clearly_' +'Cle' +'Christmas_' +'Christine_' +'Chin' +'Child_' +'Cher' +'Chen' +'Chaos_' +'Change_' +'Chancengleichheit_' +'Cent' +'Celsius_' +'Cardassian' +'Cannes_' +'Cala_' +'Cai' +'CU' +'CR' +'COM_' +'CC_' +'CB' +'Bü' +'Brust' +'Brief_' +'Brennstoffe_' +'Brennstoff' +'Branche_' +'Brad' +'Botschaften_' +'Borg_' +'Boo' +'Blue' +'Bilanz_' +'Bil' +'Bier' +'Bezahlung_' +'Betreiber' +'Bestimm' +'Beste' +'Besorgnis_' +'Beschäftigten_' +'Beschr' +'Bern' +'Bericht' +'Bereits_' +'Beobachtung' +'Behörde_' +'Bef' +'Balkon_' +'Bai' +'Bag' +'BM' +'Ayatollah_' +'Ay' +'Autor_' +'Austr' +'Auslands' +'Ausf' +'Ausbruch_' +'Ausbeutung_' +'Aufklärung_' +'Aufhebung_' +'Aufenthalts' +'Att' +'Atomwaffen_' +'Ash' +'Arzneimittel' +'Arti' +'Area_' +'Architekt' +'Arbeitsplätzen_' +'Arbeitsmarkt' +'Ara' +'Apo' +'Anspr' +'Ansehen_' +'Anschläge_' +'Anschl' +'Annan_' +'Ank' +'Anfragen_' +'Andre' +'Anbindung_' +'Among_' +'Ameri' +'Alpen_' +'Alp' +'Allow_' +'Allah_' +'Albert_' +'Albani' +'Aktions' +'Adria' +'Abu_' +'Abteilung_' +'About_' +'Abk' +'Abb' +'AVI_' +'AT_' +'AM_' +'AF' +'ACP_' +'?”_' +'== _' +'="_' +': '_' +'99_' +'93_' +'77_' +'76_' +'68' +'65' +'64' +'58_' +'53_' +'51' +'47' +'350_' +'1st_' +'1956_' +'1930_' +'150' +'.&#_' +'. )' +'. "_' +'--' +'- (_' +', '_' +'); _' +'( _' +'% ' +'!!_' +' – ' +' »_' +' «_' +' - ' +' * _' +' %._' +' %' +'€' +'ә' +'ғ' +'ір' +'э' +'ты' +'ки' +'жа' +'ен' +'ге' +'г' +'ал' +'а_' +'α_' +'α' +'ğ' +'č' +'ültig' +'ückte' +'ücher_' +'überz' +'überwachen_' +'überst' +'übermäßige' +'überge' +'überd' +'ø' +'öße' +'ötig' +'ösen_' +'ökonomische_' +'öh' +'öf' +'ín' +'éta' +'ér' +'ça_' +'å_' +'å' +'äußerster_' +'ärkte' +'är_' +'änglich' +'änger' +'änd' +'ällig' +'äft' +'ächtig' +'án' +'ßig' +'ße_' +'Äußerungen_' +'Änderungsanträgen_' +'®' +'« _' +'«' +'}} ' +'zügig' +'züge' +'zz' +'zusammenbr' +'zurzeit_' +'zum' +'zukünftige_' +'zuh' +'zogen_' +'zl' +'zier' +'zia' +'zh' +'zerstör' +'zers' +'zeichne' +'zei' +'zehn' +'zan' +'ypt' +'yl_' +'yacht' +'xp' +'wünscht_' +'wünschenswert_' +'wäch' +'wusste' +'wur' +'worte' +'word' +'wohin_' +'woch' +'wn_' +'wn' +'with' +'wit' +'wisdom_' +'wirk' +'wines_' +'wettbewerbs' +'wett' +'wes' +'wertung_' +'wellness_' +'wellbeing_' +'weißen_' +'weiterzu' +'weitem_' +'watching_' +'watch' +'wasn_' +'warf' +'wahre_' +'wahl' +'wachsende' +'vorschl' +'vornehmen_' +'vorliegt_' +'vorgeschrieben' +'voranzutreiben_' +'voneinander_' +'volunteers_' +'vollz' +'vol' +'virtuelle' +'vin_' +'vielf' +'vie' +'vi_' +'veto_' +'veränderte' +'verweisen_' +'vertrete' +'verständlich_' +'versi' +'verschwinden_' +'verschle' +'verschieden_' +'verschieben_' +'verschie' +'vermieden_' +'vermi' +'verlängern_' +'verlust' +'verletz' +'verlager' +'verkehrs' +'verha' +'verglichen_' +'vergleich' +'verfassungs' +'verdienen_' +'verdi' +'verbindlich_' +'verbesserten_' +'verantwort' +'veran' +'variations_' +'uve' +'usse' +'usel' +'usch' +'urm' +'url' +'uring_' +'urgency_' +'ups_' +'upcoming_' +'uous_' +'unzählige_' +'unweit_' +'unwahrscheinlich_' +'untersucht_' +'unterm' +'unpro' +'unmittelbaren_' +'unmi' +'unknown_' +'univers' +'unhe' +'ungssystem' +'ungsre' +'ungan_' +'unft' +'unfair_' +'unen' +'uneingeschränkt_' +'understands_' +'underscore' +'unbr' +'unanimously_' +'ume' +'ulen' +'ule' +'uer_' +'ud_' +'tür' +'té' +'tungen_' +'tum' +'tub' +'ttel' +'tron' +'tremendous_' +'treatments_' +'traten_' +'transmission_' +'transit' +'transformed_' +'transc' +'transatlantischen_' +'transactions_' +'transaction_' +'trains_' +'trad' +'tou' +'total' +'tory_' +'tolle' +'todo' +'tod' +'tive' 
+'tischer_' +'tire' +'tir' +'tieren_' +'tiefe_' +'tib' +'throw' +'thor' +'tho' +'tge' +'teuer' +'terminal_' +'teria' +'tens' +'tende' +'tema' +'technische' +'techni' +'tc' +'tasty_' +'tar_' +'tankers_' +'talent_' +'tak' +'tai' +'tage_' +'tage' +'systematically_' +'symbolic_' +'sym' +'sy_' +'suspension_' +'surpluses_' +'surg' +'supplement' +'suff' +'sudah_' +'subway_' +'substan' +'stück_' +'stö' +'stuff_' +'stuf' +'studie' +'struktur_' +'stro' +'strikte' +'strikes_' +'stretch' +'streng_' +'straight_' +'straf' +'stoffe_' +'stle' +'stieg_' +'stick_' +'stet_' +'stes_' +'steam_' +'steadily_' +'stea' +'statt' +'statisti' +'stamm' +'stakes_' +'stabile_' +'ssen' +'sprachliche' +'sprachen_' +'spell' +'speed' +'speeches_' +'spectacular_' +'specify_' +'sound' +'soul_' +'sorgfältige' +'sorgfältig_' +'sophisticated_' +'solved_' +'sogenannten_' +'sofa_' +'smi' +'smart_' +'slowly_' +'sive_' +'sinnvolle' +'simpli' +'sim' +'sili' +'signing_' +'signature_' +'sierungs' +'sierte' +'sieg' +'sie' +'sic' +'shut_' +'shortcomings_' +'ship' +'shifts_' +'shee' +'sge' +'sexuelle_' +'settled_' +'sema' +'secondly_' +'seba' +'scrutin' +'screening_' +'scrap' +'schwächere' +'schwäch' +'schwedischen_' +'scholar' +'schließt_' +'sches_' +'schauen_' +'sca' +'saubere' +'satisfied_' +'sak' +'safeguard_' +'rückg' +'rö' +'räge_' +'rui' +'ruh' +'ruch_' +'rter' +'roo' +'romantic_' +'roman' +'roa' +'rnähr' +'rna' +'rm_' +'rka' +'ritten_' +'risiko_' +'rige_' +'richtet_' +'ria_' +'rhe' +'reward_' +'revive' +'revers' +'reveal_' +'returning_' +'retro' +'retr' +'restoration_' +'ress' +'responded_' +'residents_' +'reside' +'reproduc' +'repr' +'repli' +'repa' +'renov' +'renminbi_' +'removing_' +'remind' +'remark' +'relocat' +'reinforce_' +'reiche_' +'register_' +'regierung_' +'regelmäßig_' +'refusal_' +'redistribution_' +'recon' +'recht' +'rechnen_' +'receives_' +'reben_' +'realistische' +'realisier' +'realised_' +'reader_' +'rator' +'ratifiziert_' +'ratification_' +'rapide_' +'ranks_' +'rank' +'rall' +'rahmen' +'ract' +'quoten_' +'quisit' +'quir' +'quie' +'ques_' +'quanti' +'qualifizierte_' +'qualifications_' +'pushed_' +'pup' +'präsentier' +'prozesses_' +'prove' +'protektionistische' +'prosperous_' +'proof_' +'prol' +'projekt_' +'programmen_' +'professionelle' +'prob' +'prize_' +'privilege_' +'printing_' +'preventive_' +'prevail' +'prestigious_' +'preserved_' +'presentation_' +'prescri' +'premature_' +'pragmatic_' +'potenziellen_' +'poss' +'poses_' +'pollut' +'pole' +'polar_' +'po_' +'plural' +'pill' +'pier' +'philosophy_' +'phas' +'pflege' +'pfl' +'pfel' +'pfe_' +'petit' +'pes_' +'persönliche' +'persuade_' +'persone' +'persist' +'perpet' +'permi' +'periode_' +'pere' +'perat' +'pensions_' +'penda' +'pemba' +'pel' +'peacekeeping_' +'patient_' +'passes_' +'partitions_' +'parlamentarische_' +'parity_' +'paren' +'papers_' +'panel_' +'pana' +'painting_' +'own' +'owe_' +'overw' +'oversight_' +'overe' +'outs_' +'outs' +'outl' +'oti' +'ote_' +'oste' +'osen_' +'osc' +'ori' +'operates_' +'opens_' +'openly_' +'opa' +'onym' +'onat' +'onal_' +'ome_' +'ologie_' +'ologi' +'oil' +'ofi' +'offensichtliche' +'odie' +'ock_' +'ochen_' +'och_' +'occupation' +'oca' +'obst' +'observation_' +'obli' +'ober' +'nzi' +'nutrition_' +'ntw' +'nsu' +'nous_' +'nost' +'nos' +'nomin' +'noi' +'nnt_' +'nner' +'nne_' +'nlage' +'nko' +'niu' +'nity_' +'nio' +'nik_' +'nightlife_' +'nien_' +'niedrigere' +'niedrige_' +'ngst' +'ngn' +'nglich_' +'ngel' +'neuro' +'neuesten_' +'nett' +'ness' +'nesian' +'ners_' +'neglig' +'neglect_' +'negativ_' +'necessity_' +'ndete' +'nden' +'naval_' 
+'namens_' +'nah_' +'nada_' +'männ' +'mutige' +'musik_' +'multilateralen_' +'mpin' +'mpf' +'mp_' +'movies_' +'mov' +'mounted_' +'mortgage_' +'monument' +'moment' +'modules_' +'moderni' +'modern' +'mixture_' +'mitge' +'missi' +'mism' +'misc' +'mir' +'minist' +'mining_' +'minimi' +'mina' +'militärisch' +'militant' +'meta' +'mengu' +'mengak' +'menga' +'membu' +'med' +'mechanismen_' +'measurement_' +'matche' +'master' +'massiv_' +'massage_' +'marine_' +'marginal_' +'mapp' +'mali' +'makroökonomische' +'mak' +'mai' +'mah' +'magne' +'magazine_' +'ländliche_' +'lve' +'lv' +'lungs' +'lue' +'luc' +'lua' +'lu_' +'loses_' +'logo_' +'loc' +'lobb' +'lize' +'liza' +'litik' +'liti' +'literar' +'listening_' +'liste' +'list' +'limiting_' +'liebe_' +'licherweise_' +'license_' +'lib' +'lf' +'leute_' +'letting_' +'lending_' +'lem_' +'leitung_' +'leitete_' +'leitet_' +'leis' +'legislat' +'leere' +'lect' +'laute' +'lati' +'latein' +'lager_' +'künstlich' +'käm' +'kta' +'kritisch' +'kris' +'kre' +'kopieren_' +'kontrollierte' +'kontrolle_' +'konkret_' +'konflikt' +'komplette_' +'kommunistische_' +'kolle' +'kenne_' +'kebijakan_' +'karten_' +'kampf' +'kam' +'kali' +'kale' +'kai' +'jüngere' +'jährliche_' +'judgment_' +'judge_' +'judge' +'jegliche' +'jed' +'jas' +'itze' +'itution_' +'ities_' +'istisch' +'ister' +'issen_' +'irt' +'irgendwann_' +'iranischen_' +'ira_' +'ip_' +'iot' +'io' +'invade' +'interpreti' +'interprete' +'interim_' +'interessanten_' +'intellektuelle' +'integrierten_' +'integrieren_' +'integral' +'institutionelle_' +'installer_' +'insch' +'inner_' +'inj' +'inie' +'inglich' +'ingen' +'infringement_' +'informati' +'influential_' +'ineffective_' +'industrielle_' +'induce' +'incredibly_' +'inan' +'imposing_' +'immer' +'ily_' +'illo' +'ildung_' +'ihrerseits_' +'ignoriert_' +'ignorieren_' +'ift' +'iete' +'iere_' +'iere' +'ielen_' +'iehen_' +'iegel' +'idor' +'identical_' +'idealer_' +'icul' +'ichte' +'ices_' +'ican' +'ias_' +'hü' +'hôtel_' +'hunting_' +'hunderte' +'hrung_' +'hre' +'hosted_' +'hospitality_' +'hone' +'holen_' +'hol_' +'hochwertige_' +'hne_' +'hme' +'hm_' +'hle' +'historisch_' +'his' +'hinzugefügt_' +'hint' +'hil' +'hike' +'hierzu_' +'hic' +'heutzutage_' +'hersteller_' +'heri' +'here' +'herausragende' +'height_' +'hearts_' +'health' +'heading_' +'hbo' +'haushalt' +'has' +'harg' +'hard' +'handled_' +'handed_' +'ham' +'hall' +'halb' +'hafte_' +'had' +'habit' +'gutem_' +'gten_' +'greift_' +'grasp_' +'good' +'gni' +'gn_' +'globale' +'gleicher_' +'git' +'gewünschten_' +'gespe' +'geru' +'gericht_' +'gerei' +'gerechter' +'gerechten_' +'geprägt_' +'gepflegt' +'geographical_' +'gent' +'gena' +'gemäßigte' +'gelo' +'geklärt_' +'geist' +'gehandelt_' +'gehabt_' +'gegens' +'geblieben_' +'gebeten_' +'geber' +'gation_' +'ganis' +'gangen_' +'fäll' +'fä' +'fut' +'fus' +'fung_' +'friedliche_' +'freundlicher_' +'freu' +'freez' +'fossile' +'formula_' +'formats_' +'formally_' +'forever_' +'fores' +'foods_' +'fond' +'fließen_' +'fl' +'finnische' +'findings_' +'fig' +'fiel_' +'fi_' +'ffs' +'ffen_' +'fet' +'fern' +'fer_' +'fel_' +'feind' +'fehler' +'federa' +'favourite_' +'favour' +'fassen_' +'fascinating_' +'fantas' +'fals' +'fairen_' +'faire_' +'fahrzeuge_' +'facility_' +'eßen_' +'extremism_' +'externen_' +'externe_' +'express' +'explicit_' +'expertise_' +'experimental_' +'exhibition_' +'executi' +'executed_' +'excluded_' +'exclude' +'ewe' +'evol' +'evo' +'eventuelle' +'eve' +'etzt_' +'eter_' +'ete' +'este_' +'esan' +'erweiterten_' +'erwe' +'ervi' +'ertrag' +'erstklassige' +'erson' +'erschienen' +'errichtete' 
+'errichten_' +'erra' +'erp' +'ernste_' +'erneuer' +'ermutigen_' +'erla' +'erka' +'erin' +'erheben_' +'ergibt_' +'erfü' +'erarbeiten_' +'entscheide' +'entl' +'ensu' +'enm' +'eng' +'ened_' +'enco' +'emphasis' +'emm' +'els' +'ellt_' +'elit' +'eliminated_' +'elf_' +'einse' +'einsch' +'eins' +'einmalige_' +'einig' +'eingereicht_' +'eingeg' +'eingeb' +'einf' +'einbr' +'einbezogen_' +'eigentlichen_' +'eigentliche_' +'eich' +'egel' +'effektiver_' +'ee' +'ecological_' +'echn' +'eche' +'ebu' +'eat' +'earn' +'durchd' +'dunkle' +'duc' +'drittens_' +'dre' +'draw' +'drag_' +'dos' +'donat' +'dominate_' +'dlich' +'dle_' +'disturb' +'distribut' +'distr' +'diss' +'disrupti' +'disi' +'discretion_' +'disappear' +'dip' +'dina' +'digitalen_' +'digitale_' +'dig_' +'differently_' +'dier' +'dich_' +'dge_' +'dge' +'devastating_' +'dete' +'destabili' +'desp' +'design' +'dero' +'derjenigen_' +'derive_' +'dera' +'deposit_' +'deport' +'deployment_' +'deployed_' +'denjenigen_' +'demonstrators_' +'demographic_' +'delle_' +'delicate_' +'defending_' +'defect' +'defeat' +'declined_' +'declare_' +'declar' +'decken_' +'dad_' +'dac' +'cus' +'cultures_' +'cts_' +'crystal' +'cry_' +'courage_' +'coordinat' +'convey' +'convert' +'conv' +'controller_' +'contaminated_' +'consistently_' +'cong' +'conf_' +'comput' +'compulsory_' +'complaints_' +'competent_' +'competences_' +'compatibility_' +'command' +'column' +'colo' +'collected_' +'clothing_' +'clim' +'clicking_' +'clarification_' +'citizenship_' +'cil' +'cigarette' +'cien' +'cial_' +'chung_' +'chte_' +'chs_' +'chronische' +'chro' +'christliche' +'chlich' +'chlag' +'chir' +'chic' +'chemische_' +'chee' +'chart' +'chairman_' +'chafts' +'ces' +'cent_' +'cen' +'cement_' +'cea' +'carrier_' +'cap' +'camp' +'cali' +'burned_' +'bung_' +'buch' +'bt' +'brother_' +'broker' +'breiter_' +'breast_' +'bran' +'brachten_' +'boy_' +'boom' +'booked_' +'blow_' +'blis' +'blick_' +'bless' +'blame_' +'bili' +'bike_' +'bien_' +'bewährte' +'beweist_' +'bewaffneten_' +'bevorzugte' +'betragen_' +'betrachte_' +'bet_' +'beständig' +'bestre' +'beschrieben_' +'beschleunigt' +'besagt_' +'berl' +'berichtet_' +'bereitgestellt_' +'bereich' +'berechtigt_' +'beneficial_' +'benefi' +'benannt' +'benachrichtigt_' +'bemerkt_' +'beliebtes' +'bekämpfung_' +'behörden_' +'befürwortet_' +'befreien_' +'befehl' +'befasst_' +'beeinflusst_' +'bedingungen_' +'bede' +'bedanken_' +'beck_' +'beberapa_' +'bbi' +'baut_' +'bathing_' +'bath' +'basically_' +'banyak_' +'bankr' +'bahan_' +'azi' +'aya_' +'automatic_' +'auto_' +'authorise' +'auszusch' +'auswärtige_' +'ausschl' +'ausreichende_' +'ausreichen_' +'ausgi' +'ausgel' +'aufweisen_' +'aufste' +'aufrecht_' +'aufges' +'auffordern_' +'auber' +'attribute' +'attending_' +'attacking_' +'attached_' +'attach' +'ats_' +'atis' +'atic_' +'aster' +'aste' +'assessments_' +'arts_' +'armo' +'ark' +'aries_' +'ards_' +'archa' +'arbeiter_' +'approval_' +'approaching_' +'appointment_' +'ape' +'anzus' +'anzug' +'anxiety_' +'anu' +'anticipated_' +'anschließend_' +'anschl' +'ansa' +'anr' +'annually_' +'annten_' +'anle' +'anhalten' +'angka' +'anger' +'angenehme' +'anfangen_' +'ando' +'andern' +'ande' +'ances_' +'analyze' +'analyst' +'amtierende' +'ame' +'ambitions_' +'amazing_' +'albeit_' +'alarm' +'ala_' +'akzept' +'aktionen_' +'airlines_' +'ahren_' +'ahr' +'ahl_' +'agte' +'aging_' +'affi' +'advocate_' +'advice_' +'adidas_' +'adhere' +'adapti' +'adapted_' +'adapt' +'acy_' +'actively_' +'act' +'acr' +'acquired_' +'acknowledg' +'achts' +'accura' +'accountable_' +'accountability_' +'accomplished_' 
+'accidents_' +'accident_' +'academic_' +'ac' +'abzulehnen_' +'absolute' +'ablehnen_' +'abh' +'abges' +'abe' +'abba' +'abb' +']] | _' +'Zwischenzeit_' +'Zwei_' +'Zustellbetten_' +'Zusammenfassung_' +'Zusagen_' +'Zugriff' +'Zuge' +'Zimmerbeschreibung_' +'Zertifikat' +'Zeitungen_' +'Zeitplan_' +'Zauber' +'Zahlreiche_' +'Young_' +'Xi' +'XML_' +'Wur' +'Wu' +'Wohnungs' +'Wissenschaftlern_' +'Wirtschaftss' +'Wirtschaftsr' +'Wirtschaftsm' +'Wirtschaftskrise_' +'Wirtschaftsa' +'Wirtschaftlich' +'Winter' +'Wilhelm_' +'Wiederg' +'Wichtig_' +'Who' +'Werte' +'Weltraum' +'Wellness' +'Well_' +'Weiteren_' +'Wechselkurs' +'Wachstumspakt' +'WO' +'WM' +'Voyager_' +'Votum_' +'Vorsorge' +'Vorgänger' +'Vordergrund_' +'Vorbereitungen_' +'Visionen_' +'Vin' +'Village_' +'Vil' +'Viertens_' +'Vid' +'Verwe' +'Vertretung_' +'Versorgungs' +'Versicherungen_' +'Versi' +'Verr' +'Verordnungen_' +'Vern' +'Veri' +'Verheugen_' +'Vergnügen_' +'Vereinigung_' +'Verbrauch_' +'Verantwortungs' +'Ven' +'VP_' +'Ut' +'Untersuchungs' +'Unterricht' +'Unterdrückung_' +'Unsere' +'Unruhen_' +'Unr' +'Unm' +'Unlike_' +'Ungl' +'Una' +'Umstrukturierung_' +'UNESCO_' +'Trä' +'Tower_' +'Touristen_' +'Tourismus' +'Tos' +'Tools_' +'Tom' +'Together_' +'Tisch_' +'Tickets_' +'Throughout_' +'Through_' +'Thro' +'Thi' +'Theater_' +'Terror_' +'Tempora' +'Tel_' +'Tech_' +'Tea' +'Taxi' +'Taten_' +'Tap' +'Tao' +'Tak' +'Tag' +'Tabelle_' +'TU' +'TC' +'Sü' +'Szen' +'Swoboda_' +'Sun_' +'Suche' +'Subject_' +'Stü' +'Sturm_' +'Stri' +'Streben_' +'Stiftung_' +'Steuerung_' +'Sternen' +'Staatsverschuldung_' +'Staatsschulden_' +'Staatss' +'Spiels' +'Spezie' +'Speci' +'Spani' +'Spaltung_' +'Sp' +'Sometimes_' +'Solid' +'Solche_' +'Solarium_' +'Solar' +'Soci' +'Sm' +'Slow' +'Six_' +'Sisko_' +'Single_' +'Singapur_' +'Similar_' +'Signale_' +'Shops_' +'Server' +'Series_' +'Sende' +'Senate_' +'Senat' +'Semi' +'Scien' +'Schöne' +'Schweizer_' +'Schre' +'Schlu' +'Scha' +'Say' +'Saturday_' +'SanDisk_' +'Sam' +'Salzburg_' +'ST_' +'STO' +'STE' +'ST' +'Rüstungs' +'Rule_' +'Rohstoff' +'Rock' +'Robert' +'River' +'Rica_' +'Republikaner_' +'Repo' +'Rental_' +'Renn' +'Rem' +'Rekord' +'Registrierung_' +'Regions_' +'Reduc' +'Rede' +'Recent_' +'Rea' +'RS_' +'RI_' +'RC_' +'Quo' +'Quest' +'QE_' +'Putins_' +'Publik' +'Prävention_' +'Program_' +'Political_' +'Polit' +'Poi' +'Plu' +'Platz' +'Picard_' +'Photo' +'Philip' +'Phasen_' +'Pflege_' +'Petr' +'Pet' +'Pes' +'Personen' +'Perl' +'Pent' +'Peer_' +'Patt' +'Patent' +'Patch' +'Parlament' +'Parks_' +'Parag' +'Palästina_' +'Palacio_' +'Paar_' +'PXI_' +'PM' +'PH' +'PDF_' +'Ozean' +'Ot' +'Oslo_' +'Original_' +'Ordner_' +'Ora' +'Operation_' +'Online' +'Ombudsman_' +'Olympus_' +'Og' +'Offizier' +'Obamas_' +'ODER_' +'Nue' +'Nova' +'Normal' +'Nob' +'Nik' +'Niger' +'Nichts_' +'Neue_' +'Netzwerke' +'Neigung' +'Nehmen_' +'Nan' +'Nahrungsmittel_' +'Nachrichten' +'Nachfolger_' +'Nachf' +'NP' +'NH' +'NF' +'Mü' +'Möglich' +'Männern_' +'Mund' +'Multi_' +'Mountain' +'Motion_' +'Mord_' +'Monats_' +'Moldova_' +'Moham' +'Modul_' +'Mode' +'Mod' +'Mittag' +'Mitgliedsländer_' +'Mitentscheidung' +'Michel_' +'Messen' +'Messe_' +'Mes' +'Mem' +'Meinungsumfragen_' +'Medi' +'Mea' +'McC' +'Mazedonien_' +'Maus' +'Materialien_' +'Marco_' +'Map' +'Manu' +'Manche_' +'Male' +'Make_' +'Maha' +'MH' +'ME_' +'MED' +'Lösungs' +'Länge_' +'Län' +'Luxus_' +'Lula_' +'Lui' +'Luftverschmutzung_' +'Luftraum' +'Low_' +'Louis_' +'Look_' +'Logo_' +'Loc' +'Lobby_' +'Lis' +'Linke_' +'Lif' +'Leuten_' +'Lesung_' +'Leone_' +'Lektion_' +'Leiden' +'Leid_' +'Leader' +'Lauf' +'Last' +'Lands' +'Lah' 
+'LT' +'LD' +'Körper' +'König' +'Kurd' +'Kreis' +'Kre' +'Kontinents_' +'Kontin' +'Konsumenten_' +'Konst' +'Konflikts_' +'Konferenz' +'Kom' +'Kolonial' +'Koll' +'Kohle' +'Know_' +'Know' +'Klin' +'Keynesian_' +'Kel' +'Kein_' +'Kau' +'Kath' +'Kapitalismus_' +'Kanal' +'Kam' +'Kaffee' +'Kabel_' +'KON' +'Juli' +'Jugendlichen_' +'Jud' +'Jews_' +'Jakob' +'Ist' +'Islamist_' +'Ir' +'Investment_' +'Intera' +'Instanz_' +'Instan' +'Installations' +'Inhalte_' +'Inflations' +'Industriep' +'Impl' +'Impf' +'Immunität_' +'Imagin' +'Illusion' +'Il' +'IV_' +'ISIS_' +'IN_' +'ICEcat_' +'IA_' +'Höhen' +'Häufig' +'Häfen_' +'Hyatt_' +'Hop' +'Hollywood_' +'Holland_' +'Hohen_' +'Hisbollah_' +'Hinweise_' +'Hinter' +'Hindernis_' +'Hil' +'High' +'Herz_' +'Herrsch' +'Herbst_' +'Her_' +'Hen' +'Heinrich_' +'Hat_' +'Hard' +'Hannover_' +'HI_' +'Guinea_' +'Guatemala_' +'Grupp' +'Greens_' +'Greeks_' +'Glücklicherweise_' +'Gibt_' +'Gewinner_' +'Gewinn_' +'Gewalt' +'Gesetzes' +'Gerichten_' +'Generalsekretär_' +'Genau_' +'Gemeinw' +'Gemeinschaftsp' +'Gemein' +'Geiste' +'Gegenwärtig_' +'Gegenwart_' +'Gee' +'Gaz' +'Gaulle_' +'Ganze_' +'Gall' +'G20_' +'Förder' +'Föderation_' +'Fußball' +'Funktionsweise_' +'Fundamental_' +'Frie' +'Freundschaft_' +'Freitag_' +'Freihandelsabkommen_' +'Freihandels' +'Franklin_' +'Format' +'Folgendes_' +'Folge' +'Flug_' +'Flexibilität_' +'Fis' +'Find' +'Finance_' +'Fehler' +'Fee' +'Fau' +'Fass' +'Fam' +'Fakten_' +'Fahrer_' +'Face' +'FM' +'FD' +'Explosi' +'Experience_' +'Evans_' +'Eva' +'Eu' +'Ethi' +'Estonia_' +'Esc' +'Erwärmung_' +'Erste' +'Erscheinung_' +'Erl' +'Erkenntnis_' +'Erfind' +'Erdgas' +'Erbe_' +'Entschlossenheit_' +'Entr' +'Entlastung_' +'Ens' +'End_' +'Empfänger_' +'Emp' +'Elf' +'Electronic' +'Einzelnen_' +'Einw' +'Einladung_' +'Einigkeit_' +'Eigentümer' +'Ehre_' +'Ehr' +'Ecuador_' +'Ebenen_' +'EWG_' +'ENT_' +'Dé' +'Dusche_' +'Durch' +'Dur' +'Drei_' +'Dre' +'Donnerstag_' +'Dona' +'Don' +'Disabled_' +'Differenzen_' +'Differenz_' +'Diesen_' +'Diag' +'Demonstranten_' +'Demokrati' +'Deflation_' +'Defense_' +'Deck' +'Davi' +'Darau' +'DP' +'Cup_' +'Crew_' +'Countries_' +'Cooperation_' +'Converter_' +'Continental_' +'Cont' +'Congo_' +'Conce' +'Compa' +'Come' +'Colon' +'Citizens_' +'Cin' +'Christ' +'Chr' +'Chicago_' +'Chemi' +'Cer' +'Cau' +'Carolin' +'Carlo_' +'Car_' +'CEO_' +'CDs_' +'Bühne_' +'Bushs_' +'Bull' +'Bulgarian_' +'Buchung_' +'Brut' +'Browser_' +'Brit' +'Boutique_' +'Bot' +'Boh' +'Binnenmarktes_' +'Bin' +'Beweg' +'Betrug_' +'Betroffenen_' +'Besteuerung_' +'Beschwerden_' +'Besatzung_' +'Bernanke_' +'Berliner_' +'Belo' +'Bekanntlich_' +'Beit' +'Beförderung_' +'Beamte_' +'Basic_' +'Bara' +'BL' +'Azer' +'Autor' +'Australien_' +'Austausch_' +'Ausgaben' +'Ausb' +'Augenmerk_' +'Auftr' +'Aufs' +'Aufr' +'Assa' +'Asiens_' +'Asi' +'Archer_' +'Arbeitss' +'Arbeitskräften_' +'Arabischen_' +'Arabien_' +'Arabi' +'Araber_' +'Appe' +'Anstatt_' +'Anna' +'Anh' +'Angola_' +'Ander' +'Amts' +'Amm' +'American' +'Alternativen_' +'Alpe' +'Akzeptanz_' +'Aktionsplan_' +'Akte' +'Airways_' +'Addi' +'Achtung_' +'Account' +'Abz' +'Abst' +'Abs' +'Able' +'Abgesehen_' +'Abfall' +'Abenteuer_' +'Abbau_' +'AND_' +'AG' +'AB_' +'A6_' +'>' +'==' +'87_' +'83_' +'76' +'74' +'69_' +'66' +'44' +'41' +'38' +'360_' +'1988_' +'1984_' +'1980er_' +'198' +'1978_' +'1975_' +'1970er_' +'1940_' +'1920' +'178' +'002' +') ._' +'%_' +'") _' +' “ _' +' » _' +' ``_' +' = [[_' +' --> _' +'…' +'”)' +'ү' +'ын' +'ыл' +'ті' +'ти' +'са' +'рі' +'ро' +'ре' +'ос' +'ле' +'ке' +'ес' +'да_' +'бол' +'ба' +'ай' +'Б' +'ι' +'üs' +'ürd' +'ündig' +'üllung' +'ühr' 
+'üge' +'ückt_' +'übt_' +'üblichen_' +'überzeugende' +'überwacht_' +'überschreiten_' +'überleben_' +'überh' +'überb' +'öss' +'öse_' +'ór' +'ña' +'ê' +'éri' +'äußere' +'äuft_' +'ätzung_' +'ätte' +'ärk' +'älteste_' +'älter' +'ält' +'äisch' +'ähnlichen_' +'ächte' +'äc' +'ä_' +'â' +'ár' +'ße' +'Übersetzungs' +'Überschuss_' +'Übereinkunft_' +'° _' +'}} _' +'{' +'zza_' +'zwölf_' +'zweite' +'zusätzliche' +'zustellen_' +'zustande_' +'zurückz' +'zurecht' +'zukünftig' +'zufrieden' +'zte' +'zones_' +'zon' +'zis' +'zip_' +'zio' +'zil' +'zig_' +'zierungs' +'zielen_' +'ziel' +'zeug_' +'zersch' +'zent' +'zeitge' +'zauber' +'yo_' +'yn_' +'xy' +'wäh' +'wunderschönen_' +'writ' +'worauf_' +'wooden_' +'wm' +'witnessing_' +'witnessed_' +'withdraw_' +'wissens' +'wirtschaftspolitische' +'wirtschaftliches_' +'wirt' +'winner_' +'wind' +'width_' +'widerspiegelt_' +'wi_' +'whol' +'westlich_' +'wertvollen_' +'werten_' +'wert' +'weltweite' +'welle' +'welcomed_' +'weiß' +'weich' +'wede' +'wed_' +'wechseln_' +'weakened_' +'weak' +'wd' +'wartet_' +'warned_' +'wan_' +'wage' +'wachstums_' +'vr' +'vorschlägt_' +'vorschläge_' +'vorsch' +'vorr' +'vorherige_' +'vorgegeben' +'vorgeb' +'vorg' +'vora' +'volks' +'void' +'vity_' +'visits_' +'visitor_' +'violations_' +'violation_' +'ville_' +'viewed_' +'view' +'vierte' +'vet' +'veröffentlichten_' +'verwu' +'verwirklichen_' +'verwendeten_' +'verträge' +'vertraut_' +'vertrauen_' +'vertrag_' +'verteilen_' +'verstr' +'versammlung_' +'verp' +'vernichte' +'vermögen_' +'verlä' +'verliehen_' +'verheerende' +'vergi' +'vereint_' +'vereinfacht' +'verde' +'verbreiteten_' +'verbreite' +'verbrauche' +'verbrauch' +'verbl' +'verbindung' +'verarbeitung' +'verantwortungsvolle' +'verabschieden_' +'vention' +'veness_' +'vegetables_' +'vat' +'vary_' +'variant' +'valley_' +'uß_' +'uß' +'utzte' +'utiliz' +'utilit' +'uten_' +'usu' +'uropa_' +'uro' +'urg_' +'urce' +'uran' +'ural' +'upt' +'upgrading_' +'upgraded_' +'updates_' +'unvermeidlich_' +'unv' +'unum' +'unterliegt_' +'unsp' +'unmittelbare_' +'unm' +'unli' +'unl' +'unktion' +'unkt' +'unkonventionelle' +'unklar_' +'unk' +'universelle' +'unif' +'ungsw' +'ungsr' +'ungsprogramm' +'ungsmaßnahmen_' +'ungsk' +'ungleiche' +'unfa' +'unein' +'undertaking_' +'undertaken_' +'understandable_' +'underp' +'undermining_' +'uncon' +'unch' +'unbekannte' +'umu' +'umi' +'umfassende' +'umer' +'umbu' +'ult_' +'uldig' +'uku' +'uin' +'uh_' +'uft_' +'uen' +'uble_' +'uber' +'uat' +'uali' +'ual' +'tzten_' +'typ' +'tv' +'turm_' +'tune' +'tu_' +'tst' +'trum' +'truck_' +'trocken' +'triggered_' +'trigger_' +'tric' +'treffen' +'treat' +'trea' +'traum' +'transportier' +'transnational_' +'translated_' +'translate_' +'transform_' +'tragische' +'traditionell_' +'traditionally_' +'tract' +'tour' +'tos_' +'tm' +'tlin' +'tipp' +'tili' +'tighte' +'tight' +'thriv' +'threatening_' +'thread_' +'thought' +'thick' +'therapie' +'theoretische' +'themes_' +'thanking_' +'testen_' +'teru' +'terk' +'terie' +'tere' +'tends_' +'tenden' +'tell' +'tel' +'teilzunehmen_' +'technisch_' +'technik_' +'teach_' +'tausende_' +'taus' +'tatsächliche_' +'tation_' +'tat_' +'tant' +'tank_' +'tangible_' +'tande' +'tand_' +'tam' +'taatliche' +'szen' +'symptom' +'sympath' +'switched_' +'swee' +'suspicion_' +'surveys_' +'superb_' +'summ' +'suit_' +'suicide_' +'sui' +'subsid' +'stärkt_' +'städte' +'stunde_' +'strukturen_' +'strongest_' +'stressed_' +'strengths_' +'strecke' +'streben_' +'stischen_' +'stige' +'stete' +'stern_' +'steri' +'stereo' +'stepp' +'stens_' +'stellten_' +'stel' +'stehende_' +'state' +'starb' +'stands' 
+'stair' +'stagnation_' +'stabilen_' +'sst' +'ssions' +'ssion' +'squa' +'squ' +'späte' +'spre' +'spo' +'spite_' +'spezifisch' +'speziellen_' +'spezi' +'specific' +'specialize' +'spec' +'spare_' +'soziales_' +'south' +'somewhere_' +'sociali' +'sno' +'smart' +'sman' +'slu' +'slogan' +'slo' +'slightly_' +'sleeping_' +'sku' +'situationen_' +'sions_' +'sinkende' +'simulation_' +'simi' +'signat' +'sicherer_' +'shu' +'shortly_' +'shoe' +'shelter' +'shell_' +'shame' +'shake' +'sg' +'sexuellen_' +'sex_' +'sevent' +'sessions_' +'separate' +'sep' +'sentence_' +'sende' +'sel_' +'sektors_' +'sees_' +'sections_' +'secondary_' +'secara_' +'script' +'schützt_' +'schwächen_' +'schwi' +'schwarz' +'schnelles_' +'schloss_' +'schließe_' +'schlich' +'schlechte_' +'schle' +'schic' +'schem_' +'schein' +'schei' +'schalte' +'schaftliche' +'schafft_' +'scare' +'sc' +'sba' +'sat_' +'santa_' +'sang' +'sammeln_' +'samkeit' +'salt_' +'sailing_' +'sagten_' +'safely_' +'safeguard' +'saat_' +'räng' +'räger' +'räft' +'rup' +'rud' +'rtung_' +'rteil' +'rounds_' +'ros_' +'rop_' +'rogramm' +'rod' +'robot' +'robe' +'road' +'rne' +'rke' +'riskier' +'riots_' +'rim' +'right' +'rig_' +'richte' +'rian' +'rez' +'reveals_' +'retail' +'resum' +'restrict_' +'restraint_' +'resso' +'ression' +'respektieren_' +'resiste' +'resilien' +'reservations_' +'republic' +'representing_' +'renovated_' +'reno' +'remuneration_' +'remit' +'reminiscent_' +'reminded_' +'reliability_' +'relative' +'relationships_' +'relat' +'rejection_' +'reit' +'reisen_' +'reine' +'reihe' +'reifen_' +'reichlich' +'reha' +'regen' +'regelungen_' +'rega' +'reformist' +'reformi' +'reflecting_' +'refle' +'referendums_' +'recourse_' +'reconciliation_' +'reconcile' +'reckung' +'recipient' +'rechung' +'reche' +'recal' +'rebel' +'reasonably_' +'reap' +'ream' +'realistische_' +'reale_' +'readily_' +'rbeit_' +'ratione' +'rational' +'ras_' +'ras' +'rangi' +'raises_' +'radar_' +'rachte' +'rach' +'race' +'rac' +'rable_' +'quotas_' +'quip' +'quest' +'py_' +'py' +'pursuit_' +'punish' +'pun_' +'pull' +'pte' +'psychische' +'prüf' +'präsident' +'protest_' +'protects_' +'pros' +'prophe' +'properties_' +'produkte_' +'produkt' +'produk' +'proceed' +'prisons_' +'printer_' +'prevents_' +'pretty_' +'presidents_' +'preparing_' +'premium_' +'prejudice_' +'predictable_' +'prechen_' +'preach' +'power' +'poten' +'pot_' +'possession_' +'posi' +'populär' +'popularity_' +'pop' +'polo' +'poll' +'polic' +'poker_' +'pointing_' +'pocket' +'poc' +'plätze_' +'pläne_' +'pity_' +'pis' +'pipe' +'pine' +'pilot' +'pi_' +'physici' +'pharmaceutical_' +'phan' +'pha' +'ph_' +'pflanz' +'personen_' +'personali' +'permission_' +'perception_' +'perc' +'pent' +'pene' +'pemimpin_' +'peer_' +'pedia_' +'pear' +'pazi' +'patr' +'pat_' +'participated_' +'partei_' +'park' +'panoramic_' +'panisch' +'panic_' +'ows_' +'owner_' +'owed_' +'ove' +'outset_' +'outline_' +'outcomes_' +'outbreak_' +'ound' +'otr' +'otel_' +'ota' +'ossene' +'osition' +'orth' +'orm_' +'orium_' +'orientiert_' +'organisieren_' +'ordnungsgemäße' +'ordert_' +'ord_' +'orbit' +'oppress' +'opp' +'operator_' +'opera_' +'ont_' +'onisch' +'omp' +'omin' +'olt' +'ological_' +'olge' +'ole_' +'oku' +'oin' +'oid_' +'oi_' +'ohner' +'ogi' +'offset_' +'odo' +'oci' +'obtaining_' +'obi' +'oat' +'nützliche_' +'näh' +'näch' +'nza' +'nutzt_' +'nutzbar_' +'nummer_' +'nuklearen_' +'nuestr' +'ntsch' +'nting' +'ntin' +'notification_' +'nor' +'nomi' +'noisy_' +'nna_' +'nka' +'nitte' +'nisses_' +'ning' +'niederländischen_' +'ngly_' +'ngka' +'nger_' +'nge' +'newspapers_' +'newsletter_' 
+'neue' +'nera' +'neo_' +'nel_' +'nein' +'neighbor' +'neigen_' +'nehm' +'negotiating_' +'negotiated_' +'nego' +'neg' +'nec' +'ndige' +'nders' +'nded_' +'ndan' +'nca' +'nba' +'navi' +'nationalist_' +'nary_' +'nal_' +'nacht' +'nachfolgenden_' +'nac' +'müt' +'mündlichen_' +'müh' +'möge_' +'mäßige' +'mäßig_' +'myster' +'mysql' +'music' +'muscle' +'mung' +'multinational_' +'multinational' +'muda' +'mt' +'mpt' +'mouse_' +'mos_' +'moon_' +'moo' +'monopol' +'monitored_' +'moni' +'modula' +'modernisier' +'mmt_' +'mmen' +'mma' +'mix' +'mitzu' +'mitteln_' +'mitteilen_' +'mitte' +'mitglieder_' +'misu' +'mischen_' +'mirror' +'ming' +'mineral_' +'mind' +'mill' +'migra' +'mien' +'midst_' +'micro_' +'methoden_' +'mesis' +'ment' +'mening' +'menge' +'memba' +'melalui_' +'mel_' +'meister' +'meistens_' +'mei' +'mehrfach_' +'mehrerer_' +'media' +'mechanismus_' +'meantime_' +'mbur' +'mayor_' +'mature_' +'master_' +'masse' +'maschinen_' +'masa_' +'market' +'marken' +'march' +'maps_' +'manufacturer_' +'manipulation_' +'manifest' +'mangelnde_' +'mangel' +'mandatory_' +'mall' +'malig' +'maler' +'mais_' +'maintain' +'mainstream_' +'magni' +'mac' +'lüge' +'lü' +'löst_' +'längst_' +'läng' +'ländische' +'länd' +'lz' +'lw' +'luxuriöse_' +'lust' +'lungen_' +'luen' +'luar_' +'ltung_' +'loud_' +'lone' +'lon' +'lokal_' +'loka' +'logisti' +'logie_' +'logg' +'locked_' +'lock_' +'lob' +'loaded_' +'ln' +'lli_' +'lively_' +'lis_' +'liquid_' +'liquid' +'likelihood_' +'lik' +'liegenden_' +'liefer' +'liederung_' +'licht_' +'libr' +'liberalization_' +'liability_' +'lfe' +'leverage_' +'level' +'lette_' +'lend_' +'leichter' +'leicht' +'legitimier' +'lec' +'lebenslange' +'lds_' +'lava' +'laud' +'lateralism' +'lasse' +'lase' +'lar_' +'langjährigen_' +'langfristige' +'lamp' +'label_' +'kürz' +'künstlerische' +'kühne' +'kör' +'kuli' +'ktions' +'kräftig' +'krä' +'kritischen_' +'kraft' +'kr' +'kos' +'korrigieren_' +'konzept' +'kontrollen_' +'kontro' +'kontinuierliche' +'kont' +'konsolidier' +'konservativen_' +'konkurr' +'konkreter_' +'kong' +'kondi' +'komplizierte' +'kommerzielle_' +'kommen' +'komfortablen_' +'komfortable_' +'kombiniert_' +'kollektiven_' +'knowing_' +'klingt_' +'klassischen_' +'klassische_' +'klasse_' +'kki' +'kis_' +'kirche_' +'kins_' +'kingdom_' +'kilometers_' +'kie' +'kg_' +'keys_' +'keu' +'kes' +'kere' +'kenn' +'kemajuan_' +'keinesfalls_' +'keinem_' +'keeps_' +'kb' +'kate' +'kapazität' +'kap' +'kalte' +'jüng' +'jährlichen_' +'just' +'jurisdiction' +'junger_' +'junct' +'jug' +'judiciary_' +'judges_' +'ju_' +'jekt' +'jak' +'jahres' +'ié' +'izier' +'ix_' +'ives_' +'iven_' +'itten_' +'ito_' +'ition' +'istung' +'ista' +'iss_' +'isla' +'isierte' +'isches_' +'irtschaft' +'irakischen_' +'ionary_' +'ional' +'ion' +'investitionen_' +'investigate_' +'invention' +'inv' +'inu' +'interp' +'interna' +'intern' +'interinstitutional_' +'interact' +'intensiven_' +'integrity_' +'integrierte_' +'integrate_' +'insurgents_' +'insure' +'insu' +'insti' +'installi' +'inspections_' +'insecurity_' +'innovat' +'inmitten_' +'inkl' +'inhalt' +'inhal' +'inhaftiert' +'inge' +'influenced_' +'inen_' +'inefficient_' +'industri' +'indispensable_' +'indirekt_' +'indebted' +'ind' +'incident_' +'inch_' +'inacti' +'improv' +'imported_' +'immune_' +'immo' +'immens' +'imm' +'imi' +'ilt' +'illusion' +'ille_' +'ilig' +'ilfe_' +'ild_' +'ild' +'ila' +'iki' +'ikan_' +'ii' +'iha' +'igung' +'igu' +'ignoring_' +'igi' +'iff' +'iet' +'iest_' +'ies' +'ierter_' +'ierende' +'ieht_' +'ieb' +'ids' +'ideologi' +'icon_' +'ico' +'ickl' +'ichtung_' +'ichtung' +'ibt_' +'iati' 
+'iPod_' +'iPhone_' +'höhere' +'hängige' +'händler_' +'hypo' +'hybrid_' +'hunger_' +'hung' +'humanitäre_' +'hum' +'hub' +'hu_' +'hrte_' +'hospital_' +'horror' +'hop_' +'honor' +'hon' +'holdings_' +'ho_' +'hnung' +'hlen_' +'hle_' +'hir' +'hinweg_' +'hinein_' +'hind' +'hiking_' +'hielten_' +'hidup_' +'herzustellen_' +'herunter_' +'herrliche' +'herr' +'herbei' +'herb' +'heran' +'henden_' +'hell_' +'heli' +'heiz' +'heim_' +'heed_' +'heating_' +'heart' +'hazardous_' +'harmoni' +'happi' +'handful_' +'hamper' +'hak_' +'gänger' +'guard' +'gründlich_' +'größter_' +'grundlegender_' +'grundlegend_' +'grouping' +'grobe' +'grip' +'grenzüberschreitenden_' +'grenzt_' +'grenze_' +'greifende' +'gratis_' +'graphi' +'graph_' +'got' +'gna' +'gly' +'glu' +'globally_' +'globaler_' +'glieder' +'gliche' +'glaubt_' +'glad_' +'gipfel' +'gingen_' +'ging' +'gien' +'ghter_' +'ghte' +'ggi' +'gezielte' +'gewährt_' +'gewo' +'gewisser' +'gewidmet_' +'gewer' +'gew' +'gesundheitliche' +'gestü' +'gestattet_' +'gestatten_' +'gesorgt_' +'gesetzgeb' +'geschwächt_' +'geschlossene' +'geschafft_' +'gesamt' +'gentl' +'genetische' +'generating_' +'genera' +'genauer_' +'gemütliche_' +'gemi' +'gemeinschaft_' +'geln_' +'gelegentlich_' +'gelder_' +'gelangt_' +'geko' +'gekennzeichnet_' +'gegenseitige' +'gefährlicher_' +'gefr' +'gebühren_' +'gebra' +'gebaut_' +'gases_' +'gard' +'garage_' +'gangs_' +'gabe_' +'fünfzig_' +'fühl' +'fällig_' +'fähig_' +'futures_' +'funktion_' +'funk' +'functionality_' +'frühere_' +'früh_' +'freilich_' +'freedoms_' +'frau' +'fraction_' +'fr' +'founding_' +'fought_' +'fossilen_' +'forming_' +'formen_' +'formation' +'form' +'forgotten_' +'forge' +'foreigners_' +'followers_' +'folg' +'fol' +'flood_' +'fizier' +'fixes_' +'fitt' +'fit' +'firm' +'finali' +'film' +'file' +'fh' +'fg' +'fetch' +'festzulegen_' +'fests' +'fenster_' +'felder_' +'feier' +'fehlen_' +'feels_' +'feedback_' +'fax_' +'favourable_' +'faszinierend' +'fas' +'farms_' +'far' +'familiengeführte' +'famili' +'falschen_' +'faktor' +'eye' +'extr' +'extending_' +'expulsion_' +'export' +'exploit_' +'expla' +'experiment' +'existiert_' +'exhaust' +'exemption_' +'exacerbate' +'evolution_' +'eventuell_' +'evan' +'evaluate_' +'euren_' +'eu_' +'etu' +'eto' +'etch' +'eta_' +'esten_' +'este' +'ession_' +'espa' +'esk' +'esen_' +'esen' +'erö' +'erzeug' +'erz' +'erwi' +'erweitert_' +'ert' +'ersucht_' +'ersuchen_' +'erstreckt_' +'erstre' +'erschw' +'erregend' +'erre' +'ernannt_' +'ermutigt_' +'erleb' +'erlangt_' +'erklärung_' +'erkennbar' +'erke' +'eria' +'ergänzt_' +'erge' +'erg_' +'erfolgte_' +'erfa' +'erbringen_' +'equi' +'equ' +'enw' +'entwicklung_' +'entstand_' +'entsprechende' +'entspannen_' +'entsch' +'ently_' +'entlich' +'entlassen_' +'entgegenzu' +'entertain' +'ente' +'ental' +'enor' +'eno' +'enme' +'enjoys_' +'enjoyable_' +'enheit_' +'engi' +'enforce_' +'endgültig_' +'endeavour' +'enda' +'encourages_' +'enburg_' +'employ' +'emie_' +'ement' +'embr' +'embo' +'ember' +'embargo_' +'ella_' +'ella' +'elites_' +'eligible_' +'eless' +'elektrische' +'electronic' +'elb' +'eise_' +'einzelner_' +'eint' +'einnehmen_' +'einiges_' +'eingetr' +'eingebe' +'eile' +'eight' +'eidung' +'eide' +'ehnen_' +'eg_' +'efizit' +'effizienten_' +'een' +'eel' +'edu' +'edl' +'eden_' +'ecken_' +'ecke' +'eben' +'earnings_' +'ean_' +'eager_' +'dys' +'dy' +'dt' +'dropped_' +'droht_' +'dringende' +'dream' +'drau' +'drafted_' +'downward_' +'dow' +'doubled_' +'donors_' +'donations_' +'dokument' +'doctrine_' +'dit_' +'districts_' +'distortion' +'distant_' +'disruption_' +'displays_' +'dismantl' 
+'dish' +'disen' +'discret' +'discredit' +'diplomatische_' +'din_' +'din' +'differentia' +'diesbezüglichen_' +'did' +'dicht_' +'dial' +'dhi' +'dha' +'dez_' +'deux_' +'deutet_' +'deteriorati' +'detected_' +'desired_' +'descent_' +'desa' +'derselben_' +'derivative' +'deregulation_' +'deprived_' +'deno' +'dene' +'dema' +'delivers_' +'delivering_' +'defende' +'deepening_' +'deckung' +'dec' +'debtor' +'debatte' +'deadl' +'daten' +'dateien_' +'databases_' +'dara' +'dan' +'damp' +'damages_' +'daf' +'cy' +'cult' +'cul' +'criticized_' +'criminals_' +'creature_' +'cow' +'counterparts_' +'counterfeit' +'council_' +'cost' +'coole' +'cool_' +'conversion_' +'convention_' +'contradict' +'contin' +'conten' +'construct_' +'constrain' +'consiste' +'considers_' +'conservation_' +'consequently_' +'confused_' +'confront' +'config' +'confi' +'conduc' +'condemn_' +'condemn' +'concludes_' +'conci' +'comprehen' +'compound' +'compli' +'complexity_' +'completion_' +'complain' +'compile' +'competing_' +'competence_' +'compari' +'compare_' +'compar' +'compa' +'communist_' +'communicate_' +'communi' +'commonly_' +'comment' +'combines_' +'college_' +'coherence_' +'cog' +'coa' +'closest_' +'cliff' +'cleane' +'clau' +'classical_' +'clara_' +'claiming_' +'cks' +'ckel' +'cked_' +'citi' +'circum' +'ciones_' +'cier' +'cide' +'chä' +'chsen' +'chose_' +'chnungen_' +'chil' +'chief' +'chest' +'chemicals_' +'checks_' +'chauen' +'chaften_' +'ceremon' +'cer_' +'cease_' +'caution_' +'catastrophe_' +'casinos_' +'cari' +'care' +'capt' +'capitals_' +'capitali' +'cano' +'cac' +'cabin' +'bürgerliche' +'butt' +'bust' +'burn_' +'bureaucrats_' +'bull' +'broke_' +'brochure' +'brilliant_' +'bread_' +'branche_' +'bour' +'borrowing_' +'bombing' +'bombe' +'bolster' +'blocked_' +'block' +'bl' +'biot' +'billions_' +'bezug_' +'bewusste' +'bewegung_' +'bewa' +'bevorzug' +'beu' +'betriebe_' +'betrieb_' +'betrachtete' +'betra' +'besucht_' +'bestä' +'bestimmter_' +'besticht_' +'beso' +'beschaff' +'berufen_' +'berp' +'bereiten_' +'bereiche_' +'berei' +'bemerkenswerte' +'beliebige' +'belie' +'bekannter' +'beizu' +'beis' +'behold_' +'beh' +'begleiten_' +'beginne' +'begehen_' +'began' +'befr' +'beeinträchtigen_' +'bedeutendsten_' +'beauftragt' +'bears_' +'beanspruch' +'bble' +'bay_' +'basierte_' +'bargain' +'baltischen_' +'balcony_' +'balancing_' +'bak' +'bach' +'ax_' +'avo' +'außer' +'außenpolitische' +'automo' +'automatische' +'auszur' +'auszuführen_' +'auswe' +'auslösen_' +'ausgewählt_' +'ausgenutzt_' +'auft' +'aufrechterhalten_' +'aufl' +'aufkommen_' +'aufget' +'aufgebaut_' +'auen_' +'audit' +'attracti' +'ats' +'ata' +'astr' +'assignment' +'asser' +'asia' +'ase' +'ars' +'arrive_' +'arre' +'arme' +'arit' +'arises_' +'arische' +'aren_' +'architekt' +'architect' +'arche' +'arb' +'appro' +'appar' +'apologi' +'anzuh' +'anz' +'anyway_' +'any' +'anxi' +'anwesend_' +'antwortete_' +'antworten_' +'anten_' +'anteil_' +'ante' +'anstreben_' +'ansteigen_' +'ansprechen_' +'anschließen_' +'ansch' +'annu' +'announce_' +'anno' +'anme' +'anlässlich_' +'anl' +'ani_' +'angenehm_' +'angemessenen_' +'angelegt_' +'angegriffen_' +'angebot_' +'angebliche' +'angan_' +'anes' +'anen' +'anderweitig_' +'anden_' +'andauern' +'anbelangt_' +'analyse_' +'amplif' +'amme' +'ami_' +'amended_' +'amen_' +'amb' +'alter' +'alte' +'allocat' +'allo' +'alliances_' +'alleged_' +'alleg' +'alla' +'alit' +'alike_' +'alig' +'alie' +'alia' +'alg' +'ain' +'agog' +'agent_' +'aft' +'afrikanische' +'affirm' +'advertisement' +'adventur' +'admit' +'activat' +'aco' +'achung_' +'accr' +'accord_' 
+'accompanying_' +'accommodati' +'ac_' +'abw' +'absurd_' +'abse' +'abschließen' +'abs' +'able' +'abl' +'abhängt_' +'abhängig' +'abgele' +'abgehalten_' +'abgeben_' +'aat' +'`_' +']]''' +'] ._' +'Zypern_' +'Zwangs' +'Zunahme_' +'Zulassung_' +'Zukunfts' +'Zivilisten_' +'Zie' +'Zh' +'Zentrala' +'Zeita' +'Zealand_' +'Zahlungs' +'Zahlung_' +'YO' +'Xi_' +'XL' +'Würden_' +'Wünsche_' +'Wäre' +'Wurzel' +'Wri' +'Works' +'Wood' +'Wolf_' +'Wol' +'Wohl_' +'Wis' +'Wirtschaftsl' +'Wind' +'Willi' +'Wies' +'Wiederaufbau_' +'Wider' +'WiFi_' +'Wetter_' +'Wett' +'Werk' +'Werden_' +'Weltb' +'Weiterhin_' +'Weis' +'Wechselkurse_' +'Wechsel_' +'Wars' +'Wand' +'Walt' +'Wai' +'Wah' +'Wach' +'WA' +'Vorw' +'Vorteil' +'Vorr' +'Vorfeld_' +'Volle' +'Vogel' +'Vit' +'Visual_' +'Visit' +'Ville' +'Villa' +'Viel' +'Vet' +'Verzögerungen_' +'Verzeichnis_' +'Verwa' +'Vertrauens' +'Versicherungs' +'Versand' +'Versa' +'Vermögens' +'Vermittlungs' +'Verlängerung_' +'Vergleich' +'Vergessen' +'Verfasser' +'Vereinfachung_' +'Verda' +'Veranstaltungsräume_' +'Vegas_' +'Varia' +'Van' +'Vale' +'VAT_' +'VA' +'Users_' +'Ursprung_' +'Urheber' +'Unterschied' +'Untere' +'Unsicherheit_' +'Uns' +'Universum' +'Unge' +'Unbe' +'Unabhängig_' +'Un_' +'Umstände_' +'Uganda' +'Ub' +'UL' +'UC' +'Tür_' +'Tätigkeiten_' +'Twin' +'Tul' +'Tuesday_' +'Treiber_' +'Treib' +'Travel_' +'Translat' +'Track' +'Total_' +'Tor_' +'Top_' +'Tochter' +'Tob' +'Tir' +'Tibe' +'Theater' +'Text' +'Teufel' +'Territori' +'Terminal_' +'Techno' +'Tarif' +'Tanz' +'TT' +'TS' +'TEC' +'Südafrika_' +'Szenario_' +'Synchron' +'Suppo' +'Suiten_' +'Stütz' +'Studie' +'Strände_' +'Strahlung_' +'Straftat' +'Store_' +'Steuersystem' +'Statistik_' +'Stand' +'Stalin_' +'Stagnation_' +'Spri' +'Sprech' +'Spre' +'Spl' +'Spiegel' +'Sozialversicherung' +'Sozialdemokrati' +'Source_' +'Sou' +'Sonnens' +'Solo' +'Solana_' +'Software' +'Soft_' +'Socialist_' +'Sno' +'Sn' +'Small_' +'Sla' +'Sky_' +'SilverFast_' +'Sigma' +'Sicherung_' +'Sicherheitsfragen_' +'Sh' +'Seve' +'Settings_' +'Sem' +'Sek' +'Seen_' +'Seele_' +'Script' +'Schwan' +'Schulungs' +'Schuld_' +'Schuh' +'Schriftsteller_' +'Schriften_' +'Schreib' +'Schluss' +'Schiffs' +'Schic' +'Schach' +'Satellite' +'Santiago_' +'Sanc' +'Sammlung_' +'Sale' +'SW' +'SV_' +'STA' +'SS' +'SP_' +'SLR_' +'SL' +'SH' +'SD_' +'SCO' +'SCH' +'Rö' +'Routine' +'Route' +'Round_' +'Roten_' +'Rosa' +'Rod' +'Richtlinien' +'Rich' +'Rezept' +'Revolution' +'Revision_' +'Reserven_' +'Res' +'Repr' +'Renminbi_' +'Religio' +'Relax_' +'Reisende' +'Reisen_' +'Reinigung_' +'Reichen_' +'Regulierungs' +'Registr' +'Redner_' +'Record' +'Rechtsg' +'Rechtsetzung' +'Ratsvorsitzes_' +'Rate' +'Randlage_' +'Rande_' +'Ramblas_' +'Rahmenprogramm' +'RSS_' +'RGB_' +'RA_' +'Quer' +'Quark' +'Pun' +'Prüf' +'Präsidenten' +'Präsenz_' +'Prozent' +'Protest' +'Protection_' +'Pros' +'Promo' +'Progress' +'Programmen_' +'Prof_' +'Product_' +'Process_' +'Prob' +'Priv' +'Princes' +'Prince' +'Premi' +'Prad' +'Power' +'Pou' +'Portal' +'Polizei' +'Poli' +'Plätze_' +'Pil' +'Pie' +'Physi' +'Photos' +'Phone' +'Philosoph' +'Philadelphia_' +'Petersburg_' +'Peru_' +'Pensions' +'Pel' +'Paul' +'Partners' +'Parliamentary_' +'Parlamentswahlen_' +'Parkplätze_' +'Parameter_' +'Papier_' +'Pakistani_' +'Ox' +'Outs' +'Others_' +'Ost_' +'Os_' +'Ori' +'Options' +'Optimismus_' +'Olive' +'Oli' +'Offensi' +'Objektiv' +'Oberfläche_' +'OLAF_' +'OL' +'OG' +'Nü' +'Nuevo_' +'Nothing_' +'Notenbanken_' +'Norm' +'Nordic_' +'Nin' +'Nieder' +'News' +'Nepal' +'Nelson_' +'Neg' +'Near_' +'Nap' +'Nahrungsmittel' +'Nachdruck_' +'NS_' +'NRO_' +'NOT' 
+'NE_' +'NB' +'Müll' +'Mä' +'Mut' +'Music_' +'Mus' +'Monte_' +'Mitter' +'Mittelschicht_' +'Mitarbeitern_' +'Ministeri' +'Mine' +'Milo' +'Mille' +'Militar' +'Milan_' +'Migrations' +'Miet' +'Mexik' +'Metall' +'Meta' +'Meinungsäußerung_' +'Meines_' +'Meilen' +'Mehrere_' +'McCa' +'Maz' +'Mauer_' +'Master_' +'Marken' +'Marke_' +'Mang' +'Malta_' +'MC' +'M5' +'Lösch' +'Luxemburg_' +'Lux' +'Lunch' +'Luggage_' +'Lud' +'Lot' +'Lor' +'Lohn_' +'Lohn' +'Lock' +'Loca' +'Lizenz_' +'Literatur_' +'Linien_' +'Line_' +'Lind' +'Liikanen_' +'Lich' +'Liberalen_' +'Leu' +'Letzte' +'Leonardo_' +'Leistungsbilanz' +'Leica_' +'Leh' +'Lega' +'Lebensst' +'Lebensqualität_' +'Learn_' +'Las' +'Lar' +'Landes' +'Lan' +'Lama' +'Lai' +'Lager_' +'Laden_' +'Kürz' +'Königs' +'Kämpfe_' +'Kurse' +'Kub' +'Kräften_' +'Kroatien_' +'Kro' +'Kreditgeber_' +'Krankenh' +'Kraftstoff' +'Korr' +'Kopie' +'Konservativen_' +'Kompon' +'Komplettpreise_' +'Kollege_' +'Kohäsions' +'Kohlenstoff' +'Kohle_' +'Knoten_' +'Kn' +'Klon' +'Kleid' +'Kis' +'Kinders' +'Ket' +'Kenya_' +'Kennzeichnung' +'Kathedrale_' +'Kaffee_' +'Jose' +'Jon' +'Jobs_' +'Jin' +'Jh' +'Jas' +'Jam' +'Jagd' +'Jacuzzi_' +'JA' +'Italia' +'Inzwischen_' +'Investi' +'Inve' +'Intelligen' +'Intellektuelle' +'Integrität_' +'Inte' +'Instabilität_' +'Insp' +'Insofern_' +'Innere' +'Inland' +'Impfung' +'Imperial' +'Imm' +'Ice' +'INI_' +'ICA' +'Höhepunkt_' +'Händen_' +'Hyper' +'Hurri' +'Hungers' +'How' +'Hos' +'Homo' +'Holocaust_' +'Holl' +'Hold' +'Hoffnungen_' +'Hof' +'Hindernisse_' +'Hilton_' +'Hillary_' +'Hierzu_' +'Hier' +'Hes' +'Heran' +'Hence_' +'Help_' +'Helm' +'Heimatl' +'Heb' +'Heads_' +'Haushalts_' +'Hauptb' +'Hast' +'Hans_' +'Hall' +'HP_' +'Gü' +'Göteborg_' +'Gute_' +'Gui' +'Greenspan_' +'Greater_' +'Grad' +'Gr' +'Gordon_' +'God' +'Goals_' +'Gn' +'Glaubens' +'Gipfels_' +'Gewährleistung_' +'Gesellschafts' +'Gesan' +'Gerät_' +'Gere' +'Gerade_' +'Gepäck' +'Geno' +'Genf' +'Gelegenheiten_' +'Gelds' +'Gegen_' +'Gefähr' +'Gebühr_' +'Game' +'Gam' +'Gad' +'Gabriel' +'Gab' +'GR' +'Fünfte' +'Fünf' +'Further_' +'Fuji' +'Fris' +'Friedensprozess_' +'Freib' +'Freedom_' +'Four_' +'Fotokopiereinrichtungen_' +'Fond' +'Fokus_' +'Flughafen' +'Fluggesellschaften_' +'Flexi' +'Fleisch' +'Fitness_' +'Fischereipolitik_' +'Finanzsystems_' +'Finanzminister_' +'Finanzau' +'Filter' +'Fertig' +'Fernseher_' +'Fehlen_' +'Features_' +'Fabriken_' +'FR' +'FDI_' +'FA_' +'FARC_' +'Extrem' +'Expo' +'Exi' +'Excel_' +'Evolution_' +'Events_' +'Event_' +'Europäisches_' +'Eth' +'Ess' +'Erö' +'Erweiterungs' +'Ersch' +'Erre' +'Eropa_' +'Ern' +'Ermittlung' +'Erkrankung' +'Ericsson_' +'Erfüllung_' +'Erfolgs' +'Erdöl' +'Episode' +'Entwicklungsst' +'Entwicklungsa' +'Enth' +'Enron_' +'Enl' +'Enk' +'Eng' +'Elektronik' +'Elektro' +'Eisen' +'Einzelh' +'Einkaufss' +'Einhei' +'Einh' +'Eing' +'Einfuhr' +'Einf' +'Einerseits_' +'Eine' +'Eigenschaft_' +'Eigenheim' +'Edin' +'Eden_' +'Eck' +'Easy_' +'ESS' +'ENT' +'Dörfer' +'Dutzend_' +'Drittstaaten_' +'Drei' +'Draghi_' +'Dow_' +'Dou' +'Dominion_' +'Dominica' +'Doc' +'Diktator' +'Digi' +'Dienstes_' +'Dienste' +'Did' +'Dick' +'Dez' +'Dev' +'Dest' +'Denk' +'Demand_' +'Defi' +'Days_' +'Damals_' +'Dach' +'DR_' +'DR' +'DPJ_' +'DB' +'DAM' +'Crystal_' +'Croatia_' +'Cri' +'Court' +'Could_' +'Corr' +'Conver' +'Contact_' +'Consult' +'Consequently_' +'Conc' +'Comple' +'Commerce_' +'Commander_' +'Coll' +'Coffee_' +'Chirac_' +'Chip' +'Chechnya_' +'Chat_' +'Charlie_' +'Champs_' +'Chai' +'Centr' +'Cell' +'Cath' +'Carr' +'Carmen_' +'Cari' +'CL' +'Büro_' +'Büchern_' +'Bäume_' +'By' +'Business' +'Bundesrepublik_' 
+'Bundeskanzlerin_' +'Building_' +'Buck' +'Brüder_' +'Bruttoinlandsprodukt' +'Brown_' +'Bridge_' +'Brandenburg' +'Bour' +'Borde' +'Bonus_' +'Bonn_' +'Bomb' +'Bog' +'Bob' +'Blut_' +'Blockade_' +'Bilde' +'Bibliotheken_' +'Bezeichnung_' +'Bewe' +'Bevor_' +'Between_' +'Betriebssystem_' +'Betrachtung_' +'Besuche' +'Bestätigung_' +'Bestände_' +'Bestellung_' +'Bestand' +'Besser' +'Beruf' +'Berechnung_' +'Bere' +'Benutzern' +'Benach' +'Bem' +'Bell_' +'Beli' +'Belgian_' +'Bekenntnis_' +'Bekannt' +'Beg' +'Befürchtungen_' +'Befür' +'Beau' +'Bearbeit' +'Bath_' +'Baj' +'Bahrain_' +'Bac' +'BSE_' +'BERLIN_' +'BD' +'B6_' +'Award' +'Automobilindustrie_' +'Auswahl' +'Austritt_' +'Australian_' +'Austin_' +'Ausstellung_' +'Aussichten_' +'Ausschluss_' +'Aussagen_' +'Aussage_' +'Ausrüstung_' +'Ausbr' +'Ausblick_' +'Ausbildungs' +'Ausarbeitung_' +'Aufwertung_' +'Aufl' +'Aufg' +'Aufb' +'Atlantik' +'Ate' +'Astro' +'Arzneimittel_' +'Arth' +'Arr' +'Arms' +'Armenia_' +'Armeni' +'Arme' +'Archiv' +'Arbeitsplatz_' +'Arbeitnehmern_' +'Arbeitnehmer' +'Applikation' +'Appl' +'Appart' +'Appar' +'Apart_' +'Apa' +'Any' +'Anschlag_' +'Ansch' +'Anruf' +'Anreiz_' +'Anr' +'Anpassungs' +'Annäherung_' +'Anne' +'Anmeld' +'Angabe_' +'Anf' +'Anderson_' +'Anbieter_' +'Analysen_' +'Anal' +'Amerikanern_' +'Alon' +'Algeria_' +'Alge' +'Alf' +'Alc' +'Albu' +'Aktienm' +'Aero' +'Admi' +'Acro' +'Acqu' +'Acht_' +'Access' +'Abänderung' +'Abw' +'Absicherung_' +'Abschwung' +'Absa' +'Abkommens_' +'Abbas_' +'ASE' +'AL_' +'ALDE_' +'AKVIS_' +'89_' +'82_' +'240_' +'215' +'1982_' +'1973_' +'1948_' +'1900_' +'18th_' +'177' +'175' +'1701_' +'160' +'128_' +'/-_' +'. ({{_' +'*_' +'**' +')]' +''']]' +'%) _' +'">' +' �_' +' – _' +' ==' +' "._' +' ", _' +'€ _' +'’ – _' +'– ' +'ע' +'өз' +'і_' +'ю' +'х_' +'то' +'т_' +'ри' +'про' +'ор' +'на' +'мо' +'ль' +'лы' +'к_' +'ин' +'ем' +'ве' +'ο_' +'μ' +'ž_' +'ši' +'Č' +'ützen' +'üte' +'ügen_' +'üchter' +'üch' +'übrig_' +'üblicherweise_' +'überzeugend_' +'überwältigende' +'übertrag' +'überraschen' +'übermitteln_' +'überla' +'übergeben_' +'überarbeitet' +'ør' +'öt' +'ört_' +'örper' +'örigkeit_' +'ören_' +'örder' +'öpf' +'ökonomisch' +'ökologischen_' +'ökologische_' +'ök' +'ño' +'és' +'éo' +'ées_' +'ça' +'äßig_' +'änk' +'ängt_' +'änen_' +'ändig' +'äm' +'ältig' +'älle_' +'ährung_' +'ähnliches_' +'ächtnis' +'ál' +'Übertr' +'Übel' +'Äthiopien_' +'Ägypte' +'» | _' +'»' +'}: _' +'|''_' +'zügig_' +'zähl' +'zynisch' +'zyklus_' +'zwing' +'zweckmäßig' +'zwanzig_' +'zw' +'zuzus' +'zuzug' +'zuwe' +'zutage_' +'zunichte_' +'zukünftigen_' +'zuha' +'zugeg' +'zugeben_' +'zona_' +'zo_' +'zn' +'zle' +'zk' +'zivile_' +'zim' +'zierte' +'zid' +'zessi' +'zens' +'zeitlich' +'zahlreicher_' +'ystemen_' +'yss' +'yle' +'yh' +'yen_' +'yed_' +'ycl' +'xio' +'xen' +'würdig' +'wür' +'wählt' +'wä' +'wunderbare' +'wun' +'worm' +'womöglich_' +'wol' +'wiss' +'wished_' +'wische' +'wirkungen_' +'wirksamer_' +'winzige' +'winds_' +'winding_' +'willig_' +'wiederholte' +'widerspiegel' +'widen' +'wid' +'wichtig' +'wich' +'wheel' +'wesentlicher_' +'werte' +'werk' +'wenigstens_' +'wen_' +'weithin_' +'weil' +'weigert' +'wechselt' +'wechsel_' +'websites_' +'wear_' +'wear' +'watched_' +'warrant_' +'warnen_' +'ware' +'wandeln_' +'wandel' +'waktu_' +'wak' +'wahrgenommen_' +'wag' +'vu' +'vre' +'vot' +'vorzus' +'vorteil' +'vorsichtig' +'vorsehen_' +'vorläufige' +'vorliegende_' +'vorgez' +'vorgestellt_' +'vorgehen_' +'voraussichtlich_' +'volunteer_' +'volle' +'vo_' +'vital' +'visual_' +'visiting_' +'vice_' +'veterinar' +'vessel_' +'verzeichnet' +'verzeichnen_' +'verwunde' 
+'verwi' +'verw' +'verursachten_' +'verursachte_' +'vertrieb' +'vertrau' +'verteidig' +'verstärkte_' +'versich' +'versehen' +'verschw' +'verschli' +'verschlechtert' +'verschl' +'verpflichtend' +'vermute' +'vermis' +'verlängert_' +'verlorene' +'verle' +'verlangsamt_' +'verkündet_' +'verknüpft_' +'verhäng' +'verhältnis_' +'vergangen' +'verfüg' +'verbundene_' +'verbringen_' +'verbreitete_' +'verbot' +'verbi' +'verbe' +'verbal' +'verb' +'verantwortlichen_' +'verans' +'verab' +'vede' +'vate' +'varied_' +'valued_' +'vall' +'validate' +'vacuum_' +'vaccination_' +'uver' +'uu' +'utz' +'uts_' +'utm' +'usted_' +'usst' +'ussi' +'usr' +'usion' +'user' +'ursprüngliche_' +'uropäischen_' +'urh' +'urges_' +'urge' +'upload_' +'upholding_' +'uo' +'unzulä' +'unwilling_' +'unwi' +'unverzüglich_' +'unus' +'untr' +'unterz' +'unterstreicht_' +'unterschr' +'unterscheidet_' +'unterrichte' +'unternehme' +'unterliegen_' +'unterhalt' +'untereinander_' +'unterbrochen' +'unterb' +'unsu' +'unsicheren_' +'unschuldige' +'unsa' +'unres' +'unr' +'unnecessary_' +'unle' +'unjust' +'universali' +'uniti' +'union' +'unified_' +'ungst' +'ungspro' +'ungsge' +'ungsfrei' +'ungl' +'unequ' +'underw' +'underli' +'unbest' +'umgehend_' +'umgeb' +'ulation' +'ulan' +'uhig' +'uhe' +'ugh' +'ufung_' +'ufte' +'udi' +'uche' +'uc_' +'tück' +'töten_' +'tö' +'té_' +'tätige_' +'tägliche_' +'tze' +'typischer' +'typischen_' +'typische_' +'tures_' +'tungs' +'tts' +'ttl' +'tse' +'trü' +'träum' +'truktur' +'troubled_' +'troll' +'tril' +'tribunal' +'trenn' +'trauma' +'trate' +'trap_' +'transpos' +'transmitted_' +'trail' +'trade' +'toxic_' +'tower_' +'towels_' +'tot_' +'tonn' +'toffe' +'tnis' +'tliche_' +'tles' +'tiv_' +'tisa' +'tionary_' +'tionali' +'tiny_' +'timely_' +'tile' +'tide_' +'tics_' +'tically_' +'tical' +'tial_' +'tial' +'thro' +'thou' +'thirty_' +'thinks_' +'therapy_' +'thee_' +'tern_' +'termasuk_' +'terb' +'tep' +'tension_' +'tener' +'tena' +'tempora' +'tempo_' +'tem' +'telling_' +'telecommunications_' +'teilig' +'technologi' +'td' +'tche' +'tch_' +'taucht' +'tati' +'tapi_' +'tang' +'tane' +'tain_' +'tain' +'tailored_' +'tail' +'tags' +'tac' +'tab_' +'szi' +'systematische_' +'system' +'sys_' +'synthesi' +'synchroniz' +'symboli' +'symbol' +'swor' +'sweise' +'svoll' +'sverfahren_' +'sv' +'suspect' +'survey_' +'surprisingly_' +'surge_' +'suprem' +'suppressi' +'supplier_' +'supplied_' +'supermarkets_' +'suns' +'sunnitische' +'sumber_' +'suche_' +'successor_' +'substanzielle' +'subst' +'subsidiz' +'subsidiar' +'subjects_' +'stürzen_' +'stärkeren_' +'stärke' +'stupid_' +'struggle' +'strophen' +'strive_' +'strik' +'stric' +'strengere' +'streng' +'strebt_' +'stream' +'strategisch' +'strat' +'strand' +'strahl' +'strafrechtlich' +'ston' +'stolz_' +'stoff_' +'stin' +'stim' +'steuern_' +'sten' +'stellend_' +'steile' +'steckt_' +'stecken_' +'stec' +'stays_' +'statute_' +'starv' +'starters_' +'starkes_' +'standen_' +'stande' +'standardis' +'stakeholder' +'stabili' +'staats' +'staatlich_' +'staate' +'ssta' +'ssin' +'sses_' +'sserung' +'ssene_' +'ssend' +'ssed_' +'ssal' +'srecht' +'squeeze_' +'sque' +'sprin' +'spreche_' +'spreading_' +'spoil' +'spirit' +'sphere_' +'spends_' +'spell_' +'specialist_' +'special' +'speaks_' +'spatial' +'spann' +'sos' +'sorti' +'sorry_' +'sore' +'sooner_' +'sont_' +'sonder' +'sollt' +'solide_' +'solid' +'sold' +'solar_' +'socially_' +'socialism_' +'smokers_' +'smit' +'slos' +'slim' +'slide_' +'slee' +'slan' +'skri' +'skr' +'skiing_' +'skie' +'sket' +'skepticism_' +'skat' +'sir_' +'sir' +'sip' +'sins' +'simul' +'silver_' +'sika' 
+'sik' +'signals_' +'sights_' +'sig' +'siert_' +'sier' +'sichtbar_' +'sible_' +'sian_' +'sia_' +'sia' +'shutt' +'show' +'shortc' +'shores_' +'shaping_' +'shaped_' +'shal' +'sgr' +'seu' +'setzung_' +'set' +'separation_' +'sentiment_' +'sensi' +'sensation' +'sends_' +'sement' +'seltene' +'seller' +'sell' +'seiner' +'sebuah_' +'sebe' +'sco_' +'sco' +'scienti' +'schüre' +'schö' +'schwerwiegende' +'schweigen_' +'schwank' +'schung_' +'schulden' +'schuh' +'schreib' +'schottische' +'schnitt' +'schnellere' +'schneide' +'schmerzhaft' +'schlechteste' +'schlechter_' +'schlag_' +'schickt' +'schicken_' +'schenken_' +'scheinlich' +'scheinbar_' +'schafts' +'scarc' +'scan' +'sberei' +'satu_' +'satisfactor' +'sani' +'sanct' +'sample_' +'sal_' +'saja_' +'sacrifice_' +'rät' +'ränk' +'rzt_' +'rungen_' +'ruled_' +'ruhige_' +'ruct' +'rth' +'rro' +'royal' +'ron_' +'roi' +'rodukt' +'rocket' +'roblem' +'rla' +'rk_' +'rist' +'risik' +'risen_' +'rine' +'rily_' +'rigoro' +'rigid' +'ried' +'riding_' +'rid_' +'rice_' +'rgen_' +'rf_' +'revolutionäre' +'reviv' +'revis' +'rever' +'resur' +'resultierende' +'restrictive_' +'restr' +'respekt' +'resorts_' +'resolving_' +'reserven_' +'reserv' +'republik' +'repressive_' +'reporte' +'reor' +'renz' +'rende' +'relies_' +'relaxed_' +'relaxation_' +'rej' +'reitung' +'reiterate_' +'reiches_' +'reichende' +'reibung' +'regulierung' +'registrierte' +'registriert_' +'regiert_' +'regelmäßige' +'refusing_' +'recurr' +'recover' +'recording' +'recipe_' +'reci' +'rechtliche' +'rechtfertigen_' +'rechnung' +'rech' +'recap' +'reasoning_' +'realized_' +'reaches_' +'rdl' +'rdin' +'rche' +'raus' +'rats_' +'ratified_' +'rase' +'rasant' +'rarely_' +'rapi' +'ramp' +'ramm' +'rale' +'rak' +'rail' +'raft_' +'raff' +'radiation_' +'rad_' +'query_' +'quellen_' +'quarte' +'quantitativen_' +'purely_' +'puls' +'pull_' +'pulat' +'publish_' +'pts_' +'pta' +'pré' +'proxy_' +'provinc' +'proto' +'protests_' +'protecti' +'protagonist' +'propriet' +'proportional_' +'propo' +'prope' +'pron' +'promptly_' +'proliferation_' +'projected_' +'profit' +'profi' +'proclaimed_' +'problemlos_' +'prinzipie' +'prin' +'primäre' +'pricing_' +'pric' +'pretext_' +'prediction' +'precedent_' +'precautionary_' +'preca' +'prachigen_' +'ppo' +'pping_' +'potenziell_' +'potato' +'pot' +'postponed_' +'positioned_' +'portray' +'porte' +'por' +'poorly_' +'politischem_' +'polariz' +'pod' +'plum' +'plot' +'plo' +'plai' +'placing_' +'pita' +'pit_' +'piscin' +'pin_' +'pilla' +'pielen_' +'piele_' +'pie_' +'pian' +'photograph_' +'phe' +'pfer' +'pfen_' +'pets_' +'petition_' +'pers_' +'permits_' +'peripheral_' +'penu' +'peni' +'pende' +'pek' +'pedestrian_' +'patron' +'paths_' +'patent' +'patch_' +'passport_' +'passive_' +'passiv' +'passionate' +'partnerschaftliche' +'participating_' +'parl' +'parent_' +'paraly' +'paradi' +'pap' +'pang' +'pad_' +'packaging_' +'owe' +'overha' +'outsourc' +'outlined_' +'outlet' +'outf' +'outd' +'ount' +'ounce' +'oul_' +'oui' +'oud_' +'otis' +'osten_' +'osk' +'orti' +'ork' +'origins_' +'orient' +'ori_' +'organisms_' +'orf' +'ordne' +'ordentliche' +'orat' +'orang_' +'oran_' +'optimize' +'optimism_' +'optimier' +'optical_' +'oppo' +'opoulo' +'opol' +'opo' +'operators_' +'openness_' +'oon_' +'ood_' +'ontr' +'onist' +'onic' +'ones' +'onds_' +'ond_' +'ommene' +'ologis' +'oli_' +'oka' +'oing_' +'oid' +'ogramm' +'ogn' +'ogg' +'ofs_' +'offene' +'odell' +'occup' +'observer_' +'observe_' +'oberste_' +'oberfläch' +'obere' +'oasis_' +'nütz' +'nördlichen_' +'nächstes_' +'nä' +'nwe' +'nver' +'nur' +'null' +'ntion_' +'ntie' +'nted_' 
+'nstein_' +'nsh' +'nsa' +'np' +'notorious' +'nostalgi' +'normali' +'nominat' +'noc' +'nkt_' +'nineteenth_' +'nifi' +'niederge' +'nian' +'neueste_' +'netze_' +'nest' +'nese_' +'nern_' +'nente' +'neiden' +'neglect' +'neben' +'ndu' +'nds' +'ndr' +'ndo' +'ndl' +'ndet_' +'nde' +'ncia' +'nationales_' +'nas' +'nant' +'nano' +'nami' +'nahezu_' +'nad' +'nacht_' +'nachhaltiger' +'mé' +'mäßigen_' +'mäß' +'mächte_' +'muy_' +'muti' +'muster' +'murdered_' +'murder' +'mund' +'mt_' +'mst' +'mse' +'mpi' +'moves_' +'motive' +'mortgages_' +'morph' +'morat' +'moralischen_' +'monatlich' +'molecul' +'modernization_' +'modernis' +'modernes_' +'mobile' +'mmern_' +'mium_' +'mittlere' +'mistakes_' +'misleading_' +'miracle_' +'ministeri' +'minded_' +'mili' +'mie_' +'mh' +'mexikanische' +'metr' +'method' +'metal_' +'messe' +'menya' +'menta' +'menschen' +'meme' +'melden_' +'md' +'mba_' +'maximize_' +'maximal' +'mau' +'matic_' +'mathematics_' +'materielle' +'massa' +'mask_' +'marginali' +'margin_' +'marble_' +'mann' +'manipulier' +'mangelnde' +'manc' +'managers_' +'malt' +'maker_' +'maj' +'magi' +'maga' +'mad' +'macro_' +'mache' +'läßt_' +'läufig' +'lze' +'lying_' +'lush_' +'luck' +'ltungs' +'lous_' +'loss' +'lokaler_' +'lohnt_' +'logistics_' +'llusion' +'llung_' +'llschaft' +'lli' +'ller' +'llel' +'live' +'listened_' +'lism' +'lisi' +'lische' +'lio' +'link' +'lings_' +'liness_' +'lighting_' +'lifetime_' +'ließ' +'lien' +'licensing_' +'liberat' +'liberalisi' +'liberali' +'liberalen_' +'liberale' +'liat' +'lian' +'liabilities_' +'letztendlich_' +'lens_' +'lengthy_' +'lender' +'leistungs' +'leh' +'left' +'lebte' +'leas' +'lding_' +'ldet_' +'lba' +'lb' +'laureate' +'laundry_' +'laufende' +'lauf_' +'latz' +'laser_' +'laptop_' +'landwirtschaftlichen_' +'landschaft_' +'lana' +'lakes_' +'lah_' +'lage' +'ladi' +'lacking_' +'könnt_' +'käme_' +'kw' +'kurzfristigen_' +'kurzfristige_' +'kurzen_' +'kurse_' +'kup' +'kunden' +'kultur_' +'kula' +'kua' +'ktivi' +'kten_' +'kreuz' +'kret' +'kreise' +'krei' +'kratisch' +'krank_' +'kow' +'kostenfreien_' +'kostenfreiem_' +'korre' +'koreanische' +'kontinentale' +'kontaktieren_' +'konstitutionelle' +'konservative' +'komplizierten_' +'komplexen_' +'kompet' +'kompakt' +'komfortabel_' +'kolonial' +'koll' +'kok_' +'knappe' +'kluge' +'klinische' +'klich' +'kleines_' +'klassische' +'klassi' +'klapp' +'kl' +'kka' +'kische' +'kinder' +'kilometre' +'kid' +'kf' +'kett' +'kerja_' +'kehr_' +'kehr' +'kamera' +'kal' +'jö' +'justification_' +'juristische' +'jung' +'jun' +'journalist_' +'journal' +'jou' +'join' +'jeu' +'jeni' +'jeglicher_' +'jan' +'izi' +'iza' +'ix' +'ivat' +'iva' +'iv_' +'ium' +'ité_' +'itz' +'itts' +'itis' +'ita_' +'istic_' +'isti' +'iste' +'issuing_' +'issi' +'ison_' +'isolated_' +'isolat' +'islamistische' +'islamische_' +'isi_' +'irrev' +'irr' +'irgendeiner_' +'ires_' +'irc' +'iplin' +'ios_' +'ioni' +'iona' +'inviting_' +'investasi_' +'invasion_' +'intuitive' +'intra' +'intolera' +'interview_' +'interventions_' +'interv' +'interrupt' +'internationally_' +'interior_' +'interg' +'intensive' +'intensiv_' +'intelligente_' +'inte' +'intak' +'inta' +'institutionalis' +'instances_' +'inst' +'inspiri' +'inspectors_' +'insofern_' +'insist' +'inse' +'inquir' +'innehat' +'inne' +'injection' +'iniert' +'inhibit' +'inhaltlich' +'infra' +'inflict' +'infi' +'infectious_' +'infe' +'inex' +'inevitably_' +'iness_' +'ineffiziente' +'industriellen_' +'industrialis' +'indu' +'indische_' +'indirect_' +'indication_' +'incur' +'incorrect' +'incorporate_' +'inclusive_' +'incl' +'incapable_' +'inca' +'inat' 
+'inander_' +'inability_' +'impl' +'imperial' +'impedi' +'impede' +'impair' +'immigrant_' +'imba' +'ilte' +'illnesses_' +'ilde' +'iken_' +'ignor' +'igli' +'ight' +'ifor' +'ifel' +'ieß' +'iev' +'iess' +'ient' +'ieg_' +'ieden' +'ied' +'ido_' +'idio' +'identification_' +'identifi' +'ideale' +'ick' +'icial' +'ichtig' +'icht' +'icate' +'ibility_' +'ibi' +'iba' +'hübsch' +'höf' +'höchste' +'häus' +'häufiger_' +'hängen_' +'hw' +'hus' +'hurr' +'hunt' +'hts_' +'htm' +'hten_' +'hrung' +'hospitals_' +'hos' +'hors' +'hop' +'homo' +'homeland_' +'holt' +'hohes_' +'hochrangige' +'hnte' +'hmig' +'hmen_' +'hija' +'highway_' +'hierfür_' +'herz' +'hervorrufen_' +'hervorragende' +'herkömmlichen_' +'herausf' +'hende_' +'hemm' +'heb' +'heav' +'heated_' +'headlines_' +'header' +'headed_' +'hb' +'hav' +'hauses_' +'hause' +'haus' +'hate_' +'hast' +'harmonise_' +'harm' +'harde' +'harass' +'hanya_' +'handlungen_' +'halb_' +'haft' +'haf' +'habt_' +'günstige' +'gängig' +'gym' +'gura' +'gul' +'guitar' +'guide' +'guidance_' +'guest' +'guardian_' +'gste' +'grüne' +'gründet_' +'größerem_' +'größe' +'grösste' +'großartige_' +'grou' +'grie' +'gravierende' +'grausam' +'gog' +'god' +'gnet' +'gma' +'glücklich_' +'globalis' +'glichen_' +'girls_' +'gion' +'gin_' +'gin' +'gie_' +'giant_' +'geäußerten_' +'gewöhnlich' +'gewi' +'gewec' +'gewe' +'gewalt' +'getau' +'gesti' +'gesteckt_' +'gestaltete' +'geson' +'gesenkt_' +'gesellschaftlichen_' +'geschäft_' +'geschwindigkeit_' +'geschm' +'gerufen_' +'geriet' +'geri' +'gerechtfertigt_' +'gerechte_' +'geprägten_' +'geopolitische' +'geopolitical_' +'genom' +'gener' +'genen_' +'gende' +'gence_' +'gemessen_' +'gemeins' +'geldpolitische' +'geld' +'gehe_' +'geha' +'gegr' +'gegner' +'gegenseitige_' +'gefü' +'gefo' +'gefangen_' +'gef' +'geeignete_' +'geehrt' +'gedenkt_' +'gebiete_' +'geber_' +'gebe' +'gea' +'gather_' +'gastro' +'gasse_' +'gaps_' +'gall' +'gage' +'füllen_' +'förderung_' +'fähigen_' +'fungier' +'fung' +'fundamentally_' +'func' +'ftig' +'fter_' +'fst' +'früh' +'fruit' +'fronts_' +'frontier' +'fristig' +'frisch_' +'freut_' +'freundlichen_' +'frequent' +'fragile_' +'founder_' +'foun' +'fortzu' +'fortune_' +'fortgeschrittene' +'formulierte_' +'formali' +'formale' +'forg' +'foresee' +'forecast_' +'forder' +'forci' +'font' +'folgte' +'fold_' +'fn' +'flü' +'flugzeuge' +'flowing_' +'flower' +'flexibler' +'flexible' +'fled' +'flavo' +'flag' +'fizi' +'fix' +'fisch' +'finger' +'fiction_' +'fica' +'ffl' +'ffene' +'ffekt' +'feststellt' +'feste_' +'feiern_' +'fehlenden_' +'fec' +'featured_' +'feasible_' +'fea' +'favorite_' +'favored_' +'fau' +'fasc' +'farbe' +'fantastische' +'fangen_' +'faire' +'fahrts' +'fahrer' +'fahren' +'facilitating_' +'facilitat' +'fache_' +'fache' +'fabric_' +'fab' +'exzellenten_' +'extensi' +'exquisite_' +'exposure_' +'exploration_' +'exploiting_' +'experiencing_' +'exklusiv' +'existierende' +'exert_' +'exercise' +'exempl' +'exclu' +'exchange' +'excellence_' +'even' +'euro' +'euer' +'ette_' +'eting' +'eth_' +'eternal_' +'etabliert' +'etablieren_' +'esu' +'esto' +'estig' +'esta_' +'esses_' +'eso' +'erwecken_' +'erupt' +'erungen_' +'erta' +'erstens_' +'erstatte' +'erschl' +'errors_' +'errichtet_' +'erreich' +'erpr' +'ernen' +'ermä' +'erma' +'erläutern_' +'erleichter' +'erklär' +'erit' +'eris' +'eries_' +'erholsame' +'ergi' +'erge_' +'ergab' +'erfe' +'ereich_' +'erbe_' +'erbaut_' +'erbar' +'eran' +'equip' +'equat' +'epidemic_' +'epidemi' +'envi' +'entziehen_' +'entworfen_' +'enttäuscht' +'entspr' +'entschied_' +'entscheidung' +'entity_' +'entgegens' +'entgegenge' 
+'entfernten_' +'entfallen_' +'entail' +'ensured_' +'enorm_' +'enli' +'enh' +'engineer' +'energisch' +'ener' +'endanger' +'enact' +'employee_' +'empfäng' +'empfunden' +'empf' +'emotions_' +'emble' +'ellung_' +'elles_' +'electr' +'ekte' +'ej' +'eitig_' +'eir' +'einzust' +'einzuräumen_' +'einzurichten_' +'einzul' +'einzuf' +'einseitig' +'einräumen_' +'eingerichteten_' +'eingeh' +'einführen_' +'einfü' +'einbezieh' +'einbar' +'einb' +'eilt_' +'eilen_' +'eigne' +'eigenständige' +'eigens' +'eigenem_' +'eid' +'eichn' +'eho' +'ehl' +'eful_' +'efi' +'effizientere' +'effiziente_' +'effizient_' +'effizien' +'effekt_' +'editor' +'ede_' +'ecu' +'eck' +'echo' +'ebung_' +'ebnen_' +'dü' +'durchsetzen_' +'durchschnittliche_' +'durchschnittlich_' +'durchgesetzt_' +'duration_' +'duk' +'dst' +'dry' +'drucken' +'dron' +'drinking_' +'drift' +'dreh' +'drastisch_' +'drastic_' +'drasti' +'drag' +'dou' +'dorf_' +'dop' +'dog_' +'document' +'divisions_' +'dividing_' +'divide_' +'divi' +'diversen_' +'div' +'dition' +'distress_' +'distort' +'dist' +'dispose' +'dismissed_' +'disg' +'discre' +'discourse_' +'discourage' +'dische' +'disappointed_' +'disabilit' +'diper' +'dioxide_' +'dik' +'dih' +'digung' +'digkeit' +'diesbezügliche_' +'dib' +'diak' +'dg' +'devote_' +'devi' +'deutlichen_' +'detriment_' +'deten' +'det_' +'destru' +'despair_' +'designation_' +'desde_' +'derived_' +'dere' +'derartiger_' +'depressed_' +'deposits_' +'deploy' +'dense' +'denounce' +'demonstrieren_' +'demokratisch_' +'demograph' +'democrat' +'dementsprechend_' +'dell_' +'delightful_' +'delete_' +'degrees_' +'ded' +'decor' +'declines_' +'debe' +'debated_' +'dde' +'dba' +'dauerhafte' +'dare' +'dad' +'dacht' +'cycles_' +'curtail' +'cultivat' +'culminat' +'cue' +'ctur' +'ction' +'crush' +'crude_' +'critici' +'cript' +'crash_' +'cras' +'craft_' +'craft' +'cr' +'cozy_' +'couple' +'coup_' +'cotton_' +'cosmetic' +'correspond_' +'corps' +'copie' +'convince_' +'convicted_' +'convict' +'conversation_' +'controversial_' +'contagion_' +'contacts_' +'consult_' +'constitutes_' +'constituenc' +'constitu' +'consist' +'conquer_' +'connecting_' +'coni' +'confo' +'confined_' +'configure_' +'confe' +'conf' +'conciliation_' +'concentrated_' +'compre' +'compr' +'compliment_' +'complaint_' +'complain_' +'comparative_' +'common' +'commodities_' +'commission' +'commercial' +'comer' +'come' +'colonial_' +'collectively_' +'collections_' +'cola' +'cock' +'coat' +'coastal_' +'clu' +'closure_' +'clinical_' +'clin' +'cli' +'ckig' +'cket_' +'cke' +'cis' +'cinema' +'chtigen_' +'chsel' +'child' +'checke' +'chas' +'charisma' +'charakter_' +'characteristics_' +'characteristic_' +'change' +'chair' +'cere' +'censorship_' +'ced' +'cce' +'cca' +'cave' +'cautious_' +'cau' +'casual' +'casino_' +'carries_' +'capture_' +'captur' +'capability_' +'cap_' +'cant' +'cans_' +'cana' +'came' +'calculat' +'cafe' +'caci' +'bürokratische' +'bösartige' +'byl' +'but' +'burning_' +'burn' +'bureaucracy_' +'bum' +'bullet' +'builds_' +'buffer' +'brü' +'browsing_' +'brothers_' +'brake' +'boxe' +'bottle' +'borrowers_' +'borne_' +'bora' +'bookings_' +'bombard' +'boli' +'boil' +'bn' +'bloße_' +'bloß' +'blocking_' +'blockiert_' +'bloc' +'bliche' +'blase_' +'blam' +'birds_' +'billig_' +'bilität' +'bid_' +'bias' +'bia' +'bezi' +'bezeichnen_' +'bewert' +'beverages_' +'beunruhig' +'bett_' +'bett' +'betrü' +'beträchtliche' +'betrieb' +'betreiber' +'bete' +'bestrafen_' +'bestehender_' +'bestanden_' +'besitze' +'beside' +'besetzte' +'beset' +'beschw' +'beschreiben_' +'beschrei' +'berühmte_' +'beru' +'bereitstellen_' 
+'bereitet_' +'benötigten_' +'benötigte_' +'benen' +'benchmark' +'benachteiligt' +'benachbarten_' +'bemerken_' +'belong' +'beliefs_' +'beke' +'bekannteste' +'beit' +'beides_' +'behinder' +'begünstig' +'begriffen_' +'begrenzten_' +'begleitet_' +'begeben_' +'bege' +'begangen_' +'beförder' +'befriedigen_' +'befri' +'befrei' +'befa' +'bedeutete_' +'bedding_' +'bedauerlich_' +'bearbeiten_' +'bb' +'bat_' +'bases_' +'base' +'barri' +'banner' +'banned_' +'bang_' +'bailout_' +'bai_' +'ays_' +'aya' +'aw_' +'avoiding_' +'aviation_' +'avel' +'außergewöhnlichen_' +'automati' +'author' +'auszuw' +'auszul' +'auszuarbeiten_' +'ausstatt' +'aussi' +'ausser' +'auss' +'ausreichende' +'ausr' +'ausp' +'ausn' +'ausgest' +'ausges' +'ausgehend_' +'ausgedehnt' +'ausfallen_' +'ausbr' +'aum_' +'ault_' +'auli' +'aufzuh' +'aufrufen_' +'aufri' +'aufregende' +'aufgez' +'aufgel' +'aufgegriffen_' +'aufgef' +'auferleg' +'aufbe' +'auer_' +'audit_' +'aucht' +'ature_' +'aturan_' +'attr' +'atta' +'atra' +'atori' +'atm' +'atla' +'ativ' +'ata_' +'asy' +'astu' +'ast_' +'assumption_' +'assum' +'assessing_' +'assess_' +'asserti' +'assembly_' +'assembl' +'asse_' +'aspir' +'asks_' +'asa' +'artige' +'artif' +'arsen' +'arrangement_' +'arranged_' +'arm_' +'arität' +'arist' +'arising_' +'arde' +'archives_' +'arbeitende' +'arabische_' +'aqua' +'apt' +'approv' +'appoint' +'apart' +'anzust' +'anzukurbeln_' +'anzugehen_' +'anzuerkennen_' +'anzi' +'antis' +'antiqu' +'antic' +'anta' +'anstieg' +'anstehende' +'anspruchsvolle' +'ansi' +'anny' +'announcement' +'anni' +'anna' +'ann' +'ankomm' +'anische_' +'angry_' +'angre' +'angestrebte' +'angesprochene' +'anges' +'angene' +'angelegte' +'angekündigte' +'angekündigt_' +'angehör' +'angehen_' +'angegeben_' +'angebot' +'angeblich_' +'anfä' +'anen_' +'ands' +'anat' +'analysier' +'analyses_' +'amin' +'amer' +'ambiance_' +'aman' +'alu' +'altogether_' +'alternat' +'allu' +'alls' +'algo' +'aler_' +'alem' +'akzeptabel_' +'aktivist' +'aktiviert_' +'aktiven_' +'aktiv' +'aktion' +'ais' +'aim' +'ahlung_' +'ahlen_' +'ahe' +'ags_' +'agon' +'aggressive' +'aggre' +'after' +'afraid_' +'afi' +'afford' +'afflict' +'advocating_' +'advocates_' +'adventure_' +'adu' +'ads_' +'admitted_' +'administrat' +'adjust' +'adj' +'add' +'acute_' +'actress_' +'acquisition_' +'acle' +'acke' +'aches_' +'ache_' +'ace_' +'accumulati' +'accounted_' +'accessories_' +'accesse' +'abzust' +'abzule' +'abuses_' +'abstain_' +'abstain' +'absor' +'abschl' +'aboard_' +'ablen' +'ablauf' +'abkommens_' +'abgez' +'abgegeben_' +'abandoning_' +'Zwischen' +'Zwi' +'Zweig' +'Zut' +'Zuschauer' +'Zuf' +'Zucker_' +'Zit' +'Zell' +'Zeitschrift_' +'Zeitr' +'Zeilen' +'Zehn_' +'Zar' +'Yuk' +'Yen_' +'Yemen_' +'Yam' +'Xa' +'XVI' +'XLS' +'Wüsten' +'Wür' +'Wälder_' +'Wy' +'Worf_' +'Word' +'Woods_' +'Wissens_' +'Wissens' +'Wirtschaftswachstums_' +'Winters' +'Winds' +'Will' +'Wiener_' +'Widersprüche_' +'Wide' +'Whenever_' +'Wettbewerbe' +'Wertpapiere_' +'Wertpapier' +'Werkzeug_' +'Werkst' +'Werde_' +'Wenige' +'Weltwirtschafts' +'Wellnessbereich_' +'Weiterentwicklung_' +'Weihnachten_' +'Weich' +'Wed' +'Weber_' +'Wave' +'Watt' +'Wasch' +'Warnung' +'Wandels_' +'Wan' +'Wahrnehmung_' +'Wahlkampf' +'Wag' +'Wachstumss' +'WP' +'Völkerrecht' +'Vs_' +'Vorredner' +'Vorre' +'Vorrang_' +'Vorherrschaft_' +'Voraus_' +'Voraus' +'Volume' +'Vitorino_' +'Visu' +'Visa_' +'Vis' +'Vinc' +'Victoria_' +'Via_' +'Verwirklichung_' +'Vertrieb' +'Vertreter' +'Vertrags_' +'Vertrages_' +'Verteidigungsminister' +'Vermögen_' +'Verletz' +'Verlagerung_' +'Verkehrsnetz' +'Verkehrsa' +'Verkaufs' 
+'Verhältnis' +'Vereinig' +'Verbraucherschutz' +'Verbrauchern_' +'Verantwortlichkeit' +'Vario' +'VIC' +'VE' +'VD' +'Ura' +'Updates_' +'Unterscheidung_' +'Unters' +'Unternehmer' +'Unterfangen_' +'Unst' +'Universal_' +'Unionsbürger' +'Unfälle' +'Underground_' +'Unde' +'Umst' +'Umsatz_' +'Umbr' +'Ultimate' +'Ul' +'Uf' +'USE_' +'UP' +'UNM' +'Türen_' +'Tät' +'Twe' +'Turni' +'Turm' +'Turi' +'Tunnel' +'Tud' +'Tsi' +'Tschech' +'Truppe' +'Troi' +'Tric' +'Tradi' +'Tr' +'Toyota_' +'Ton_' +'Tomo' +'Tom_' +'Toleranz_' +'Tode_' +'Tod' +'Thor' +'Thom' +'Thirdly_' +'Thinking_' +'Theor' +'Theatre_' +'Thal' +'Th' +'Terrace_' +'Terra' +'Tenn' +'Tendenz_' +'Ten_' +'Temp' +'Tell' +'Tehran_' +'Technologie' +'Tay' +'Tausend' +'Tatsachen_' +'Task_' +'Take' +'Table' +'Tabak' +'TP' +'TOS_' +'TION' +'Süde' +'Südamerika' +'Säule' +'Sydney_' +'Superma' +'Sum' +'Sud' +'Subve' +'Substanz' +'Subsidiarität_' +'Stück_' +'Stuttgart_' +'Stufe_' +'Studierende' +'Student' +'Stress_' +'Stock_' +'Sto' +'Stil' +'Stig' +'Stift' +'Sti' +'Steuererhöhungen_' +'Stereo' +'Steigen' +'Stay' +'Statut' +'Statistiken_' +'Station' +'Starts' +'Standort' +'Stamm' +'Stal' +'Stabilitäts' +'Staatsb' +'Staatsa' +'Sri_' +'Sponsor' +'Spenden' +'Spekulation' +'Speed_' +'Spaziergang_' +'Sozials' +'Sozialpartner' +'Souvenir' +'Sonic_' +'Songs' +'Somit_' +'Solutions_' +'Sobald_' +'Slowakei_' +'Slideshows_' +'Sk' +'Sina' +'Simpl' +'Silver_' +'Silv' +'Sil' +'Siedlungen_' +'Sichtweise_' +'Sich' +'Shopping_' +'Sharon_' +'Sex_' +'Seuche' +'Session_' +'Serikat_' +'Seri' +'Sensor' +'Selbstver' +'Selbstbe' +'Sekunden_' +'Sekt' +'Seitens' +'Segel' +'Seg' +'Schü' +'Schönheit_' +'Schä' +'Schwerpunkt' +'Schulb' +'Schra' +'Schmidt_' +'Schlacht_' +'Schiffe_' +'Schichten_' +'Schengen_' +'Schauspieler_' +'Scandinavia' +'Save_' +'Sav' +'Sat_' +'Sanierung_' +'Samu' +'Samstag_' +'Same' +'Saharan_' +'Sah' +'Sag' +'Safe_' +'Sac' +'Sab' +'Saatgut_' +'SOL' +'SC_' +'Rückzug_' +'Rücken_' +'Roth' +'Rollen' +'Ring' +'Rig' +'Ries' +'Richtig' +'Rice_' +'Ria' +'Review_' +'Reu' +'Result' +'Ressourcen' +'Residenz_' +'Residence_' +'Reparatur' +'Rennen_' +'Renditen_' +'Rek' +'Reit' +'Reinh' +'Reihenfolge_' +'Reife' +'Reichtum_' +'Reichs' +'Reich' +'Regulierungsbehörden_' +'Regen' +'Reformp' +'Refle' +'Referen' +'Redebeitr' +'Recovery_' +'Rechtsvorschrift_' +'Rechtsgrundlage_' +'Rechnungshof' +'Rechner_' +'Rechn' +'Recently_' +'Read_' +'Read' +'Raumfahrt' +'Rauch' +'Ras' +'Rang' +'Radisson_' +'RS' +'RP' +'REACH_' +'RC' +'Quin' +'Quart' +'Qi' +'Pä' +'Pyr' +'Putsch' +'Ps' +'Präsidentschafts' +'Provid' +'Protokolls_' +'Prost' +'Promi' +'Produktp' +'Produktivitäts' +'Prinz' +'Print' +'Primär' +'Prima' +'Price_' +'Pres' +'Prag_' +'Posten_' +'Portfolio' +'Populis' +'Polizist' +'Polizeia' +'Poettering_' +'Poe' +'Plugin' +'PlayStation_' +'Plattform_' +'Pir' +'Pipe' +'Philippines_' +'Phil' +'Pfe' +'Persönlichkeiten_' +'Persian_' +'Pec' +'Pazifik' +'Passag' +'Partition' +'Part_' +'Part' +'Parlamente_' +'Parking_' +'Palästinensern_' +'Paketen_' +'Paa' +'PNR_' +'PCs_' +'PA_' +'PAR' +'Otto' +'Osteuropa_' +'Ostasien_' +'Oscar_' +'Ort' +'Oro' +'Orange_' +'Oppositions' +'Operationen_' +'Olympischen_' +'Olympi' +'Office' +'Ocean_' +'Obs' +'Obl' +'Oberflächen' +'OSZE_' +'OM' +'Nähr' +'Nous_' +'Nixon_' +'Nicaragua_' +'Nic' +'Neus' +'Netz' +'Netanyahu_' +'Nes' +'Nenn' +'Navigation_' +'Nau' +'Natural_' +'Nationalen_' +'Namens' +'Nahrung_' +'Nag' +'Nad' +'Nachweis_' +'Nachk' +'NU' +'NPT_' +'NN_' +'NL_' +'NL' +'NET_' +'NAFTA_' +'Mühl' +'Mächte' +'Must' +'Motors' +'Motor_' +'Moro' +'Morgan_' +'Morg' +'Monti_' 
+'Mont_' +'Mone' +'Monday_' +'Moderni' +'Mittelmeerraum_' +'Mittela' +'Mitgliedsländern_' +'Mis' +'Minister' +'Mind' +'Migu' +'Mexican_' +'Meth' +'Mercosur_' +'Menschenrechten_' +'Meldung' +'Mehrheit' +'Meeting_' +'Medikament' +'Mayo' +'Maximum_' +'Materi' +'Masse_' +'Maschine_' +'Marktk' +'Marken_' +'Marine' +'Marin' +'Mandrake' +'Mandela_' +'Mandel' +'Mand' +'Manchester_' +'Main' +'Maes' +'MU' +'MT' +'MIT_' +'MIL' +'Lyon_' +'Lun' +'Luftverkehr_' +'Los' +'Londoner_' +'Liv' +'Little_' +'Lithuania_' +'Liquiditäts' +'Linz_' +'Linken_' +'Line' +'Limit_' +'Lig' +'Licht' +'Libert' +'Liberia_' +'Liberal_' +'Level_' +'Lev' +'Les' +'Leiter_' +'Leib' +'Legislat' +'Legi' +'Lebensmittelsicherheit_' +'Lebensmitteln_' +'Leb' +'Lay' +'Lauf_' +'Large_' +'Lanzarote_' +'Lane_' +'Landschaft_' +'Lad' +'Labora' +'Labor' +'LL_' +'LCD_' +'LAN_' +'Kurs' +'Kura' +'Kuch' +'Kreis_' +'Kredit_' +'Kosm' +'Kopf' +'Kooperations' +'Konzert' +'Konzentration_' +'Kontakte_' +'Konsultation_' +'Konsolidierung' +'Konse' +'Kongress' +'Konflikt' +'Konfiguration' +'Kompl' +'Kommen' +'Kommando_' +'Kommando' +'Komit' +'Kohä' +'Kofi_' +'Koch' +'Kob' +'Knopf' +'Klä' +'Klick' +'Klarheit_' +'Klang' +'Kirchen' +'Kirch' +'Ki_' +'Khamenei_' +'Kernkraft' +'Kennedy_' +'Kay' +'Kasse' +'Kartell' +'Karibik_' +'Kapitel_' +'Kapitalm' +'Kapazität_' +'Kanten_' +'Kampa' +'Kaiser_' +'KT_' +'KT' +'KP' +'KOM_' +'Jungen_' +'Jugoslawien_' +'Jugendliche_' +'Jugend_' +'Journal_' +'José_' +'Joint_' +'Joe' +'Jia' +'Jeff' +'Jedoch_' +'Jede' +'Jardin_' +'Jane' +'Jahrzehnts_' +'Jahr' +'Isle' +'Islamist' +'Investmentbank' +'Investitionsbank_' +'Interpret' +'Internetseite' +'Interinstitution' +'Integrationsp' +'Instrument' +'Instituts_' +'Institution' +'Insi' +'Innovat' +'Innenhof_' +'Innen_' +'Ingenieure_' +'Informationsschalter_' +'Infolgedessen_' +'Indians_' +'Index' +'Independen' +'Impulse_' +'Importe' +'Immigration_' +'Ig' +'Ideal_' +'Ib' +'IST' +'ION' +'IDE' +'ICC_' +'Händler_' +'Hyde_' +'Hung' +'Hotelsafe_' +'Horn' +'Horde_' +'Hochschule' +'Hochschul' +'History_' +'Hintergr' +'Him' +'Heu' +'Herkunft_' +'Herausgeber_' +'Heim' +'Heat' +'Haushaltsp' +'Haushaltsl' +'Hat' +'Harbor_' +'Handt' +'Handlungen_' +'Handelspartner_' +'Handelsb' +'Handb' +'Halle_' +'Halle' +'Halbinsel_' +'Haiti_' +'Hag' +'Hafen' +'Hab' +'HN' +'HE' +'Gy' +'Gul_' +'Grünen_' +'Gründer_' +'Grundst' +'Grill' +'Graz' +'Gras' +'Gran' +'Grafik' +'Governor_' +'Gouverneur' +'Go_' +'Gleichw' +'Gleichheit_' +'Gleiche' +'Gift_' +'Gewissen_' +'Gett' +'Get' +'Gesundheitsp' +'Gesprächen_' +'Gespräch' +'Gesetzgeb' +'Geschäftsle' +'Geschäftsb' +'Geschäfts_' +'Gerhard_' +'Gent' +'Geni' +'Geneva_' +'Genehmigung_' +'Gene_' +'Gemüse' +'Gemeinschaftsm' +'Gemeinde_' +'Geltung' +'Geis' +'Geheimdienst' +'Gegenzug_' +'Gefühle_' +'Gefä' +'Gefangenen_' +'Geduld_' +'Gebäuden_' +'Gebäude' +'Geburt_' +'Gazastreifen_' +'Garten' +'Garni' +'Gare_' +'GNOME_' +'GM_' +'GEN' +'GC' +'GAP_' +'G8_' +'Führungskräfte' +'Födera' +'Future_' +'Futtermittel' +'Fusion' +'Furcht_' +'Funktion' +'Fuku' +'Fuerte' +'Frühling_' +'Frühjahr_' +'Friedrich' +'Friedman_' +'Friedensnobelpreis' +'Fremden' +'Freie_' +'Fran' +'Frageb' +'Fracht' +'Former_' +'Forge' +'Foot' +'Fon' +'Following_' +'Flüchtlingen_' +'Flächen_' +'Fläche' +'Flugzeug_' +'Fluggäste_' +'Flugg' +'Flotte' +'Florenz_' +'Fli' +'Fisher' +'Fine_' +'Finanzr' +'Finanzinstitute_' +'Finanzielle_' +'Finanz_' +'Files_' +'Fift' +'Few_' +'Fests' +'Festplatten_' +'Festland' +'Ferien_' +'Fels' +'Felder' +'Feind' +'Fei' +'Fea' +'Fav' +'Fasc' +'Fantas' +'Fall' +'Fahrzeug_' +'Fachw' +'FU' 
+'FPGA_' +'FP' +'FOR_' +'FF_' +'Extremisten_' +'External_' +'Ex_' +'Evo' +'Ev' +'Euros' +'Euch_' +'Eti' +'Etage_' +'Erwerb_' +'Erw' +'Ersten_' +'Ersatz_' +'Ero' +'Erneuerung_' +'Eri' +'Erforder' +'Erdoğan_' +'Erdbeben' +'Erbe' +'Entwicklungsb' +'Entschädigung' +'Entschuldigung' +'Entschließungen_' +'Enhance' +'Engl' +'Energieb' +'Energiea' +'Endes_' +'Employment_' +'Empfang_' +'Electric' +'Eisb' +'Eis' +'Einzig' +'Einver' +'Eintritt' +'Einsparungen_' +'Einschränkungen_' +'Eins' +'Einkaufs' +'Einheiten_' +'Eingreif' +'Einfa' +'Eigentumsrechte' +'Eigenkapital_' +'Eiffel_' +'Eif' +'Ef' +'Economi' +'Ecke_' +'Ec' +'Dänemark_' +'Dut' +'Durban_' +'Drug' +'Drohungen_' +'Dritt' +'Dringlichkeit_' +'Dresden_' +'Drama' +'Download' +'Double_' +'Doll' +'Dokument' +'Document' +'Division_' +'Dist' +'Diskurs' +'Disco' +'Direktinvestitionen_' +'Diplom' +'Dies' +'Dialog' +'Dha' +'Devisen' +'Denkens_' +'Deng' +'Demokratischen_' +'Deli' +'Delhi_' +'Deg' +'Defizite_' +'Decision_' +'Datum_' +'Datenschutz' +'Dat' +'Danke_' +'Dan_' +'Dali' +'DT' +'DNA_' +'Cz' +'Currently_' +'Curren' +'Cubase_' +'Cs_' +'Crown' +'Cross_' +'Crisis_' +'Criminal_' +'Cove' +'Cost' +'Corporate_' +'Corn' +'Cori' +'Copyright_' +'Convenient' +'Contra' +'Continu' +'Connect' +'Competiti' +'Columbia_' +'Color_' +'Colla' +'Cocktail' +'Client' +'Clear' +'Claudi' +'Clar' +'Civi' +'Choose_' +'Chemie' +'Chef' +'Check' +'Charakter' +'Channel_' +'Chame' +'Certain' +'Catholic_' +'Cathedral_' +'Castel' +'Cash_' +'Case_' +'Casa_' +'Casa' +'Carolyn_' +'Carne' +'Cara' +'Capital_' +'Cance' +'Cala' +'Cafés_' +'CS_' +'CSS_' +'COM' +'COD_' +'CNS_' +'CN' +'CHI' +'CAS' +'Burk' +'Bundesregierung_' +'Bui' +'Buche' +'Brutto' +'Brun' +'Bruch' +'Brothers_' +'Brot' +'Broadway_' +'Bring' +'Brid' +'Brea' +'Brazilian_' +'Bou' +'Boris_' +'Bombe' +'Bolivien_' +'Blume' +'Blizzard_' +'Blitz' +'Bisc' +'Bir' +'Biokraftstoffe' +'Bildungss' +'Bib' +'Bh' +'Bezug' +'Bezirk' +'Bevölkerungen_' +'Betriebe' +'Betre' +'Betrag_' +'Bestrebungen_' +'Beste_' +'Besonders_' +'Beseitigung_' +'Beschränkungen_' +'Bergen_' +'Berechtigung' +'Berater_' +'Berat' +'Benzin' +'Benutzer' +'Benutz' +'Bemühen_' +'Belle' +'Bell' +'Beleidigung' +'Beitrittsverhandlungen_' +'Behinderte' +'Behauptung_' +'Begriffe_' +'Begriff' +'Begleiter' +'Begin' +'Befehl' +'Bedauerlicherweise_' +'Bed_' +'Beam' +'Bavaria_' +'Baust' +'Battle' +'Basi' +'Bashir_' +'Bart' +'Barrier' +'Barnier_' +'Barcode_' +'Barcelon' +'Barc' +'Barbara_' +'Banglades' +'Bang' +'Ban_' +'Balkan' +'Baker' +'Bahnh' +'BT' +'BES' +'BA_' +'Außenministeri' +'Autorit' +'Autonom' +'Ausw' +'Ausse' +'Ausmaße' +'Auslös' +'August' +'Augenblick_' +'Auftritt' +'Auftrags' +'Aufsichtsrat' +'Aufsichts' +'Aufschwung_' +'Aufruf' +'Aufpreis_' +'Auflösung_' +'Audio' +'Ath' +'Astrium_' +'Asp' +'Argumentation_' +'Arbeitsweise_' +'Arbeitsk' +'Arbeitsbedingungen_' +'Arbeits_' +'App' +'Anwe' +'Antonio_' +'Anton' +'Anth' +'Anreisedatum_' +'Ano' +'Anhang_' +'Angriffs' +'Angestellte' +'Angehörige' +'Ang' +'Andrew_' +'Andreas' +'Anb' +'Alten' +'Alt_' +'Alpha_' +'Alltag_' +'Allgemein' +'Allen_' +'Alco' +'Alan_' +'Akteur_' +'Akku' +'Aix_' +'Ahn' +'Ahmadinejad_' +'Agr' +'Again_' +'Afrikan' +'Affi' +'Aff' +'Adv' +'Admiral_' +'Adi' +'Add_' +'Add' +'Activ' +'Achse_' +'Academy_' +'Abstand_' +'Abend' +'Abbildung' +'ATT' +'ASPs_' +'API' +'AN_' +'AKP_' +'A4' +'=' +'94' +'91_' +'89' +'86' +'77' +'75' +'74_' +'73' +'61' +'58' +'55' +'500' +'48' +'400' +'34' +'225' +'20th_' +'1976_' +'1970s_' +'197' +'1962_' +'1933_' +'1929_' +'1914_' +'18' +'169' +'168' +'163' +'154' +'152' +'111_' +'108' 
+'0s_' +'05' +'006' +'.. _' +'.-_' +', [_' +'*' +'), "_' +'))_' +') , _' +'''' ' +'&#_' +'"-_' +'")' +' :' +' // _' +' ...' +' ,' +' *' +' (* _' +' ''[[_' +' #' +' _' +'„' +'“ ' +'— _' +'ь_' +'ше' +'ци' +'х' +'ф' +'тт' +'сти' +'от' +'он' +'ол' +'о_' +'за' +'же' +'ді' +'ды' +'ан' +'С' +'И' +'τ' +'ša' +'Š' +'ław_' +'če' +'ć_' +'ütz' +'üt' +'üstung' +'üste_' +'üste' +'ürzung' +'ürzt' +'ünst' +'ünder' +'ünde' +'üllen_' +'ührt_' +'ühre' +'ügliche' +'üchte_' +'ücht' +'übl' +'überwunden_' +'überwiegen' +'übertrieben_' +'übersteig' +'überraschend_' +'übern' +'übermäßig_' +'übermittelt_' +'überlegt_' +'übergreifende' +'überga' +'überein_' +'üben_' +'ún_' +'úl' +'ösung_' +'östlich' +'österreichische' +'öpfen_' +'öne' +'öhne' +'öhn' +'ögen' +'öfe_' +'öder' +'ño_' +'ère_' +'çais' +'äuter' +'äumt_' +'ätzlich' +'äte_' +'äte' +'ässer' +'äse' +'ärts' +'ärs' +'ärme' +'äni' +'ängen' +'ändigkeit_' +'ändig_' +'änderungen_' +'änderung_' +'äme' +'älteste' +'ältere_' +'äle' +'ähnlicher_' +'ähnel' +'ähigkeit_' +'ägyptische_' +'äglich' +'ägige' +'ât' +'ßte' +'ßnahmen' +'Überw' +'Übersetzungen_' +'Überschw' +'Überraschung' +'Überprüf' +'Überlegen' +'Überein' +'Üb' +'Ölpreise_' +'Ökosystem' +'Ökonom' +'Öffentliche_' +'Öff' +'Ängste_' +'Än' +'Ähnlich' +'©' +'    ' +'}}) {{_' +'}}' +'}' +'zögern_' +'zzo_' +'zze' +'zy_' +'zwischenstaatlichen_' +'zwecke' +'zwangsläufig_' +'zuwei' +'zuversichtlich_' +'zuverlässig' +'zuteil_' +'zut' +'zuständige_' +'zuste' +'zusammensetz' +'zusammengebr' +'zurückzukehren_' +'zurückl' +'zurückgreifen_' +'zurückgeh' +'zurückgeg' +'zur' +'zuläss' +'zuk' +'zugestimmt_' +'zugefü' +'zt' +'zqu' +'zoom_' +'zna' +'zitier' +'zini' +'zin_' +'zig' +'zielle' +'ziell_' +'ziehungen_' +'ziehung' +'zeugen_' +'zeuge' +'zentrums_' +'zellen' +'zeitwei' +'zeitung' +'zeichnet_' +'zehnte' +'zed' +'zahlung_' +'zahler' +'zad' +'yw' +'yto' +'ypto' +'yl' +'ybo' +'xw' +'xt' +'xit' +'xin' +'xenophob' +'xe_' +'xamp' +'würdigkeit_' +'wünschte' +'wöhnlich' +'wäsche_' +'wärtig' +'wunderschöne_' +'wound' +'worsen_' +'wors' +'workforce_' +'wora' +'woody_' +'wollend' +'woll' +'wohlhabende' +'wle' +'wl' +'wishing_' +'wirtschafts' +'wirkungs' +'wirksamere' +'wirksamen_' +'wirksam' +'wing' +'willi' +'wil' +'wig' +'wiederherzustellen_' +'widerst' +'widening_' +'wick_' +'whit' +'westliche' +'weste' +'west' +'wertvoll' +'werb' +'wendet' +'wem' +'weiße' +'weitreichende' +'weitergehen_' +'weig' +'weifel' +'wehr' +'week' +'wedding' +'weakness_' +'weaken_' +'wea' +'wast' +'washing_' +'wary_' +'warnings_' +'wandel_' +'walke' +'wald_' +'wahrnehmen_' +'waffen' +'wad' +'wab' +'völker' +'vé' +'vä' +'vy_' +'vri' +'vou' +'vorübergehend_' +'vorzubereiten_' +'vorstellung' +'vorste' +'vorsitzende' +'vorschriften_' +'vorliegen_' +'vorkommen_' +'vork' +'vorige' +'vorhers' +'vorher' +'vord' +'vorbereitet' +'vorbereiten_' +'vorbei' +'vorange' +'vons' +'volu' +'voltage_' +'vollem_' +'volcan' +'vocational_' +'voc' +'visuali' +'visor' +'vision' +'visib' +'vine' +'vigor' +'vielerlei_' +'vici' +'vibrant_' +'veränderten_' +'verzögert' +'verzweifelt' +'verzichte' +'verweist_' +'verweigern_' +'verwalten_' +'veru' +'vertreter_' +'vertreib' +'vertraue' +'vertrags_' +'vertraglich' +'verteilung_' +'verstärkten_' +'verständ' +'verstoßen_' +'verstorben' +'versprochen_' +'versprechen' +'verspr' +'versicherung' +'verschärfen_' +'verschwunden_' +'verschwende' +'verschuld' +'verscho' +'verschmutz' +'verschi' +'verringerte' +'vernünftigen_' +'vernünftige_' +'vernünftig_' +'vermeid' +'vermehrt' +'verma' +'verm' +'verleite' +'verleg' +'verlaufen_' +'verla' 
+'verkörpert_' +'verkn' +'verhandeln_' +'vergrößern_' +'vergr' +'vergleichen_' +'vergeb' +'verfolgten_' +'verfasst' +'verfallen_' +'vereinen_' +'vereinbarung' +'vereinbart_' +'vereinbar' +'verei' +'verehrte' +'verda' +'verbot_' +'verbleibenden_' +'verbindlichen_' +'verarbeitet' +'verarbeiten' +'verantwortungs' +'veranstaltungen_' +'veranlassen_' +'veraltet' +'verabschiedeten_' +'venue' +'ventu' +'vehement' +'vea' +'variables_' +'valu' +'validity_' +'valid' +'vai' +'vague_' +'vage' +'vaccine' +'vaca' +'vac' +'uzz' +'uum' +'utze' +'utung' +'utuh' +'utu' +'utter_' +'utter' +'utt' +'utilis' +'utig' +'utan' +'ustan' +'ussion' +'usschuss_' +'uso' +'usly_' +'usher' +'ush' +'usan' +'urteile' +'urte' +'urt_' +'urso' +'urlaub_' +'uris' +'urie' +'ured_' +'uras_' +'uptc' +'upper_' +'uppe' +'upl' +'uph' +'unüber' +'unwind_' +'unvorher' +'unverzichtbar_' +'unvers' +'unvergessliche' +'unterziehen_' +'unterschiedlicher_' +'unterl' +'unterhält_' +'untere' +'unterbreite' +'unsicher' +'unsi' +'unse' +'unrec' +'unrealisti' +'unpopul' +'unp' +'unnötige' +'unke' +'units_' +'unit' +'unilateral_' +'uniform_' +'unification_' +'ungswe' +'ungsv' +'ungsl' +'ungsgr' +'ungsan' +'ungewöhnliche' +'ungene_' +'ungeb' +'unga' +'unfähig_' +'unfo' +'unerlässlich_' +'undin' +'undertakings_' +'undertake_' +'underpinn' +'undermined_' +'underline_' +'unda' +'unconventional_' +'uncom' +'unchanged_' +'unberührt' +'unanimity_' +'unal' +'unabhängiges_' +'umweltfreundlich' +'umsetzen_' +'umg' +'umfeld' +'umfassender_' +'umfassend_' +'umfangreicher' +'umfang_' +'umd' +'umben' +'ultur' +'ultimative' +'uls' +'ule_' +'uldung' +'ular' +'uing_' +'ugh_' +'ufhin_' +'ufer' +'ued_' +'uder' +'uda' +'uci' +'uation_' +'tü' +'töt' +'tödliche_' +'täusch' +'tändig' +'tzte_' +'typen_' +'two' +'twist' +'twelve_' +'twar' +'turn' +'turbulen' +'tums' +'tumor' +'tue' +'tuc' +'tual_' +'ttung' +'tto' +'ttler_' +'ttet_' +'tsu' +'träglich' +'trust' +'trump' +'trugen_' +'tropical_' +'triple' +'tries_' +'trial' +'trend' +'treibende' +'treib' +'trei' +'tre_' +'travelling_' +'traveller_' +'trav' +'traurige' +'trategie_' +'transported_' +'transporte' +'transport' +'transparenter' +'transpar' +'transnationale' +'translations_' +'translat' +'transform' +'transferring_' +'trans_' +'train' +'tragi' +'trafe' +'traditione' +'traders_' +'trademark_' +'traded_' +'tracking_' +'toute' +'tout_' +'tourists_' +'touristi' +'toughe' +'touching_' +'touched_' +'totalitäre' +'torture_' +'toren_' +'tooth' +'toleran' +'toc' +'tne' +'tle_' +'tland' +'tk' +'tious' +'tionier' +'tine_' +'timetable_' +'tigt_' +'tigkeit' +'tiert' +'tiefe' +'tiat' +'thrill' +'thre' +'thr' +'therm' +'therapeuti' +'theorie_' +'theme' +'theit_' +'theatre_' +'theatr' +'thal' +'texte' +'teure' +'teu' +'tete' +'tet' +'testi' +'ters' +'terrasse_' +'terp' +'tenure_' +'tentang_' +'tenen_' +'tender' +'tendenziell_' +'temptation_' +'temples_' +'tem_' +'tellt' +'tellen_' +'telefoni' +'tein' +'teilnehmenden_' +'teilgenommen_' +'teigerung' +'teg' +'teen' +'tee' +'technologische_' +'technike' +'tched_' +'tausche' +'tariff_' +'tante' +'tanta' +'tanding' +'talked_' +'talis' +'talent' +'tad' +'tackl' +'sämtliche' +'sze' +'systemische' +'synt' +'synchronisier' +'switching_' +'swer' +'swell' +'sw' +'svor' +'survive' +'surro' +'surrender' +'supranational_' +'supposedly_' +'supervisor' +'superf' +'sung' +'summari' +'suits_' +'sue' +'sudden_' +'sud' +'successive_' +'subversi' +'subscribe' +'submi' +'sua' +'stützt_' +'stür' +'stät' +'ständige_' +'ständige' +'styles_' +'studierte_' +'stud' +'strukturellen_' +'structured_' +'strom_' 
+'stroll_' +'stricte' +'stress' +'strengen_' +'strafe_' +'stoßen_' +'stom' +'stol' +'stoffen_' +'stock' +'stl' +'stitch' +'stirbt_' +'stilvolle' +'still' +'steuerung_' +'steuerlichen_' +'stetig' +'sters_' +'steril' +'stenz' +'stehe' +'statu' +'stattgefunden_' +'stattfindenden_' +'statistische' +'statistical_' +'stationen_' +'stat' +'start' +'starker_' +'stando' +'standardiz' +'stan' +'staltung_' +'stagnieren' +'stagnat' +'stagn' +'stabiler_' +'stabil_' +'staatlich' +'ssysteme' +'ssungs' +'sstsein' +'ssing_' +'ssige' +'ssan' +'ssa_' +'sreg' +'sre' +'sq' +'späteren_' +'spyware_' +'spur_' +'spur' +'sprü' +'sprache_' +'spr' +'sporting_' +'spontane' +'sponsor' +'spiral_' +'spin' +'sphäre' +'spen' +'spektakulär' +'speedi' +'speculat' +'spars' +'sozialistische' +'souveränen_' +'sorte' +'soph' +'sonstige_' +'solving_' +'solide' +'sole' +'sola' +'sogenannte_' +'soft' +'soeben_' +'socialist_' +'soc' +'sob' +'soared_' +'snowb' +'snow_' +'snack' +'smooth' +'sly_' +'slower_' +'slot' +'slos_' +'slippe' +'slides_' +'slic' +'sl' +'sky' +'skill_' +'skill' +'skand' +'ska_' +'sk_' +'siz' +'sive' +'siv' +'sitzung' +'sition' +'sinnlos' +'sinn' +'singles_' +'sine' +'simplifi' +'simple' +'signifikant' +'sightseeing_' +'sighted_' +'sien' +'sid_' +'sicht_' +'sicht' +'sichert_' +'shoulder_' +'shou' +'shortages_' +'short' +'shooting_' +'shoot' +'shocked_' +'shock' +'shim' +'shifted_' +'shi' +'shar' +'sew' +'severely_' +'sever' +'settle' +'seti' +'servant' +'serta_' +'sers_' +'seren' +'serbische' +'sequenc' +'seperate' +'senti' +'sensors_' +'sensor_' +'senk' +'selecting_' +'sele' +'selber_' +'seja' +'seize_' +'seekers_' +'securing_' +'secured_' +'sect' +'secretar' +'seating_' +'seasons_' +'scre' +'scra' +'score' +'schöner_' +'schätzungsweise_' +'schätzt_' +'schädliche' +'schwierige_' +'schwache' +'schw' +'schrä' +'schreckliche' +'schreck' +'schnell' +'schn' +'schmutz' +'scheitern_' +'scheduled_' +'schedule_' +'scharfe' +'schaftung_' +'schaftl' +'schaff' +'sceptic' +'scenes_' +'scenario_' +'scape' +'scann' +'scandal_' +'scan_' +'sber' +'sati' +'samt_' +'samples_' +'sam_' +'salaries_' +'saison' +'sahen_' +'sade' +'saddle' +'sacrific' +'sack' +'sach' +'röme' +'räumen_' +'räum' +'räsident' +'räfte_' +'räch' +'rá' +'rust_' +'runde_' +'rulers_' +'ruk' +'ruin' +'ruhiger_' +'ruhe' +'ructi' +'rtuni' +'rtu' +'rtain' +'rows_' +'row' +'route' +'rot_' +'rosy_' +'ropriate' +'romantische' +'rom_' +'rojekt' +'rohstoff' +'roduc' +'robuste' +'rmen_' +'rlich_' +'rlich' +'rland' +'rke_' +'rivers_' +'rival_' +'rival' +'rity_' +'rism' +'riskante' +'risch' +'ringt' +'rimier' +'ril' +'rike' +'rika' +'rift_' +'ries' +'riert_' +'rieren_' +'rien_' +'rider' +'richtig' +'richtete_' +'richt_' +'riches' +'rial' +'rhythm' +'rfen_' +'rfe' +'revised_' +'review' +'reuni' +'rette' +'retre' +'retired_' +'retire' +'reti' +'restriktive' +'restricti' +'restoring_' +'restaur' +'respon' +'resid' +'resc' +'repu' +'repräsentieren_' +'reproductive_' +'reprodu' +'repri' +'replacement_' +'replace' +'repetiti' +'repeal' +'repar' +'repai' +'renovierte_' +'renn' +'reng' +'renamed_' +'removal_' +'rement' +'remember' +'reme' +'rema' +'relying_' +'reluctant_' +'rejects_' +'rejecting_' +'reise_' +'reis' +'reint' +'reinsta' +'reinforced_' +'reicht' +'reicher_' +'regulieren' +'regulators_' +'regrettabl' +'registrieren_' +'regierung' +'regieren' +'refund' +'refuge_' +'refrain' +'refor' +'redirect' +'redefin' +'recycle' +'recreational_' +'recoveri' +'reconst' +'recognizes_' +'recognis' +'rechtmäßig' +'rechtl' +'rebalancing_' +'reassure_' +'realm_' +'realize_' +'realist' 
+'realis' +'readers_' +'reactor' +'rdi' +'rde' +'rce' +'rca' +'rben_' +'rbeiten_' +'rba' +'ray_' +'raw' +'rativ' +'rategie_' +'rate' +'rar_' +'ranking' +'rane' +'random_' +'rand_' +'rand' +'rance' +'ralis' +'ral' +'raft' +'radioa' +'radikalen_' +'radikale_' +'rade' +'rada' +'quota' +'quot_' +'quet_' +'quet' +'quem' +'quarters_' +'quantity_' +'qualitativ_' +'qualit' +'qualifizierten_' +'qualifiziert' +'quaint' +'pursu' +'puede' +'publik' +'publici' +'public' +'pub_' +'ptu' +'ptions_' +'psychiatris' +'pse' +'prüfung_' +'präsent' +'provin' +'prototype' +'protectionism_' +'prospe' +'proportiona' +'prompt' +'promote' +'prom' +'prolonged_' +'projection' +'prohibiti' +'programm' +'profitiert_' +'profitable_' +'profitability_' +'professionellen_' +'profess' +'prof' +'produzierende' +'produkti' +'produ' +'problematisch_' +'problematic_' +'privileg' +'prioriti' +'print' +'pries' +'pretend_' +'presu' +'prest' +'pressing_' +'pressed_' +'preside' +'present' +'prerequisite_' +'prer' +'preparatory_' +'preparations_' +'prematurely_' +'preliminary_' +'preiswerte' +'preferences_' +'predict' +'preci' +'prech' +'precarious_' +'praxis' +'praktizieren' +'praktikable' +'prakti' +'praise_' +'pragmatische' +'practic' +'pph' +'pped_' +'ppe_' +'pp_' +'powered_' +'pour' +'postpone' +'posted_' +'possess' +'positiv' +'portugiesische' +'portrait' +'portion_' +'porn' +'populistischen_' +'pok' +'pois' +'poet' +'plötzliche' +'plate_' +'plastic' +'plas' +'planes_' +'plain_' +'plag' +'placem' +'plac' +'pixels_' +'pir' +'pio' +'pilgrim' +'pid' +'pia' +'physicians_' +'philosophi' +'phenomena_' +'pf_' +'pez' +'peu' +'petani_' +'pet' +'pest' +'perver' +'pertan' +'personnes_' +'personality_' +'permissi' +'perlu_' +'perl' +'perish' +'perf' +'perba' +'pera' +'penge' +'penetration_' +'penduduk_' +'pendidikan_' +'pendi' +'penalties_' +'pembe' +'pei' +'peg' +'pec' +'pd' +'pav' +'patents_' +'pate' +'pat' +'paste' +'passt_' +'passion_' +'partic' +'parti' +'pari' +'parameter_' +'pand' +'pal_' +'pair' +'painter' +'paint_' +'pact' +'packen_' +'pac' +'oßen' +'oxida' +'oxi' +'ox' +'ovi' +'overt' +'overlooks_' +'overlooked_' +'overlook_' +'overflow_' +'overco' +'outweigh' +'outlook_' +'outermost_' +'otic_' +'oster' +'orsch' +'ors' +'orische' +'oris' +'orin' +'organize_' +'organisierte' +'orene' +'ordnungsp' +'ordnet_' +'orderly_' +'orde' +'oration_' +'opu' +'optionen_' +'optimiert_' +'oppos' +'opia' +'oph' +'opfer' +'operier' +'operativen_' +'oot_' +'ook_' +'onsk' +'onna' +'omy_' +'omo' +'ologischen_' +'ollst' +'olla' +'olis' +'olic' +'oler_' +'olat' +'oh_' +'offs_' +'offre' +'offizieller_' +'offenbar' +'odds_' +'oda' +'och' +'ocean_' +'oce' +'obsess' +'obs' +'oblem_' +'objecti' +'obersten_' +'oberst' +'oberh' +'obat' +'oad' +'nützliche' +'nötige_' +'nördlich' +'nö' +'nähere' +'nya' +'nw' +'nuts_' +'nut' +'ntwicklung_' +'ntl' +'ntif' +'nta_' +'nswert_' +'nste' +'nsp' +'nschl' +'nsche' +'nre' +'nqu' +'npr' +'nov' +'notable_' +'north' +'norms_' +'norm_' +'nommene' +'nomen' +'nod' +'nnen' +'nme' +'nly_' +'nle' +'nkt' +'nji' +'niños_' +'niv' +'nisch_' +'nießer' +'niert' +'nico' +'nick' +'ngun' +'ngt_' +'ngg' +'nges_' +'ngeb' +'nfalls_' +'newer_' +'nevertheless_' +'neutrali' +'neutral_' +'neuartige' +'netzwerk' +'nes' +'neoliberal' +'nennt_' +'neighborhood_' +'nehmbar_' +'negotiators_' +'near' +'ndt' +'ndre' +'ndlung' +'ndere' +'ncier' +'nchi' +'navigati' +'navigate_' +'nav' +'nationals_' +'nationalists_' +'nata' +'nast' +'nas_' +'narrative' +'nament' +'nam' +'nachweis' +'nachteilig' +'nachkommen_' +'nachgewiesen' +'nachdenken_' +'nable_' 
+'mündliche_' +'mäßigkeit_' +'mäler' +'mysti' +'mutual' +'musik' +'musical_' +'multipl' +'multilaterale' +'multic' +'mption_' +'mpfe' +'moti' +'monst' +'monet' +'momentum_' +'module_' +'modu' +'modernem_' +'modell' +'mobiliz' +'mmungen_' +'mml' +'mmer' +'mmen_' +'mmel' +'mliche' +'mle' +'mittelfristig' +'mitigat' +'mitget' +'mission' +'missed_' +'misg' +'mische' +'mins' +'ministry_' +'minimize_' +'mines_' +'miner' +'mik' +'mig' +'mia_' +'mia' +'mg' +'mers_' +'mern_' +'merkwürdig' +'mercury_' +'merchan' +'merat' +'mera' +'meny' +'menti' +'menschlicher_' +'mengha' +'meng' +'memu' +'memo' +'memi' +'meldungen_' +'meister_' +'meist' +'meint' +'mehrmals_' +'meets_' +'medication' +'mechanisch' +'mean' +'mbl' +'maß_' +'matik' +'materiellen_' +'mate_' +'masih_' +'masalah_' +'marriage_' +'marri' +'marktor' +'markieren_' +'marke' +'marit' +'marina_' +'mare' +'manufactured_' +'manual_' +'mankind_' +'manipulati' +'manipulated_' +'manifest_' +'manden_' +'mammals_' +'mainst' +'magneti' +'machine' +'löschen_' +'läufe' +'längerfristige' +'längere_' +'lz_' +'lys' +'luxu' +'lun' +'luggage_' +'luft' +'ludi' +'lud' +'ltig' +'lth' +'lte' +'lovers_' +'lof' +'locken' +'lock' +'locally_' +'loca' +'loads_' +'llung' +'llin' +'llige' +'llia' +'lizi' +'litera' +'lisiert_' +'lion_' +'liner_' +'lineare' +'linder' +'lige' +'lifestyle_' +'lif' +'lieferte' +'lieben_' +'lichste_' +'licence_' +'libysche' +'liberty_' +'liaison_' +'lh' +'lg' +'letzte' +'letz' +'letters_' +'lessly_' +'lernt' +'lerat' +'lep' +'leistungsfähig' +'leistet_' +'leiden' +'leichte_' +'lehnt' +'legitim' +'legislator' +'lebend' +'lear' +'league_' +'lder_' +'lde_' +'laying_' +'laya' +'laus' +'launch' +'lauf' +'lation_' +'las' +'landm' +'landing_' +'lain' +'laime' +'labeled_' +'label' +'künft' +'köstliche' +'könig' +'ky' +'kwa' +'kungen_' +'kultur' +'ktr' +'ktivitäten_' +'ktionen_' +'kter' +'kst' +'ksch' +'kreativ' +'krati' +'krat' +'krank' +'kra' +'kostspielige' +'kostenloses_' +'kostenlose' +'kostengünstig' +'kostenfreie' +'korrekte_' +'koordinierte' +'koordinieren_' +'konzer' +'konvertier' +'konvention' +'konte' +'konstruktiv' +'konsequente_' +'konsequent_' +'kons' +'kono' +'konferenz_' +'komplizierter_' +'kompatib' +'kombinier' +'kollektive' +'kohlenstoffarme' +'kod' +'knüpf' +'know' +'klu' +'kleinste' +'klargestellt_' +'klare' +'kis' +'kirch' +'kir' +'kill' +'kidnapp' +'ketten_' +'kese' +'kepe' +'kepada' +'kens' +'kende' +'kemu' +'keln' +'kei' +'kea' +'kaya' +'kategori' +'kare' +'kapital_' +'kant' +'kanis' +'kand' +'kanadische' +'kamer_' +'kali_' +'kala' +'kah_' +'jut' +'junt' +'jud' +'jos' +'joga' +'jin' +'jeweilige_' +'jeti' +'jes' +'jeopardi' +'jenseits_' +'jenige_' +'jenem_' +'jat' +'jalan' +'jahre' +'jadi' +'jack_' +'ière' +'ivo' +'ivit' +'ius_' +'itäten_' +'ität' +'itting_' +'itter' +'itra' +'itor' +'itive' +'itier' +'itet_' +'ites_' +'italienische_' +'isung_' +'ister_' +'isten' +'ist' +'isoliert_' +'isk' +'isieren_' +'ished_' +'irrational' +'irgendeinem_' +'irgendeine' +'ipati' +'iones_' +'invoke' +'invites_' +'invited_' +'invitation_' +'investor_' +'investigat' +'intri' +'intra_' +'intl' +'intimate_' +'interpret_' +'interne' +'intermediar' +'interiors_' +'interinstitutionelle' +'interfere_' +'interessante' +'interessant_' +'interess' +'interaktive' +'inter_' +'intent_' +'intensivier' +'intelligente' +'insula' +'instrumente' +'instructi' +'institutionen_' +'institute_' +'instan' +'installations_' +'instabile' +'inspiration' +'insisting_' +'insisted_' +'insig' +'insert_' +'insect' +'inoffiziell' +'innovativen_' +'inneren_' +'innere_' +'inner' 
+'inklusive_' +'injured_' +'inige' +'inhuman' +'inherent_' +'inhaber_' +'inh' +'ings' +'ingredient' +'ingl' +'inger_' +'inga' +'infrastruktur' +'informellen_' +'influences_' +'infla' +'infer' +'ined' +'industrializ' +'indlich' +'individuell_' +'individually_' +'indis' +'indirectly_' +'indigenous_' +'indifferen' +'indicating_' +'indicat' +'inconsisten' +'incompeten' +'incom' +'inal' +'inakzeptabel_' +'importiert' +'impo' +'implement' +'impetus_' +'immunity_' +'imen_' +'imate' +'imaging_' +'ima_' +'ima' +'ilung' +'ilm' +'illustr' +'illiberal' +'illegale' +'illegal' +'ilität' +'ilita' +'ilie' +'ili_' +'ilen_' +'ileg' +'ilat' +'ilan' +'ikation' +'ige' +'iga' +'ifft_' +'iffen_' +'ifen_' +'ießen_' +'ieß_' +'ierungsa' +'iens_' +'ielt_' +'iele' +'iegs' +'iede' +'iebene' +'ieb_' +'idle' +'idier' +'ider_' +'ider' +'identit' +'identisch' +'identifiziert_' +'ideally_' +'idad_' +'icon' +'icken_' +'ick_' +'ichtigt' +'iche' +'ibut' +'ibu' +'iat' +'iali' +'höhe' +'höh' +'höchstens_' +'hö' +'hän' +'häl' +'hypocrisy_' +'hydrocarbon' +'hungry_' +'humo' +'humanitären_' +'hts' +'hrs' +'house' +'hostile_' +'hostage_' +'horse_' +'horn' +'hore_' +'hopeful' +'honor_' +'homosexual' +'hofft' +'hof' +'hoc_' +'hnen' +'hman' +'hkan_' +'historically_' +'hip' +'hing' +'hinein' +'hindern_' +'hinder_' +'hinausgeh' +'hilfsbereit_' +'hilfs' +'hilfen_' +'hila' +'highlight' +'hierarch' +'hful' +'heti' +'hetero' +'hervorr' +'hervor' +'hers' +'herrschaft_' +'herein_' +'herd' +'herbeizuführen_' +'herausstellen_' +'heran_' +'herab' +'hent' +'hens' +'hend' +'hemi' +'heme' +'heirate' +'hedge' +'hed' +'hebt_' +'healing_' +'hea' +'hazard' +'haushaltspolitische' +'haupt' +'haul_' +'hath_' +'hase_' +'hars' +'harmonisiert' +'harmonis' +'hari_' +'harbour_' +'har_' +'hang' +'handl' +'handicap' +'hag' +'hace' +'haber_' +'habe' +'güter_' +'gänge' +'gw' +'guy' +'gur' +'gungs' +'gsa' +'gründ' +'grundsätzliche' +'grundlegend' +'großzügige_' +'groundwater_' +'grossen_' +'grosse' +'griffen_' +'griff_' +'grenzübergreifende' +'grenz' +'green' +'greed' +'grave_' +'grat' +'grass_' +'graphic_' +'graph' +'gram_' +'grafische' +'graduate' +'grade' +'glä' +'glich' +'gleichberechtigt' +'glaubten_' +'glas_' +'gische_' +'girl' +'giga' +'gifts_' +'gian' +'ghan' +'ggen' +'geza' +'gewor' +'gewohn' +'gewissem_' +'gewinnt_' +'getä' +'getro' +'getre' +'getestet_' +'geta' +'gesunken_' +'gesund' +'gestützt_' +'gestellten_' +'geste' +'gespräche_' +'gesp' +'geschätzte' +'geschäfte_' +'geschä' +'geschenkt_' +'geschah_' +'gescha' +'gesamte' +'geräten_' +'gerisch' +'gerichtete_' +'gerech' +'gere' +'geplanten_' +'gepa' +'geordnet' +'geographische' +'gent_' +'genocide_' +'genk' +'genia' +'generieren_' +'generator' +'genehmigt' +'gene_' +'gende_' +'genaue_' +'gemütliche' +'gemeldet' +'gemeinsames_' +'gemachte' +'geltend_' +'gelenk' +'geleitete' +'gelegene_' +'geleg' +'gelb' +'gekü' +'gekämpft_' +'geiz' +'gei' +'geho' +'geheim' +'gefällt_' +'gefährlich' +'gefä' +'gefasst_' +'geeignet' +'geehrte_' +'gedie' +'gede' +'ged' +'gebraucht_' +'gebot' +'gebor' +'gebildete' +'gebene' +'gb' +'gau_' +'garr' +'garette' +'garantierte' +'gant' +'gang' +'gaming_' +'galt_' +'galax' +'gag' +'fürchten_' +'fünfte' +'fühlte' +'fähige' +'fähig' +'furthermore_' +'furt' +'furnishings_' +'funktionierende' +'funktionen_' +'fund' +'fun' +'fulfilling_' +'fuer' +'ftl' +'frustrated_' +'fru' +'frischen_' +'friend' +'friedlich_' +'freundschaft' +'freundlich' +'fres' +'freight_' +'freies_' +'französischer_' +'frankly_' +'fotografi' +'foster' +'fortgeführt_' +'formulier' +'formulate' +'formul' +'formel' 
+'foreg' +'forecasts_' +'foo' +'folk' +'foc' +'fläch' +'fluss_' +'flourish_' +'flour' +'flotte' +'flos' +'florier' +'flora_' +'flop' +'flood' +'fliegen_' +'fleisch' +'flas' +'fishermen_' +'fischen_' +'fis' +'firma' +'firewall' +'fires_' +'finest_' +'find' +'financially_' +'filter_' +'filter' +'fika' +'fighters_' +'fifteen_' +'fierce' +'fielen_' +'ffnung' +'ffende' +'ffel' +'ffe_' +'festlegen_' +'festivals_' +'festi' +'festges' +'festgehalten_' +'fes' +'fertigte' +'fertige' +'fere' +'fens' +'feine' +'feindlichen_' +'fehlge' +'fed' +'fast' +'fassung' +'fass' +'fashion' +'fascis' +'fare' +'farb' +'faktisch' +'fairer_' +'fad' +'factories_' +'ezieh' +'ez' +'exzellente' +'extremist' +'extraordinarily_' +'extracti' +'extern' +'extends_' +'exposed_' +'exporter_' +'exploring_' +'explains_' +'expire' +'experiments_' +'expedi' +'expectation_' +'existent' +'execute_' +'exe_' +'excite' +'excessively_' +'excel' +'examined_' +'evil_' +'eventual_' +'eve_' +'evade' +'europaweite' +'europaweit_' +'europa' +'europ' +'eur' +'etzungs' +'ette' +'ett_' +'etr' +'etisch' +'eties_' +'etie' +'etho' +'ethnische_' +'ethischen_' +'etan' +'estimat' +'ester_' +'espo' +'esh' +'esco' +'escalati' +'erörtert_' +'erörtern_' +'eröffnete' +'erzähl' +'erzeugte' +'erwähnte_' +'erwies_' +'erweiterte_' +'erweise_' +'erwartete' +'erw' +'erve' +'erto' +'ertig' +'erstes_' +'erstaunliche' +'erstaun' +'erstattung' +'erspar' +'erschien_' +'erschaffen_' +'eros' +'erobe' +'ernsthaften_' +'erni' +'erneute_' +'erneuerbaren_' +'erneuerbare_' +'ermu' +'erledigen_' +'erlan' +'erj' +'erinner' +'erier' +'erhöhungen_' +'erhobene' +'erhalte' +'ergänzende' +'ergen' +'ergebnisse' +'ergeb' +'erfreu' +'erfolgreicher_' +'erfahrene' +'erfahr' +'ereigne' +'erect' +'erbr' +'eras' +'erarbeitete' +'equal' +'episodes_' +'env' +'entspannten_' +'entsende' +'entschuldig' +'entries_' +'entrant' +'entrance_' +'entm' +'entlich_' +'entities_' +'entirety_' +'enthusiasm_' +'entha' +'entgehen_' +'entgegen' +'enswerte_' +'ension' +'ensh' +'enriche' +'enp' +'enorm' +'enl' +'englis' +'engineers_' +'engaging_' +'engagieren_' +'enforced_' +'energi_' +'energi' +'enduring_' +'endung' +'endorsed_' +'endi' +'endete' +'endemi' +'ency' +'encryption_' +'encouragement_' +'encountered_' +'encompassing_' +'encompass' +'ena_' +'empower' +'empl' +'empiri' +'empha' +'empfind' +'empfiehlt_' +'empfehle' +'empe' +'emotional' +'emer' +'embryon' +'embass' +'embarrass' +'eman' +'eln' +'ellungs' +'ellte' +'elle' +'elin' +'elig' +'elevated_' +'elevat' +'elemente' +'elem' +'eleganten_' +'elegan' +'elan' +'ektiv' +'ektion' +'eks' +'eke' +'eitige' +'eitet_' +'eisung' +'eisen' +'eis_' +'einzutreten_' +'einzuh' +'einzugehen_' +'einzub' +'einzigartige_' +'einzigartig' +'einz' +'einw' +'eintritt_' +'einstimmig_' +'einsti' +'einstellungen_' +'einste' +'einsa' +'einrichten_' +'einkommens' +'einhergehen' +'einher' +'einheimischen_' +'eingest' +'eingesp' +'eingeschl' +'eingeräumt_' +'eingeleitet_' +'eingeladen_' +'eingehe' +'einfl' +'eindruck' +'eindeutige' +'einander' +'eih' +'eigen_' +'eigen' +'eift' +'eien_' +'eichnet_' +'ehrung' +'ehrgeizige_' +'ehrgeizige' +'ehn' +'ehmen_' +'ehens' +'ehe_' +'egg_' +'efor' +'eff' +'ef_' +'editing_' +'edit_' +'eding' +'eder' +'ection_' +'eci' +'echse' +'ebenen_' +'dämm' +'dynamische' +'dyna' +'dwindl' +'dust_' +'durchschnitt' +'dumping_' +'dumm' +'dum' +'dua_' +'drohende' +'droh' +'dritter_' +'dring' +'dres' +'dramatische_' +'dox' +'downs_' +'downloading_' +'downloade' +'dose_' +'dore' +'doppelt_' +'doppel' +'dong' +'dominiert_' +'dominated_' +'domains_' +'dom' 
+'dogs_' +'dog' +'documentation_' +'dn' +'dk' +'diving_' +'diversi' +'divergen' +'disziplin' +'distributions_' +'distract' +'distinct_' +'disso' +'disre' +'dispo' +'displa' +'dispe' +'dispa' +'diskriminierung' +'discriminat' +'discovering_' +'discount_' +'disclose_' +'disciplin' +'disappointing_' +'disagreement' +'disadvantage' +'dire_' +'diplomatische' +'dip_' +'dioxid' +'dio' +'diminish' +'dimensions_' +'dili' +'dilemma_' +'dikti' +'digt_' +'dif' +'diet_' +'dies' +'dienste' +'dick' +'dichte' +'diagnosti' +'dez' +'devo' +'devis' +'developer' +'devaluation' +'deutscher_' +'deutsch' +'deut' +'determining_' +'determin' +'deter_' +'deter' +'detection_' +'detai' +'destinations_' +'designated_' +'deserv' +'ders' +'derogations_' +'derartig' +'depression_' +'depreciation_' +'depreciat' +'depre' +'depos' +'dependency_' +'depan_' +'denomination' +'dende' +'demonstration_' +'demonstr' +'delte' +'deliberate' +'delegati' +'delays_' +'deko' +'dek' +'deinem_' +'deflation' +'definitive' +'definitions_' +'definierte' +'definieren_' +'defer' +'defensive_' +'defa' +'deeper_' +'deepen' +'decreas' +'decoupling_' +'deciding_' +'decides_' +'debattiert' +'deal' +'dauernd' +'date' +'dargelegt' +'dankbar_' +'dana_' +'damaligen_' +'damalige_' +'damaging_' +'dahin' +'cynic' +'customize' +'custom_' +'curr' +'cure' +'cula' +'ctable_' +'crown' +'crossing' +'cron' +'criticise' +'criterion_' +'criminali' +'cove' +'coupon' +'coun' +'couldn_' +'cott' +'corresponds_' +'correlation' +'corrections_' +'correction_' +'correct' +'corr' +'corpora' +'cornerstone' +'corn' +'cooperative_' +'cooked_' +'coo' +'convers' +'contribu' +'contradiction' +'contra' +'continuously_' +'continuous_' +'continents_' +'contest_' +'contemp' +'cont' +'consume' +'consultations_' +'constructi' +'constitution' +'conso' +'consent_' +'connectivity_' +'conjunction_' +'congress_' +'congratulations_' +'confort_' +'conform' +'conflict' +'configured_' +'confess' +'conductors_' +'conditione' +'concessions_' +'concert_' +'concert' +'composit' +'composer' +'complied_' +'complementary_' +'complement_' +'comple' +'complacen' +'compensat' +'compelling_' +'compe' +'comparable_' +'communa' +'commu' +'commend' +'comf' +'comb' +'colorful_' +'color' +'colony_' +'collecti' +'collect' +'collapsed_' +'coi' +'coe' +'cob' +'coastline' +'coach_' +'cluster_' +'clus' +'cloth' +'clos' +'climb_' +'clearer_' +'clean' +'clamp' +'cking_' +'cited_' +'cisi' +'circles_' +'circ' +'cipa' +'cion_' +'cil_' +'cic' +'chw' +'chuld' +'chu' +'chtung' +'chtigt' +'chter_' +'chtbar' +'chste' +'chst' +'chor' +'choi' +'chk' +'chips_' +'ching' +'chi_' +'chei' +'cheat' +'chat_' +'chat' +'chase' +'charit' +'charakteristisch' +'chapter' +'challenge' +'chairs_' +'cerca' +'centrali' +'center' +'cens' +'celebration_' +'cease' +'cci' +'categori' +'cated_' +'cate_' +'catalyst_' +'carri' +'card' +'capa' +'cancell' +'camps_' +'cals_' +'call' +'calculation' +'calculated_' +'caf' +'cabl' +'bürge' +'busi' +'bureaucratic_' +'bureaucra' +'burdens_' +'bundle' +'bukan' +'bud' +'buck' +'buchs' +'brücke' +'bräu' +'broaden' +'broad' +'bridge' +'brid' +'breites_' +'breakthrough_' +'breaking_' +'breach_' +'brat' +'brand' +'branche' +'brai' +'brac' +'boutique_' +'bout' +'borrow_' +'bone_' +'bond' +'bombs_' +'bold' +'boh' +'board' +'blä' +'blutigen_' +'blutig' +'blue' +'bloods' +'blo_' +'blier' +'bliebe' +'blicken' +'blick' +'blen_' +'bkommen_' +'bj' +'bits_' +'biss' +'bish' +'bip' +'biologischen_' +'biologis' +'biolog' +'bination' +'billigen_' +'bilie' +'bile' +'bilaterale' +'bila' +'bicycle' +'bezogene' +'beziehungsweise_' 
+'bezeichnete' +'bewä' +'bewussten_' +'bewirkt_' +'bewerten_' +'beweg' +'bew' +'bevorstehende' +'bev' +'beurteilen_' +'beträchtliche_' +'beträ' +'betrug_' +'betreffend_' +'betray' +'besuchten_' +'bestätig' +'bestraft_' +'bestmögliche' +'bestimmungen_' +'bester_' +'bestehe' +'besseres_' +'besonderem_' +'besitz_' +'besiege' +'besichtig' +'beseitig' +'bese' +'beschädigt_' +'beschränkung' +'beschränkte' +'beschl' +'berühr' +'berühmteste' +'berüc' +'beruh' +'berufliche_' +'berufliche' +'bern_' +'berkembang_' +'berk' +'berichterstatt' +'berichten_' +'bericht' +'bereitzustellen_' +'bereite' +'berda' +'berb' +'bequeme' +'beobacht' +'bent' +'bemerkenswerte_' +'beme' +'bem' +'belonging' +'believ' +'beliebige_' +'belgischen_' +'beleb' +'belastet_' +'belast' +'bekämpft' +'bekräftigt_' +'bekräftig' +'bekanntlich_' +'beiträgt_' +'beitreten_' +'beiten_' +'beit_' +'beispiellose' +'beispielhaft' +'bein' +'behält_' +'behindertenfreundliche_' +'behi' +'beherrsch' +'beher' +'behandlung' +'begrenzte_' +'begreifen_' +'begeistert' +'befolgt_' +'befand' +'beer_' +'beeindruckende' +'beein' +'bedien' +'bedenke' +'bedenk' +'beautifully_' +'beating' +'beat_' +'beamte' +'beam_' +'beam' +'beachtet_' +'beabsichtigt_' +'bd' +'baute' +'baue' +'basket' +'basierende' +'barometer' +'bares_' +'barem_' +'bao_' +'bankruptcy_' +'banken_' +'bands_' +'ballot_' +'balances_' +'baik_' +'backward_' +'backd' +'awful_' +'awaken' +'await_' +'aven' +'avant' +'autoritären_' +'autor' +'automated_' +'autob' +'authorize' +'authentic' +'ausüben_' +'auszugleichen_' +'auszud' +'auszubauen_' +'ausspr' +'ausse' +'ausschließ' +'ausmachen_' +'auslös' +'ausgezeichnet_' +'ausgez' +'ausgewählten_' +'ausgewogenen_' +'ausgestellt' +'ausgeprägte' +'ausgelegt_' +'ausgehende' +'ausgeh' +'ausgebildete' +'ausg' +'ausführlich_' +'ausf' +'ausd' +'ausbe' +'augment' +'aufzuz' +'aufwa' +'auftragten_' +'aufstrebende' +'aufle' +'aufhören_' +'aufgestellt_' +'aufen_' +'aufeinander' +'aue' +'audiovisual_' +'auchen_' +'atur' +'attraktiv_' +'attitudes_' +'attent' +'attacked_' +'ato' +'atively_' +'ations' +'ational_' +'ation' +'atie' +'atemberaubende' +'asur' +'astronom' +'astonish' +'asti' +'aster_' +'assung_' +'assiste' +'assist' +'asses' +'ass' +'asil' +'ash_' +'asch' +'asc' +'aru' +'artner' +'articulate' +'arte_' +'arrives_' +'arri' +'arkt_' +'ark_' +'aring_' +'ardo' +'architectural_' +'arbe' +'approximat' +'appreciation_' +'applicant_' +'appease' +'appear' +'appeal' +'appalling_' +'app_' +'aper' +'anzusehen_' +'anze' +'anza' +'antwort' +'antra' +'antly_' +'antin' +'antik' +'antibiotics_' +'antara_' +'answered_' +'anstr' +'anstelle_' +'ansta' +'anspruch' +'anspr' +'anor' +'announc' +'anniversary_' +'annex' +'ankurbel' +'ankung' +'animation' +'anima' +'ania' +'anhe' +'anhaltende_' +'anh' +'angu' +'angle_' +'angez' +'angewendet_' +'angewa' +'angeschlossen_' +'angeh' +'angeführt' +'angebotenen_' +'anfällig_' +'aneinander_' +'anderung' +'andern_' +'andards_' +'anbieter' +'anbet' +'analog' +'ams' +'amount' +'amerikanische' +'aly' +'aluminum_' +'altige' +'alti' +'alor' +'allocation_' +'allgemeiner' +'allge' +'aller' +'allen' +'alkohol' +'aliz' +'alische' +'algorithms_' +'ald_' +'alas' +'alarming' +'alarmier' +'aktuelle' +'akibat' +'akh' +'aket' +'aken_' +'ais_' +'ains' +'aining_' +'ahrt_' +'agu' +'ags' +'agree' +'agne' +'agn' +'aggressi' +'aggravat' +'agentur' +'afts' +'aften_' +'affiliate' +'affe' +'advoca' +'advisor' +'advisers_' +'advanc' +'adr' +'adore' +'ador' +'admi' +'adjusted_' +'additi' +'adding_' +'acu' +'activate_' +'acquire_' +'acion' +'acies_' +'achse' 
+'achievable_' +'achi' +'accumulate' +'accordingly_' +'accord' +'accomplish_' +'accompany_' +'accom' +'accidental' +'access' +'abzuwe' +'abz' +'abweichen' +'abus' +'abstract' +'absti' +'abschließend_' +'abra' +'abolish' +'ables_' +'ablehn' +'abit' +'abgeschnitten_' +'abger' +'abgeb' +'aber' +']]), ' +'].' +'Zö' +'Zweit' +'Zwangsv' +'Zuwanderer' +'Zusätzlich' +'Zurich_' +'Zun' +'Zuerst_' +'Zube' +'Zor' +'Zoom' +'Zoo_' +'Zionis' +'Zielsetzung' +'Ziels' +'Zentrali' +'Zel' +'Zehn' +'Yugoslav_' +'YouTube_' +'Yi' +'Yel' +'Yacht' +'Xinjiang_' +'Xin' +'Xe' +'Wörter_' +'Wört' +'Wählern_' +'Worse_' +'Working_' +'Worker' +'Wonder' +'Wollen_' +'Woll' +'Wochenende_' +'Wirtschaftsw' +'Wirtschaftse' +'Wirkung' +'Willkommen_' +'Williams_' +'Wild_' +'Wil' +'Wiederbelebung_' +'Whatever_' +'Wette_' +'Westjordanland_' +'Werke' +'Wen_' +'Weltmarkt' +'Weltkriegs_' +'Weltgesundheitsorganisation_' +'Wellen' +'Welle_' +'Weißbuch_' +'Weiterver' +'Wehr' +'Week' +'Wasserstoff' +'Wassers' +'Want_' +'Wales_' +'Wahrscheinlich' +'Waf' +'WWII_' +'WE_' +'Vulkan' +'Vr' +'Vorwand_' +'Vortr' +'Vorsicht_' +'Vormittag_' +'Vorm' +'Vorhan' +'Vorg' +'Vorb' +'Vollst' +'Voice_' +'Vita' +'Visa' +'Vila' +'Vig' +'Vietnam' +'Vie' +'Victor' +'Veto' +'Verwundbarkeit' +'Verwend' +'Verv' +'Verurteilung_' +'Vertriebs' +'Vertreibung' +'Vertiefung_' +'Versuchen_' +'Versu' +'Verstöße' +'Verstärkung_' +'Verständ' +'Verschärf' +'Verschuldung_' +'Verschmutzung_' +'Versch' +'Versagen_' +'Vermögenswerten_' +'Vermittlung_' +'Verlierern_' +'Verknüpfung' +'Verkehrsanbindung_' +'Veridian_' +'Verhältnisse_' +'Verhinderung_' +'Verhaftung_' +'Verfechter_' +'Verfassungsvertrag_' +'Verdienst' +'Verbrennung' +'Verbreche' +'Verbindungs' +'Verarbeitung' +'Verabschiedung_' +'Vend' +'Vat' +'Vas' +'Vall' +'Vali' +'Valent' +'VIP_' +'VII' +'VC_' +'Uruguay_' +'Ursprungs' +'Ursache_' +'Urb' +'Unzufriedenheit_' +'Unz' +'Unw' +'Unver' +'Untu' +'Unternehmenss' +'Untergang' +'Unterg' +'Unterb' +'Unless_' +'Ungleichgewichte_' +'Ungeachtet_' +'Unemployment_' +'Unein' +'Unabhängig' +'Umweltbe' +'Umweltausschuss_' +'Umwelta' +'Umsch' +'Ultra_' +'Ult' +'Ukrain' +'Uhr' +'Ufer_' +'Ud' +'UNI' +'UNHCR_' +'UND_' +'UA' +'Tür' +'Täuschung' +'Ty' +'Tube' +'Tschechische' +'Tsa' +'Trü' +'Trump' +'Truc' +'Trop' +'Trock' +'Trip' +'Trinkwasser' +'Trin' +'Tribu' +'Trial' +'Trennung_' +'Traum' +'Transparen' +'Tran' +'Tram' +'Trainings' +'Training_' +'Train_' +'Trail' +'Touristen' +'Toulouse_' +'Touch' +'Toten_' +'Tol' +'Titel' +'Titan' +'Tit' +'Tip' +'Tin' +'Tierschutz' +'Tiefe' +'Tie' +'Tibetan' +'Tia' +'Thyssen' +'Threa' +'Thr' +'Tho' +'Therma' +'Thema' +'Textes_' +'Termine_' +'Tennis_' +'Templates_' +'Telekommunikations' +'Techniken_' +'Tauche' +'Tastatur' +'Tank' +'Tang' +'Tamp' +'Tam' +'Tai' +'Tahrir_' +'Tabellen_' +'TTIP_' +'TS_' +'TRA' +'TG' +'TFT' +'Sünden_' +'Südwest' +'Sá' +'Synth' +'Symptom' +'Symp' +'Symbol' +'Swe' +'Sustainable_' +'Surf' +'Supp' +'Superior_' +'Super_' +'Sunni_' +'Subsidiarit' +'Stö' +'Sty' +'Student_' +'Stro' +'Stress' +'Strecke_' +'Strategi' +'Strat' +'Strassen_' +'Stran' +'Strafgericht' +'Strafe' +'Stol' +'Stoffen_' +'Stipendi' +'Still' +'Stick' +'Stich' +'Steve_' +'Steuersenkungen_' +'Stern_' +'Sterb' +'Steph' +'Step' +'Stellvertreter_' +'Stellungnahmen_' +'Stellenwert_' +'Steinberg_' +'Steel' +'Statisti' +'Stadtteil_' +'Stadium_' +'Stable_' +'Stabilisierung' +'Staatsanw' +'Staat' +'Später_' +'Spyware_' +'Spy' +'Spr' +'Sporta' +'Spion' +'Speak' +'Spannung' +'Spanische' +'Spaniens_' +'South' +'Sonntag_' +'Song' +'Sonders' +'Sonderbe' +'Somet' +'Solution_' 
+'Solidarity_' +'Sofi' +'Socialists_' +'Smart' +'Slovakia' +'Sli' +'Skript' +'Ski_' +'Skepti' +'Ske' +'Sixt' +'Sitzung' +'Sis' +'Singh' +'Simply_' +'Silva_' +'Silicon' +'Signal' +'Sig' +'Sicherheitsst' +'Sicherheitsrates_' +'Sicherheitskräfte_' +'Sich_' +'Show' +'Short_' +'Sheraton_' +'Sham' +'Shaf' +'Shadow' +'Sev' +'Setz' +'Sensibili' +'Sender_' +'Seminar_' +'Select' +'Segment' +'Securit' +'Screen' +'Scottish_' +'Schätz' +'Schäd' +'Schwung_' +'Schwimm' +'Schwelle' +'Schwed' +'Schwarzen_' +'Schutzmaßnahmen_' +'Schutze' +'Schur' +'Schuldner_' +'Schuldenerlass_' +'Schottland_' +'Schnittstelle_' +'Schn' +'Schmu' +'Schmit' +'Schmi' +'Schlussfolgerung_' +'Schließung_' +'Schim' +'Schiffen_' +'Schar' +'Schan' +'Schall' +'Scar' +'Scanne' +'Scal' +'Saudis_' +'Satelliten_' +'Sant_' +'Sample' +'Sammel' +'Sal' +'Saison' +'Saf' +'Sacr' +'Sachver' +'Sachs_' +'SY' +'SV' +'SSL' +'SSE' +'SR' +'SN' +'SK' +'SITE_' +'SIM' +'SDR' +'SARS_' +'SAP_' +'Rückz' +'Rückschlag_' +'Rückg' +'Röm' +'Räume' +'Russische' +'Rus' +'Rumsfeld_' +'Rov' +'Route_' +'Rotarian' +'Rota' +'Romulan' +'Romans_' +'Romano' +'Rol' +'Rog' +'Roc' +'Road' +'Rival' +'Risk' +'Rindfleisch' +'Rin' +'Rif' +'Ride' +'Richtungen_' +'Rh' +'Revolutionary_' +'Revol' +'Resultate_' +'Restaura' +'Resources_' +'Resol' +'Reso' +'Repu' +'Representative' +'Rentner' +'Renov' +'Reli' +'Release' +'Relati' +'Rein' +'Reichweite_' +'Reich_' +'Regulierungen_' +'Regulat' +'Regionalpolitik_' +'Regierungsvertreter' +'Regierungsführung_' +'Regierungse' +'Regent' +'Regens' +'Regel' +'Rega' +'Referenz' +'Redner' +'Recon' +'Rechtsp' +'Rechtsakt' +'Rechtfertigung_' +'Recht' +'Rechenschaft_' +'Rebellen' +'Realis' +'Reakt' +'Rav' +'Raus' +'Ratspräsident' +'Rassismus_' +'Rasse' +'Range' +'Ramada' +'Rak' +'Rai' +'Rag' +'Radi' +'Rabatt' +'RU' +'ROM' +'RL' +'Quit' +'Quartet' +'Quartal' +'Quality_' +'Qualifi' +'Quadra' +'QU' +'Py' +'Purvis_' +'Pur' +'Puffer' +'Präventi' +'Prämi' +'Prozessor_' +'Provokation' +'Provi' +'Protest_' +'Protektionismus_' +'Prophet' +'Prope' +'Prog' +'Profite_' +'Production' +'Produ' +'Probe' +'Privatsphäre_' +'Privathaus' +'Price' +'Preventi' +'Prestige_' +'Prese' +'Preiss' +'Portug' +'Porti' +'Popular' +'Poor_' +'Poly' +'Politis' +'Polic' +'Points_' +'Plug_' +'Ple' +'Platte' +'Platform_' +'Plasma' +'Pix' +'Pitt' +'Pira' +'Picture' +'PiS_' +'Photo_' +'Pflege' +'Pferde' +'Pfei' +'Petitions_' +'Petitions' +'Petition' +'Pesti' +'Peru' +'Persönlichkeit' +'Peripherie_' +'Period' +'Pere' +'Pentax_' +'Pensionen_' +'Penis' +'Peninsula_' +'Pear' +'Pav' +'Pauls' +'Patente_' +'Passi' +'Passagiere_' +'Partition_' +'Partie' +'Parlamentsabgeordnete' +'Pare' +'Panzer' +'Panasonic_' +'Palästin' +'Palais_' +'Paket' +'Pai' +'Pack' +'PV_' +'PU' +'PT' +'POS' +'PM_' +'PE_' +'Oz' +'Outdoor_' +'Organismen_' +'Optimierung' +'Omniture_' +'Om' +'Okt' +'Ohren_' +'Oft' +'Offs' +'Official_' +'Occ' +'Obst' +'Obr' +'Objekte_' +'Objekt_' +'Objekt' +'Obers' +'OR' +'ONE_' +'OE' +'Nächte' +'Ny' +'Nuclear_' +'Novo' +'Nov' +'Nots' +'Noti' +'Noten' +'Norwege' +'Norway_' +'Norw' +'Normalerweise_' +'Norde' +'Nordamerika' +'Nikotin' +'Nieders' +'Nicholas_' +'Ng' +'Neuigkeiten_' +'Neug' +'Neue' +'Neube' +'Neub' +'Neuan' +'Netze' +'Network' +'Nephi_' +'Need_' +'Need' +'Nazis_' +'Navy_' +'Navig' +'Natursch' +'Native_' +'Nationalstaaten_' +'Nationalist' +'Nationale_' +'Namun_' +'Nai' +'Nahverkehr' +'Nachteil_' +'Nachbarschafts' +'Münzen_' +'Möchte' +'Mé' +'Mär' +'Mängel_' +'Muss' +'Musk' +'Musiker_' +'Musi' +'Muni' +'Mun' +'Mum' +'Movi' +'Movement_' +'Mosc' +'Montp' +'Montenegro_' +'Montage' 
+'Monaco_' +'Modells_' +'Mobili' +'Mits' +'Mitgliedstaat' +'Mitgefühl_' +'Mischung_' +'Ministr' +'Minimum_' +'Minderheiten' +'Million' +'Millennium' +'Militär_' +'Miles_' +'Mig' +'Messung_' +'Merkmale' +'Mercur' +'Menschenrechtsverletzungen_' +'Meinungsverschiedenheiten_' +'Mehrwert_' +'Mehr' +'Megapixel' +'Meg' +'Meer' +'Medit' +'Medikamenten_' +'Medikamente_' +'Medicine' +'Maur' +'Matth' +'Matte' +'Mathematik_' +'Mathe' +'Massenvernichtungswaffen_' +'Massen_' +'Massagen_' +'Massage' +'Massachusetts_' +'Maschinen' +'Mary' +'Marshall_' +'Marra' +'Maritime_' +'Marie' +'Margaret_' +'Marco' +'Marbella_' +'Mao' +'Mant' +'Mano' +'Mannschaft_' +'Mann' +'Mandat' +'Malay' +'Mahm' +'Magne' +'Magn' +'Maf' +'Madr' +'Made' +'Maci' +'Machthaber_' +'Machi' +'Macedonia_' +'Maca' +'MID' +'MDGs_' +'MAR' +'MADRID_' +'Luftfahrt' +'Luca' +'Lub' +'Lore' +'Looking_' +'Loo' +'Lond' +'Lodge_' +'Lob' +'Ll' +'Lith' +'Lita' +'List' +'Liquidität_' +'Lip' +'Lion' +'Linu' +'Limited_' +'Lieferungen_' +'Lieferant' +'Lieblings' +'Liebe' +'Lichte_' +'License' +'Letztere_' +'Letter' +'Lek' +'Leistungsfähigkeit_' +'Leipzig_' +'Legitimation_' +'Legislative_' +'Lef' +'Led' +'Lebensbedingungen_' +'Learning_' +'Laur' +'Laufwe' +'Laser_' +'Laos_' +'Lanka_' +'Langstrecken' +'Landschaft' +'Lande_' +'Lance' +'Lamanites_' +'Lack' +'LS' +'LP' +'LOS_' +'LICH' +'LES' +'LDP_' +'Kürzungen_' +'Künstler' +'Könnte_' +'Kurzu' +'Kurve' +'Kun' +'Kumari_' +'Krugman_' +'Krone' +'Krist' +'Krim' +'Kriegsverbreche' +'Kreditk' +'Kreativität_' +'Krat' +'Krank' +'Kraftfahr' +'Korrekt' +'Koordination_' +'Kooperationsabkommen' +'Konzern' +'Konzepte_' +'Konve' +'Kontroverse' +'Kontrast_' +'Konstruktion' +'Konkur' +'Kondition' +'Kompromisse_' +'Kompe' +'Kommunistische' +'Kommissare_' +'Kommentare_' +'Kode' +'Kne' +'Klimasch' +'Klassen_' +'Klassen' +'Klagen' +'Kissinger' +'Kind' +'Kha' +'Kenne' +'Keep_' +'Kaz' +'Katzen' +'Kategorien_' +'Kaste' +'Kasach' +'Karten' +'Karr' +'Karl_' +'Kare' +'Kapitalst' +'Kapell' +'Kanzle' +'Kandidatenländer' +'Kamera' +'Kambodscha_' +'Kalt' +'Kalifornien' +'Kalif' +'Kad' +'Kabine' +'KB_' +'Jörg_' +'Jup' +'Juncker_' +'Julian_' +'Jugendherberge_' +'Jos' +'Johannes_' +'Jiang_' +'Jel' +'Jazz' +'Jarzembowski_' +'Jame' +'JPEG_' +'JP' +'Iss' +'Isolation_' +'Islamischen_' +'Iron' +'Iri' +'Ira' +'Ion' +'Inva' +'Intr' +'Interview' +'Interv' +'Intern' +'Interi' +'Interess' +'Inten' +'Integrat' +'Inspektor' +'Insel' +'Inn' +'Inkrafttreten_' +'Ink' +'Ini' +'Informations_' +'Industrien' +'Indikatoren_' +'Indikat' +'Indian' +'Inde' +'Inc' +'Implementi' +'Imperi' +'Ih' +'If' +'Identitäten_' +'Iber' +'IX' +'ISS' +'INS' +'IND' +'INC' +'Hürde' +'Höh' +'Häusern_' +'Häuser_' +'Hut_' +'Hut' +'Hus' +'Humanit' +'Hug' +'Hu_' +'Hoste' +'Hospi' +'Honor' +'Home' +'Hoheit' +'Hohe' +'Hochzeit' +'Hob' +'Hirsch' +'Hip' +'Hinterl' +'Hinsichtlich_' +'Himm' +'Highlight' +'Hierbei_' +'Herzego' +'Herunter' +'Herstell' +'Herkunftsl' +'Herberge_' +'Hell' +'Held' +'Hektar_' +'Heimat_' +'Heid' +'Hegemonie_' +'Heer' +'Heating_' +'Head_' +'Head' +'Haza' +'Haw' +'Havel_' +'Haushaltsk' +'Haushalten_' +'Hauptziel' +'Hauptg' +'Harry_' +'Hardliner' +'Handy_' +'Hands' +'Handelsd' +'Halbjahr_' +'Haft_' +'Had_' +'Habr' +'Gut_' +'Gus' +'Gul' +'Guid' +'Guer' +'Guantánamo_' +'Gründungs' +'Grö' +'Grä' +'Grundwerte_' +'Grundv' +'Grundge' +'Gru' +'Green' +'Grap' +'Grant_' +'Grand' +'Granada_' +'Gran_' +'Gou' +'Got' +'Goe' +'Glückw' +'Globalis' +'Globale_' +'Glaube_' +'Glacier_' +'Gl' +'Giu' +'Gitarren' +'Gir' +'Ghana_' +'Gewässer' +'Gewi' +'Gewebe' +'Getreide' +'Gesundheitswesen_' 
+'Gesundheitsschutz' +'Gestern_' +'Gestalt_' +'Gesta' +'Gesichtspunkt' +'Gesetzen' +'Geschäftsreise' +'Geschäftsführ' +'Geschmack' +'Geschichts' +'Geschenk_' +'Gescheh' +'Geräten_' +'Georgian_' +'Gemeinschaftsrecht' +'Gemeinsame_' +'Gemeinsam_' +'Gegebenheiten_' +'Gefangene' +'Gefa' +'Gedanke' +'Ged' +'Gebühren_' +'Gebietskörperschaften_' +'Gaul' +'Gau' +'Gastl' +'Garc' +'Ganzes_' +'Gandhi_' +'Galic' +'Galax' +'Gai' +'GT' +'GMO_' +'GM' +'GIMP_' +'GF' +'GBP_' +'Fürsten' +'Fürs' +'Führungsp' +'Führungen_' +'Führer' +'Fäl' +'Fäh' +'Fuss_' +'Funktionieren_' +'Funktionalität_' +'Full_' +'Fue' +'Früher' +'Frucht' +'Fronte' +'Frist_' +'Friedh' +'Freilassung_' +'FreeBSD_' +'Frattini_' +'Frassoni_' +'Franz_' +'Franz' +'Fox_' +'Fourth_' +'Fotograf' +'Fortschritts' +'Forst' +'Formulierung_' +'Forme' +'Forex_' +'Folter_' +'Folgende' +'Flüge_' +'Flü' +'Flugver' +'Flucht' +'Flach' +'Fl' +'Fitnessraum_' +'Fitnesscenter_' +'Fitness' +'Fiskalp' +'Fischf' +'Finn' +'Finanzt' +'Finanzministeri' +'Finanzma' +'Finanzinstitut' +'Finanziellen_' +'Finanzi' +'Finanzhilfe' +'Finanzdienstleistungen_' +'Filip' +'Figu' +'Field' +'Fie' +'Feu' +'Ferrer' +'Ferr' +'Fernsehs' +'Fen' +'Feli' +'Feinds' +'Fehlern_' +'Fehle' +'Faz' +'Fax' +'Fanati' +'Fan' +'Fail' +'Fahnen' +'Fah' +'FOR' +'FI_' +'FC' +'FBI_' +'Extremismus_' +'Exporte' +'Expansion' +'Exce' +'Exa' +'Everyone_' +'Eurosta' +'Europäer' +'Europe' +'Europarat' +'Eure' +'Eto' +'Ethiopia_' +'Es' +'Erzeug' +'Erwägung_' +'Erwe' +'Ersparnisse_' +'Ernährungs' +'Erlös' +'Erlebnis_' +'Erla' +'Erkenntnisse_' +'Erkenn' +'Erinnerungen_' +'Erika_' +'Eric' +'Erhalt_' +'Erfordernissen_' +'Erfa' +'Erf' +'Ereignissen_' +'Ereignis_' +'Erb' +'Era' +'Equally_' +'Equal' +'Epidemi' +'Entwicklungszusammenarbeit_' +'Entwicklungshilfe_' +'Entspannen_' +'Entschl' +'Entscheidungsfindung_' +'Entscheide' +'Entlassung' +'Entfernung_' +'Entdeckung_' +'Entdecke' +'Engine' +'Engel' +'Energieversorgung_' +'Energieverbrauch' +'Energietr' +'Empfehlung_' +'Emm' +'Emb' +'Eman' +'Elys' +'Eliten_' +'Eli' +'Element' +'Elektrizität' +'Elect' +'Elb' +'Einwohnern_' +'Eintrag_' +'Einstei' +'Einnahme' +'Einla' +'Einig' +'Einheitswährung_' +'Eingriff' +'Eingang_' +'Eindr' +'Eight' +'Eigentums' +'Ehren' +'Effe' +'Edward' +'Economists_' +'Ecol' +'Echt' +'Eben' +'East' +'Earl' +'EV' +'EQ' +'EOS_' +'ENE' +'EME' +'EFSF_' +'EF' +'EEC_' +'EE' +'EA_' +'Durchschnitt_' +'Durchs' +'Duke' +'Duc' +'Dua' +'Dry_' +'Drogenh' +'Drac' +'Dosi' +'Domain' +'Dollars_' +'Dokumenten_' +'Dokt' +'Dod' +'Doctor_' +'Divi' +'Disziplin' +'Distri' +'Disney_' +'Discover' +'Direktor_' +'Direkt_' +'Diamant' +'Devisenwechsel_' +'Device' +'Develope' +'Dess' +'Demonstrationen_' +'Demonstration_' +'Dei' +'Defizit_' +'Definitionen_' +'Deep' +'Deco' +'Deborah_' +'Deal_' +'Davon_' +'Dav' +'Datenbank' +'Darstell' +'Dark_' +'Danube_' +'Dalai_' +'Daily' +'Dafür_' +'Dach_' +'DU' +'DSLR_' +'DF' +'DEL' +'Cyp' +'Cus' +'Cub' +'Crow' +'Cristi' +'Cris' +'Cr' +'Coven' +'Course' +'Coup' +'Cort' +'Corp' +'Cookies_' +'Cookie_' +'Coo' +'Control' +'Conte' +'Contain' +'Const' +'Conservative_' +'Configur' +'Cond' +'Compo' +'Compli' +'Communities_' +'Communis' +'Commonwealth_' +'Commons_' +'Commerc' +'Collection_' +'Co_' +'Cluster' +'Cli' +'Claude_' +'Citi' +'Christi' +'Chief_' +'Chic' +'Chian' +'Chest' +'Chelsea_' +'Charme_' +'Charlotte' +'Champions' +'Catherine_' +'Catalunya_' +'Cart' +'Carol_' +'Cap_' +'Cana' +'Campi' +'Camp_' +'Came' +'Cale' +'Cadiz_' +'CV' +'CK' +'CF_' +'CER' +'C6_' +'Bürgerrecht' +'Bügelservice_' +'Byrne' +'Button_' +'Burning_' +'Bureau_' +'Bul' 
+'Built_' +'Buffe' +'Buen' +'Budget' +'Buddhist_' +'Bucharest_' +'Brüder' +'Brücken' +'Brücke_' +'Brook' +'Broad' +'Bristol_' +'Brief' +'Brasiliens_' +'Brand_' +'Bow' +'Boutique' +'Borr' +'Born' +'Boote' +'Boom_' +'Bolivia_' +'Boliv' +'Bod' +'Blase' +'Blanch' +'Blan' +'Bit' +'Binnenmarkts_' +'Bind' +'Bildschirm' +'Bic' +'Bibl' +'Bewusstsein_' +'Beton' +'Besuchen_' +'Bestec' +'Bestands' +'Besichtigung' +'Besi' +'Beschl' +'Besatzungs' +'Berna' +'Berichterstatters_' +'Berge' +'Bergbau' +'Berei' +'Beratungs' +'Benalmadena_' +'Beleg' +'Belange_' +'Bek' +'Beitrittsländer_' +'Behinderung' +'Begr' +'Befreiung' +'Befehl_' +'Beck' +'Beantwortung_' +'Bauern' +'Batterie' +'Bat' +'Bass' +'Barre' +'Barr' +'Baron' +'Barba' +'Bankr' +'Bala' +'Bak' +'Bagdad_' +'Baby_' +'Baby' +'BP' +'BEA' +'BBC_' +'Außens' +'Außenhandel' +'Autovermietung_' +'Autos_' +'Autorität_' +'Autok' +'Auth' +'Ausstellung' +'Ausser' +'Ausschüsse_' +'Auss' +'Ausrichtung_' +'Ausländer' +'Auslegung' +'Ausge' +'Ausgang_' +'Ausg' +'Ausfuhr' +'Auseinandersetzungen_' +'Auseinandersetzung_' +'Ausdruck' +'Auschecken_' +'Ausbreitung_' +'Ausbau' +'Auktion' +'Aufwand_' +'Aufw' +'Aufschub_' +'Aufrechterhaltung_' +'Auflagen_' +'Aufla' +'Auditor' +'Attrakti' +'Athens_' +'Assozi' +'Asc' +'Arts_' +'Artikels_' +'Arra' +'Aro' +'Arn' +'Argentiniens_' +'Arg' +'Archive_' +'Arbeitszeit' +'Arbeitsplatz' +'Arbeitsmärkte' +'Arbeiter' +'Aqu' +'Application' +'Applause_' +'Appartement_' +'Apollo_' +'Aparthotel' +'Apartamentos_' +'Apache_' +'Anwende' +'Anweisungen_' +'Antw' +'Antrieb' +'Ansta' +'Anschuldigungen_' +'Anschluss' +'Anrei' +'Anleitung_' +'Ankündigung_' +'Ankara_' +'Animat' +'Anhä' +'Angreifer' +'Angeli' +'Anfä' +'Anfa' +'Andy_' +'Analyst' +'Amat' +'Alvaro_' +'Alternative' +'Alte' +'Almost_' +'Allgemeine' +'Alkohol_' +'Ali_' +'Alexanderplatz_' +'Alba' +'Aktiv' +'Akt_' +'Ahmed' +'Ahmadinedschad_' +'Agriculture_' +'Agent' +'Against_' +'Afrikanischen_' +'Afghan_' +'Advi' +'Advent' +'Adress' +'Adam_' +'Ach' +'Aca' +'Abwärts' +'Abstände' +'Absp' +'Abschreckung_' +'Abschnitt' +'Abschluss' +'Abre' +'Abraham_' +'Above_' +'Abn' +'Abhol' +'AX' +'ATION' +'ANY_' +'ALL_' +'AK' +'AE' +'ABAP_' +'AA_' +'A350_' +'A1_' +'; • _' +';' +'93' +'92' +'91' +'84_' +'84' +'825' +'81_' +'78' +'73_' +'71_' +'70' +'57' +'450' +'42' +'320' +'3000_' +'2nd_' +'226' +'204' +'2018_' +'2017_' +'2016_' +'1981_' +'1974_' +'1969_' +'1958_' +'1957_' +'1955_' +'1951_' +'1930er_' +'1918_' +'179' +'158' +'140_' +'13th_' +'125' +'110_' +'10th_' +'104' +'103_' +'100' +'0ern_' +'011' +'010' +'// _' +'.  ' +'.: _' +'...) _' +'.)._' +'. – ' +'.   _' +'. .' +'. ) _' +'. " _' +', ..._' +')|_' +'): «' +'() , _' +'%), _' +'": _' +'")._' +'!”' +'!!!!' +' „ _' +' –&' +' –' +' « _' +'  ' +' ..' 
+' ($_' +' ''' +'™-_' +'€_' +'…_' +'”) _' +'“) _' +'‘' +'ا' +'י' +'ң' +'қ_' +'ін' +'ында' +'ші' +'ть_' +'сын' +'со' +'р_' +'пр' +'пар' +'ных_' +'на_' +'кономи' +'ка_' +'ит' +'ел' +'гі' +'го_' +'га' +'бе' +'ас' +'Т' +'К' +'Г' +'ρ' +'ο' +'Ž' +'ż' +'ška_' +'ý_' +'üße' +'üß' +'ütung' +'ütlich' +'ütige' +'üstet_' +'ürze' +'ürt' +'ürge' +'ürfe' +'ürdige' +'üpf' +'üng' +'ündete' +'ünde_' +'üllt_' +'ührten_' +'ührende' +'ühmt' +'üf' +'ücken_' +'üblich_' +'überzogen' +'übersetz' +'überschü' +'überschreitende' +'überra' +'übernahme' +'überleb' +'überholt' +'übergehen' +'überflü' +'übereinstimmen_' +'übereinkommen_' +'ößt_' +'ött' +'östlichen_' +'öster' +'öst_' +'öse' +'örtlichen_' +'örtliche' +'örte' +'örig' +'ör_' +'ökologisch' +'öhnlich_' +'öhe' +'öglichkeiten_' +'öge' +'öffnete' +'öffnet_' +'öffentliches_' +'öd' +'öcke' +'ôte_' +'ò' +'ï' +'î' +'être_' +'ête' +'ém' +'èr' +'ège_' +'äßig' +'äußerte_' +'äußert_' +'äuser' +'äume_' +'äum' +'ässig' +'ärz' +'ärt' +'ärmeren_' +'ärer_' +'ändler' +'ändiger_' +'änderte' +'ämte' +'ällt_' +'äle_' +'ähle' +'ähig' +'ägyptischen_' +'ägt_' +'äger' +'äg' +'ächtige' +'äche' +'ás' +'ßlich' +'ßer_' +'ßer' +'ßb' +'Überwindung_' +'Übersetzer' +'Überna' +'Überlebens' +'Übergriffe' +'Überflu' +'Übereinkommens_' +'Überarbeitung_' +'Österreich' +'Öls' +'Ölpreis' +'Öle' +'Ökolo' +'Äußer' +'Äquivalen' +'Ähnliche' +'Ã_' +'·      ' +'²_' +'®_' +'®, _' +' – ' +' %, _' +'}}) ==' +'}})' +'}{_' +'}, _' +'|' +'zünd' +'zzle' +'zykli' +'zwischenstaatliche' +'zweitgrößte_' +'zweimal_' +'zweige' +'zweier_' +'zweie' +'zwec' +'zwang' +'zuwider' +'zuvorkommende' +'zuverlässiger_' +'zuv' +'zutreffend' +'zusammenzuf' +'zusammenzuarbeiten_' +'zusammentre' +'zusammenhä' +'zusammenf' +'zurückzuh' +'zurückkehren_' +'zurückk' +'zurückbl' +'zunahm' +'zulässig_' +'zula' +'zukomm' +'zugr' +'zugewiesen' +'zugeschnitten' +'zugesa' +'zugelassen_' +'zugehen_' +'zufügen_' +'zufällig' +'zubereitet_' +'zte_' +'zst' +'zoo_' +'zonen_' +'zol' +'zitä' +'zitiere_' +'zit' +'zipation_' +'zines' +'zien' +'zheimer_' +'zerr' +'zero' +'zerbrech' +'zentri' +'zentral' +'zend' +'zeitweilige' +'zeitl' +'zeitiger_' +'zeitig' +'zeita' +'zeilen_' +'zeil' +'zeigten_' +'zeige' +'zeichnung_' +'zar_' +'yte' +'yment' +'yla' +'yie' +'yg' +'yev' +'yell' +'yea' +'ydr' +'yak' +'xts_' +'xon_' +'xn' +'xist' +'xim' +'xes_' +'würdige_' +'wört' +'wöchentlich' +'wärt' +'wäl' +'währung' +'wski_' +'wron' +'writer_' +'wrap' +'wozu_' +'wovon_' +'wounded_' +'worthwhile_' +'worries_' +'workshop' +'workplace' +'womit_' +'wofür_' +'wling' +'withstand_' +'withdrawn_' +'withdraw' +'wissenschaftliche' +'wissenschaftl' +'wirkte' +'wins_' +'winners_' +'winkel' +'willkürlich' +'willen_' +'wilde' +'wiese' +'wiederher' +'wiederge' +'widmet' +'widersetz' +'wick' +'whatsoever_' +'wettbewerbsfähig_' +'wettbewerb_' +'wertig' +'werkzeug' +'wende_' +'wend' +'wen' +'welding_' +'welders_' +'weites' +'weiterf' +'weiterentwickel' +'weilen_' +'weiche_' +'wege_' +'weekend' +'weck' +'wechsels' +'wechs' +'weakening_' +'wc' +'way' +'wav' +'watt' +'watershed_' +'waterfalls_' +'wasted_' +'warrior' +'ward' +'wanting_' +'wang' +'wandte' +'wahrha' +'wach' +'völligen_' +'völ' +'vá' +'vulnerabilities_' +'vul' +'votre_' +'vorzugehen_' +'vorzei' +'vorz' +'vorsorge_' +'vorrangige' +'vorne_' +'vorle' +'vorlag' +'vorhin_' +'vorhersehbar' +'vorherrschende' +'vorherigen_' +'vorherge' +'vorhandene_' +'vorhaben' +'vorgetragen_' +'vorgesehene_' +'vorgeleg' +'vorgebracht_' +'vorb' +'voraus' +'vorantreib' +'vorangehen' +'vorangegangenen_' +'voranbringen_' +'voor_' +'von' +'volunt' 
+'volumes_' +'vollziehen_' +'vollendet' +'voli' +'vole_' +'voic' +'vivid' +'vität_' +'vita' +'visit' +'viru' +'viou' +'violate_' +'violate' +'viol' +'vio' +'villa' +'vigorously_' +'vigilant' +'viewing_' +'viewer_' +'viet' +'vierz' +'vielversprechend' +'viels' +'vielfach' +'vidue' +'vide' +'vib' +'viat' +'vete' +'vet_' +'verzögern_' +'verzerr' +'verzeichnis_' +'verzauber' +'verwöhn' +'verwirr' +'verwir' +'verwendete_' +'verweigert' +'verweh' +'verwand' +'verwaltung' +'verwaltet_' +'verurteilen_' +'verursachen_' +'vertritt_' +'vertretene' +'vertretende' +'vertiefen_' +'vertie' +'verteil' +'verteidigt_' +'versäume' +'versuchten_' +'versu' +'verstümmel' +'verstehe_' +'versteckt' +'verstecken_' +'versta' +'versp' +'versorgen_' +'versorg' +'versicherung_' +'verschwenderische' +'verschre' +'verschr' +'versatil' +'versammel' +'versagt_' +'vers_' +'verpflichtung' +'verordn' +'vernünftige' +'vernetz' +'vernehm' +'vernachlässigt' +'vernachlässigen_' +'vermindert' +'verlässliche' +'verließ' +'verlie' +'verletzen' +'verleih_' +'verlangsam' +'verlang' +'verlagerung' +'verkünde' +'verkl' +'verkehr' +'verkaufte' +'verhältnisse' +'verhält' +'verheirat' +'verhandl' +'verhandelt' +'verhaftet_' +'vergift' +'verfügbare' +'verfolgte_' +'verfe' +'verfa' +'vereinte' +'vereinig' +'vereinfachen_' +'vereinbarten_' +'vereinbarte' +'verein' +'verdreifach' +'verdoppelt_' +'verdoppel' +'verdan' +'verbu' +'verbrenn' +'verbrachte' +'verborgen' +'verbleiben' +'verban' +'veranstaltet' +'veranlasst_' +'verankert_' +'verabscheu' +'venture' +'vent_' +'vendor' +'veil' +'vec' +'vd' +'vastly_' +'variier' +'variat' +'var_' +'valve' +'vale' +'vald' +'ußen' +'ux' +'uv' +'utsbe' +'utr' +'utin' +'uth' +'utenant' +'ustausch_' +'usgaben_' +'urz' +'uru' +'urt' +'ursa' +'urging_' +'urg' +'urch' +'urbaniz' +'urban' +'urate' +'ura_' +'upte' +'uphold' +'upheaval' +'upa' +'uon' +'unzähligen_' +'unzwe' +'unzureichende' +'unz' +'unwillingness_' +'unweigerlich' +'unwanted_' +'unverä' +'unu' +'unthink' +'unterzeichnete' +'unterzeichn' +'unterworfen_' +'unterteilt_' +'untersuch' +'unterstrichen_' +'unterstreiche' +'unterschätzt' +'unterschiedlich' +'unterschied' +'untersch' +'unterminieren_' +'untergräbt_' +'unteren_' +'unterbrechen_' +'unsc' +'unresolved_' +'unrea' +'unqu' +'unpredictab' +'unmittelbarer_' +'unmen' +'unlängst_' +'unkomp' +'unko' +'unite_' +'unis' +'unilateralism_' +'unica' +'uni_' +'unha' +'ungü' +'ungsze' +'ungsverfahren' +'ungso' +'ungser' +'unglück' +'unglaublich' +'ungerecht' +'ungenügend' +'ungehe' +'unfr' +'unfortunate_' +'unforgettable_' +'unfolding_' +'unfair' +'unexp' +'uneven' +'unes_' +'unerwünscht' +'unerwartete' +'undurch' +'undung' +'undi_' +'underestimate' +'underc' +'undeniabl' +'unden' +'unconditional' +'uncomfortable_' +'uncle' +'unbegrenzt' +'unaufhaltsam' +'unat' +'unam' +'umstr' +'umst' +'umsetz' +'umm' +'umin' +'umh' +'umgest' +'umgeh' +'umgebung' +'umfassendere_' +'umfangreichen_' +'umfangreich' +'umfa' +'umf' +'ume_' +'umbrella' +'uma_' +'ultra_' +'uls_' +'ulos' +'ulierung' +'uldn_' +'ulde' +'ularly_' +'ukr' +'uko' +'uit_' +'ugu' +'ugi' +'ugge' +'ufs' +'ufl' +'uff' +'uerung_' +'uern_' +'uellen_' +'uelle_' +'udo' +'udia' +'ubt_' +'uba' +'ub_' +'uay' +'uate' +'uas' +'tüt' +'tümer' +'tödlich' +'täts' +'tätigkeit' +'täter' +'tär' +'tzl' +'tzende' +'tze_' +'tyrant' +'typis' +'twin' +'tutt' +'tutor' +'tus_' +'turm' +'tur_' +'tuous' +'tunnel' +'tuna_' +'tuition' +'tuber' +'tti_' +'ttert' +'tters' +'tsp' +'tsi' +'tscheni' +'tsa' +'trö' +'trä' +'trusted_' +'truppen_' +'trupp' +'trug_' +'truc' +'trouble' +'trou' 
+'trot' +'tropis' +'trophi' +'trivial' +'trink' +'trim' +'trigger' +'trieben_' +'trieb' +'tribute_' +'treu_' +'tres_' +'tres' +'tree_' +'treasure_' +'treas' +'traße_' +'travelers_' +'travail_' +'traue' +'trau' +'trat_' +'trat' +'trapp' +'transparente_' +'transp' +'transmitt' +'translator' +'transformer_' +'transformati' +'transatlantic_' +'tran' +'tram_' +'trali' +'trail_' +'trai' +'tragbar' +'traditioneller_' +'trades_' +'trac' +'tournament_' +'tourismus' +'touri' +'totalit' +'tos' +'tops_' +'topbonus_' +'too' +'toni' +'tone' +'toma' +'tolerate' +'token_' +'toilet_' +'toilet' +'tment' +'tma' +'tkan_' +'titles_' +'titi' +'tist' +'tisch' +'tis_' +'tings_' +'timme' +'timing_' +'timi' +'tilt' +'till' +'tiker' +'tightening_' +'tig_' +'tiere' +'tier_' +'tiefen_' +'tief' +'tied_' +'tid' +'tick' +'tici' +'tiate' +'tian_' +'thwart' +'thun' +'throne_' +'thresholds_' +'threatens_' +'thoroughly_' +'thon_' +'theori' +'theoretical_' +'tgut' +'tformen_' +'textiles_' +'textile_' +'text' +'teurer' +'test' +'terte_' +'tert' +'terroristische_' +'terroris' +'territoriale_' +'territoriale' +'terminology_' +'terminat' +'term' +'terli' +'teren' +'terd' +'terblichkeit' +'terba' +'tent_' +'tendi' +'tempt' +'template' +'temperature' +'temperatur' +'tels' +'teles' +'telephones_' +'tekn' +'teilung_' +'teilnehmer' +'technologie_' +'technologie' +'technically_' +'tching_' +'tbe' +'tbar' +'tav' +'tausende' +'taught_' +'tatte' +'tator' +'tation' +'tate_' +'tasa' +'tariffs_' +'targeting_' +'tape_' +'tandard' +'tand' +'tamb' +'tally_' +'tali' +'takti' +'takeover_' +'tains_' +'tail_' +'tahu' +'taha' +'tactic' +'taat' +'taa' +'südlich_' +'sü' +'säum' +'säu' +'sätz' +'säkulare' +'säch' +'szei' +'systemi' +'systematische' +'syrischen_' +'syrische_' +'synd' +'symbolische' +'swing_' +'swin' +'sweet_' +'sweeping_' +'sust' +'suspended_' +'suspend' +'survivors_' +'surviving_' +'surv' +'surgery_' +'surfing_' +'supr' +'suppress_' +'support' +'superpower_' +'superiority_' +'sunny_' +'sung_' +'sums_' +'sug' +'suffice' +'suchte' +'succeed' +'subtr' +'subti' +'substitute' +'subsistence_' +'subsidiaries_' +'subscription' +'subo' +'subjecti' +'stücke' +'ständnis' +'ständi' +'stände_' +'städtischen_' +'styl' +'sty_' +'stumbl' +'stuhl_' +'studying_' +'studios_' +'studiere' +'stub' +'ströme_' +'strukturierte' +'stroke' +'stritt' +'strip' +'stringen' +'striking_' +'strenge_' +'streiche' +'streaming_' +'straße_' +'strain_' +'strain' +'strafrechtliche_' +'storm_' +'stopped_' +'stones_' +'stole' +'stm' +'stliche' +'stische_' +'stipulate' +'stimm' +'stills' +'stig' +'stian' +'stere' +'stems_' +'stelle' +'stee' +'stattfand_' +'stattdessen_' +'statis' +'stating_' +'starte' +'starship' +'starring_' +'starr' +'standar' +'stam' +'stall_' +'staged_' +'staff' +'stabilisi' +'ssystem_' +'ssung_' +'ssten_' +'sslich' +'ssige_' +'ssert' +'ssenen_' +'ssad' +'srat' +'square' +'spürbar' +'spü' +'spy' +'spu' +'sprung' +'sprozess' +'sprogramm' +'spro' +'spritz' +'sprech' +'spreads' +'sprachig' +'spots_' +'spotlight_' +'spora_' +'spoo' +'sponsor_' +'spolitik_' +'spokes' +'splendid_' +'spitz' +'spiritual' +'spin_' +'spill' +'spielte' +'spiele_' +'spher' +'sph' +'spezialisierte' +'spezialisiert_' +'sperr' +'sper' +'spektrum_' +'spekt' +'speicher' +'speculati' +'spectac' +'specif' +'specially_' +'specialist' +'specialis' +'spark_' +'spannende' +'spanische' +'spalte' +'spac' +'sozioökonomische' +'soz' +'sovi' +'souverän' +'souls_' +'sorgte_' +'sorgan' +'sonstigen_' +'song_' +'sonabl' +'solusi_' +'solo_' +'soll' +'soi' +'sofortige' +'soci' +'soaring_' +'soap' +'sness_' 
+'sneak' +'smug' +'smoke' +'smitte' +'smell_' +'smallest_' +'small' +'slowenisch' +'slope_' +'slin' +'slide' +'slau' +'sland_' +'skontroll' +'skew' +'skeptisch' +'siu' +'sitzt_' +'sistem_' +'sinkt_' +'sincere_' +'simplify_' +'simo' +'similari' +'simila' +'silent' +'signifikante_' +'signi' +'signalisier' +'signal' +'sige' +'sies_' +'sierende' +'sieb' +'sider' +'sidelin' +'sid' +'sichtlich_' +'sicherge' +'sicherere' +'sica_' +'sibl' +'sibi' +'shri' +'showers_' +'shot' +'shortfall_' +'shorter_' +'shore_' +'shof' +'shir' +'shifting_' +'shie' +'shi_' +'she' +'shatter' +'sharpe' +'sham' +'shadow' +'shade' +'sges' +'sger' +'sgebiet' +'sfähig' +'sfr' +'sfe' +'sfa' +'sexual' +'setzten_' +'setze_' +'setup_' +'settle_' +'servici' +'serv' +'seria' +'sequence_' +'sequen' +'sepe' +'separated_' +'sensitivity_' +'sensibl' +'senh' +'sender_' +'sena' +'seminar' +'semi_' +'selu' +'selige' +'self' +'selecti' +'sela' +'sekitar' +'seitig' +'sein' +'sehingga_' +'sehbare' +'seeds_' +'secu' +'sectarian_' +'secrets_' +'secrecy_' +'sechs' +'sece' +'season' +'searche' +'sdi' +'sdat' +'sda' +'scrib' +'scourge_' +'schönes_' +'schädlich_' +'schwing' +'schwierig' +'schwerwiegende_' +'schweizer' +'schwarzen_' +'schwache_' +'schule' +'schuldig_' +'schte_' +'schs' +'schrittweise' +'schriftliche_' +'schriftliche' +'schon' +'scholarship' +'schnellstmöglich' +'schnellstens_' +'schlä' +'schlosse' +'schlichte' +'schlechte' +'schlage' +'schizophreni' +'schirm' +'schienen_' +'schenk' +'scheine' +'scheiden' +'schadet' +'scenery_' +'scenarios_' +'scen' +'scatter' +'scar' +'scal' +'sburg_' +'saver' +'saudi' +'satisfy' +'satisfaction_' +'sar_' +'sangat_' +'sanf' +'sandy_' +'sammlung' +'sames_' +'salv' +'salon' +'salmon' +'sall' +'sail' +'sah' +'sacred_' +'sabotage' +'saa' +'rüst' +'rüh' +'rüf' +'rückt_' +'rücke_' +'rüch' +'römische' +'rés' +'ränken_' +'räglich' +'räder' +'rwart' +'rva' +'rust' +'russi' +'rush' +'rupulous' +'rupt' +'rumo' +'ruft_' +'rufe' +'rue' +'ruch' +'ru_' +'rting_' +'rtig' +'rthe' +'rter_' +'rsion' +'rse_' +'rren_' +'rozess_' +'rox' +'routine' +'rous_' +'rounde' +'rote_' +'rotat' +'rose' +'ror_' +'rooftop' +'rome_' +'roman_' +'roller' +'roh' +'rogue' +'rogramm_' +'rocks_' +'robie' +'rnt_' +'rmt' +'rmina' +'rmat' +'rman_' +'rlin' +'rkste_' +'riös' +'riu' +'ritte' +'ritt_' +'ritis' +'riter' +'risi' +'rises_' +'rische_' +'ringung_' +'ringe' +'rima' +'rigkeit_' +'rigen_' +'rifft_' +'riffen' +'riff' +'riesiger_' +'riesige_' +'rieben' +'ridge' +'richtungen_' +'richtung' +'richer_' +'ribut' +'riad' +'rgen' +'rfs' +'rfer' +'rey_' +'reward' +'revolution' +'revolt' +'revo' +'revision_' +'revision' +'reva' +'rev' +'rett' +'retreat_' +'retic' +'reth' +'reten_' +'rete' +'retains_' +'retained_' +'ret_' +'result' +'rests_' +'restru' +'reste' +'responds_' +'responding_' +'respi' +'respecting_' +'resolu' +'resistant_' +'resident_' +'resi' +'reservier' +'resentment' +'resemble' +'repudiat' +'republikanischen_' +'repräsentati' +'repro' +'representa' +'repositor' +'replacing_' +'repercussions_' +'repea' +'repay_' +'rente' +'renewal_' +'renc' +'remedy_' +'religiös' +'relian' +'relevan' +'releases_' +'rela' +'reitungs' +'reist' +'reise' +'reinve' +'reinforces_' +'reif_' +'reie' +'reicher' +'reichend_' +'reibungslosen_' +'reh' +'reguläre' +'regulier' +'registr' +'regio' +'regi' +'regel' +'regain_' +'refuses_' +'refurbish' +'refu' +'refrigerat' +'refresh' +'reformiert' +'refine' +'reduzierte' +'reduce' +'redo' +'redis' +'redet' +'recyc' +'rect' +'recreat' +'recommend' +'recom' +'reckt_' +'reciproca' +'rechnet' +'rechen_' +'recepti' 
+'receiver' +'receipt' +'rebuilt_' +'rebuild' +'rebell' +'reba' +'realm' +'reakti' +'reagierte' +'reaffirm' +'readin' +'reacted_' +'react' +'rds_' +'rdnung' +'rdinate_' +'rde_' +'rda' +'rbu' +'rbi' +'rbeit' +'raums_' +'rations_' +'rationalis' +'ratings_' +'ratify_' +'ratifizieren_' +'rassis' +'raschen_' +'rape_' +'rangig' +'rally_' +'rakete' +'raid_' +'raf' +'rado_' +'radikal' +'rad' +'raci' +'quote' +'quicker_' +'quero' +'quenz' +'quee' +'quasi_' +'quart' +'quar' +'qualities_' +'qualify_' +'pé' +'puri' +'purchased_' +'punishment_' +'punishe' +'pundits_' +'punct' +'puff' +'publishe' +'publications_' +'publication_' +'publi' +'pub' +'ption' +'ptic' +'pti' +'pter_' +'psychologi' +'psychi' +'pson_' +'prüfe' +'präzise_' +'präventive' +'pruden' +'prozessor' +'prozess' +'proz' +'proyecto' +'proximité_' +'provoke' +'provisional_' +'protocol' +'protestier' +'protectionist_' +'prostitution_' +'prosper_' +'prosecutor' +'prosecute' +'propag' +'proofed_' +'prone_' +'prompt_' +'promo' +'promi' +'projekten_' +'proj' +'progress' +'programmier' +'programme' +'prognos' +'profile' +'profession_' +'produz' +'produktiver' +'produktiven_' +'prochen' +'processor_' +'processed_' +'problematische' +'probe' +'probability_' +'prizes_' +'privatization' +'privatiz' +'privat' +'principal' +'preview_' +'presumably_' +'pressu' +'presse' +'presenta' +'prep' +'premises_' +'premier_' +'prejud' +'preisen_' +'predecessor_' +'predator' +'precon' +'precis' +'preceding_' +'preceded_' +'praktischer_' +'practise' +'prachige' +'prach' +'ppt_' +'ppne' +'pper_' +'pparat' +'potenzial_' +'potenti' +'posting_' +'poster' +'possesse' +'positiver_' +'positively_' +'positionier' +'posit' +'portugiesischen_' +'portfolio_' +'pornographi' +'porat' +'populist' +'popu' +'pop_' +'pons' +'poni' +'pone' +'polizeilichen_' +'polio_' +'pointer' +'poet_' +'pneum' +'pluralist_' +'plug' +'pling_' +'plikat' +'plica' +'ples' +'pleas' +'playground_' +'plausible_' +'plaus' +'platzier' +'platz' +'platte' +'plates_' +'planet' +'pl' +'pix' +'piso' +'pirate' +'pira' +'pieler' +'phä' +'physische' +'physics_' +'photographer' +'philosophers_' +'philo' +'pher' +'phen' +'phases_' +'pharma' +'pg' +'pflicht_' +'pfer_' +'pfei' +'pfa' +'pezifische' +'petro' +'pest_' +'pessimist' +'pes' +'perusahaan_' +'perta' +'pert_' +'persuaded_' +'perspectiv' +'personal' +'persona' +'perso' +'persiste' +'persecut' +'perse' +'permanente' +'perja' +'periodi' +'performa' +'perform' +'perfekten_' +'perfekte_' +'perceptions_' +'peny' +'pensioner' +'pengu' +'peng' +'penal' +'pektive' +'pedi' +'peculiar' +'pect' +'pc' +'paßt_' +'payer_' +'paya' +'pay' +'pause_' +'patron_' +'patriot' +'patriarch' +'patience_' +'pati' +'patenti' +'patch' +'patan_' +'passp' +'passenden_' +'passende' +'passen_' +'passage_' +'passa' +'pasa' +'partition' +'partisan' +'particul' +'participa' +'partially_' +'partial_' +'partei' +'parte' +'parque' +'parlam' +'paris' +'parc' +'parasit' +'paran' +'parag' +'paradoxical' +'parado' +'paradise_' +'panne' +'panels_' +'panc' +'palästinensische' +'palm' +'pale' +'palace_' +'pai' +'pack_' +'pable_' +'overwhelmingly_' +'overthrow' +'overs_' +'overlap' +'overl' +'overd' +'ova_' +'ova' +'outright_' +'outrageous_' +'outr' +'outer_' +'outbreaks_' +'oup' +'ough' +'otten_' +'osten' +'ostasiatische' +'osphär' +'osph' +'oso' +'osh' +'orz' +'orum_' +'orten' +'orre' +'ormi' +'origine' +'originated_' +'original' +'orientieren_' +'orientation_' +'orie' +'oria' +'organisierten_' +'organise_' +'organisch' +'ordinat' +'orden' +'orch' +'orb' +'orate' +'oral' +'optimistic_' +'optimist' 
+'optimis' +'optimalen_' +'optim' +'opfern' +'operative_' +'operationen_' +'open' +'ope_' +'oor' +'oo_' +'onwards_' +'onste' +'onomi' +'onment' +'oniert' +'oner' +'onc' +'onale' +'onal' +'ommt_' +'ommen' +'oming_' +'omen_' +'oman' +'olv' +'olungs' +'ols' +'olli' +'olle' +'oll_' +'oliz' +'olive_' +'oliga' +'okan_' +'oire' +'oint_' +'ohnehin_' +'ohl' +'ogic' +'ogenheit_' +'ogati' +'offsho' +'offenkundig_' +'offe_' +'offe' +'ocken_' +'ocht' +'oceans_' +'occurring_' +'occurrence_' +'occupying_' +'occupies_' +'occasion' +'obsole' +'observ' +'obscure' +'oblige' +'obligator' +'objektive' +'objektiv' +'objection_' +'obgleich_' +'obesity_' +'obacht' +'oba' +'oard' +'nützig' +'nöte' +'nössische' +'näher' +'nzende' +'nym' +'nvi' +'nven' +'nutzung' +'nutri' +'nurse' +'nungsl' +'nungs' +'nukl' +'nuan' +'nty_' +'ntrat' +'ntische' +'ntal' +'nstig' +'nst_' +'nsic_' +'npo' +'nour' +'notwendiger' +'notw' +'notified_' +'notebook' +'normen_' +'normale' +'nop' +'nons' +'nonetheless_' +'nominal' +'noble_' +'nob' +'nnial' +'nlich' +'nli' +'nland' +'nl' +'nitt_' +'nistr' +'niss' +'nischer_' +'nik' +'night' +'nieren' +'niedrigste' +'niederzu' +'nichts' +'nia_' +'nho' +'nheit' +'nhaft' +'ngliche' +'ngle' +'ngl' +'ngkin' +'ngh' +'ngens' +'ngen' +'ngeh' +'nfr' +'neutr' +'neuerliche' +'neueren_' +'neuem_' +'nette_' +'nets_' +'nerv' +'nent' +'nenn' +'nel' +'neighbours_' +'neighbor_' +'neid' +'nei_' +'negeri_' +'negativ' +'need' +'ndten_' +'ndliche_' +'ndli' +'nding_' +'nderung_' +'ndern' +'ndene' +'ndelt' +'ndbar_' +'nbild' +'nberg_' +'nbe' +'nationality_' +'nationalistische' +'nationalis' +'nannten_' +'name' +'naive_' +'nahegeleg' +'nage' +'nachzudenken_' +'nachlassen_' +'nachl' +'nachfo' +'münd' +'möglichkeit_' +'möglich' +'même_' +'mé_' +'männliche' +'mw' +'mußt' +'mutmaßliche' +'mutati' +'muslimische_' +'muslimisch' +'municipality_' +'muni' +'multiplie' +'multinationalen_' +'multilingual' +'mulat' +'mud' +'muc' +'mps' +'mple' +'mpho' +'mp3' +'mour' +'motorways_' +'motivierte' +'motivieren_' +'motivated_' +'mother' +'mosqu' +'mosa' +'mort' +'moreover_' +'moralisch_' +'moralis' +'moral' +'monsters_' +'monopolies_' +'mond_' +'monate' +'mois' +'mog' +'modifications_' +'modif' +'moderati' +'modelle_' +'modele' +'modalit' +'modal' +'mobilisiert_' +'mobil' +'mmu' +'mmission' +'mming_' +'mitzuteilen_' +'mittlere_' +'mittl' +'mitt' +'mithilfe_' +'mitgliede' +'mitb' +'mismanage' +'miser' +'mino' +'minimalis' +'minati' +'minat' +'mimic_' +'millenni' +'militia' +'militarily_' +'militari' +'milia' +'milestone' +'mildern_' +'migrator' +'miete' +'mfa' +'metry_' +'metropolitan' +'metropolis' +'meti' +'metaphor' +'metal' +'merupakan_' +'mering' +'merikas_' +'merika' +'merger_' +'merge_' +'menun' +'mention' +'mentally_' +'mentali' +'menschlich' +'meno' +'meni' +'mengg' +'mendo' +'mendapat' +'memori' +'memiliki_' +'membe' +'melo' +'meld' +'melak' +'meinte_' +'mehrheitlich' +'mediterran' +'medien' +'mbr' +'mbo' +'maßgebliche' +'maßgeblich_' +'mayors_' +'may' +'maturit' +'matt' +'matri' +'matisch' +'matis' +'mathematical_' +'mathemati' +'material' +'matching_' +'masyarakat_' +'massacre' +'mass' +'marvel' +'mars' +'markiert_' +'marki' +'mari' +'manufa' +'mans' +'manifestation' +'mane' +'mandate' +'mancher_' +'managements' +'macht' +'lüsse' +'lösungen_' +'lärung' +'lärm' +'längeren_' +'läh' +'lädt_' +'läche' +'lve_' +'luss' +'lui' +'ltungen_' +'lts' +'lton' +'ltet' +'ltes' +'ltere' +'ltene' +'lpin' +'love' +'lov' +'loser_' +'lop' +'loop' +'looming_' +'longe' +'lohnen' +'logisch_' +'login' +'logen_' +'lod' +'locals_' +'loading_' +'load' 
+'lls_' +'lligte' +'lles_' +'llect' +'llb' +'lj' +'liziert_' +'livel' +'litä' +'litt' +'lita' +'lit_' +'listing_' +'lish' +'lio_' +'linux' +'linking_' +'linke' +'lining_' +'linien_' +'lini' +'linguist' +'lingeri' +'linge_' +'linen_' +'limitations_' +'limit' +'likes_' +'liga' +'lifts_' +'lifting_' +'liest_' +'liegende_' +'lieferungen_' +'lief' +'lied' +'liebe' +'licht' +'lichem_' +'licenses_' +'licens' +'lic_' +'libert' +'liberalism' +'liable_' +'lia_' +'lia' +'lge_' +'lga' +'lfi' +'leverag' +'leva' +'leut' +'letztere' +'letzt' +'lete' +'lest_' +'lesse' +'lers_' +'lern_' +'leo' +'lent_' +'lende' +'lend' +'lement' +'lek' +'leite' +'leistungsstarke' +'leiste' +'leidenden_' +'leid' +'leichten_' +'lehnte_' +'legung_' +'legu' +'legislative' +'legale' +'lega' +'lecht' +'lebendige' +'lebenden_' +'lean' +'lden' +'lc' +'layo' +'layers_' +'laut' +'launder' +'laugh_' +'laub' +'latte' +'lations' +'lateinamerikanischen_' +'lated_' +'lares_' +'lapse' +'langwierige' +'langsamer' +'langsame' +'landwirtschaftliche' +'landscape' +'landmark_' +'landes_' +'landes' +'landed_' +'lamat' +'lain_' +'laim_' +'lager' +'laboratory_' +'küste_' +'künften_' +'kündig' +'kümmert_' +'kühle' +'körperlich' +'käufe' +'kämpfung_' +'kämpft' +'kus_' +'kurzlebig' +'kurdische' +'kunst_' +'kung_' +'kundig' +'kulturelle' +'kul' +'ktive_' +'ktie' +'ksh' +'kse' +'kräfte_' +'kritisieren_' +'kriti' +'kriminelle' +'krie' +'kreis_' +'krebs_' +'kreative_' +'krankheit' +'kraftwerke_' +'kota' +'kosm' +'korrigiert_' +'korr' +'kopp' +'koordin' +'kooperieren' +'konzipier' +'konzentrierte_' +'konzentr' +'konv' +'kontinu' +'kontakt' +'konsum_' +'konsultier' +'konfigur' +'kompromi' +'komponente' +'komplexer' +'komple' +'kommiss' +'kommene_' +'kommende_' +'kommand' +'komitmen' +'komfort' +'kolo' +'kohärent' +'kna' +'klüg' +'klini' +'kling' +'klick_' +'klein' +'klause' +'kland_' +'kk' +'kipun_' +'king' +'kids_' +'kick' +'keyboards_' +'keyboard_' +'keti' +'kep' +'kennzeichne' +'kenntnis' +'kenne' +'kelo' +'kell' +'kela' +'keitsp' +'kehrten_' +'kehrte_' +'keh' +'keeper' +'ked' +'kd' +'kauft' +'kati' +'katastrophen' +'katastrophalen_' +'katastroph' +'kass' +'kart' +'kapitalistischen_' +'kapitalistisch' +'kapitali' +'kapit' +'kanäle_' +'kanzler_' +'kannte' +'kanische_' +'kane' +'kampagnen_' +'kammer' +'kad' +'kabel' +'kab' +'kW_' +'jüngst_' +'jüdisch' +'jähriger_' +'jähr' +'juta_' +'justizielle' +'justifiable_' +'junior' +'jump_' +'juice' +'judgments_' +'juan' +'jp' +'joy_' +'joy' +'jours_' +'journe' +'journal_' +'jour_' +'jor_' +'jon' +'jm' +'jk_' +'jk' +'jen' +'jemandem_' +'jected_' +'jaz' +'jang' +'jail_' +'jahrzehntelang' +'jahrelange' +'jad' +'jacuzzi_' +'iß' +'izer_' +'iw' +'iver' +'itäre' +'itä' +'itz_' +'ituation' +'itted_' +'itiv' +'itischen_' +'itions' +'itin' +'ithm' +'itglied' +'ited_' +'itas_' +'istische_' +'istans_' +'istan_' +'issen' +'isse_' +'isse' +'issa' +'israel' +'ism' +'ision' +'isier' +'irs' +'irrig' +'irresponsibl' +'irresp' +'irrel' +'irregular' +'iro' +'irku' +'irische_' +'irgendwo_' +'irgendwie_' +'irgendwelche' +'ipt' +'ipp' +'ipl' +'iose' +'ios' +'ionss' +'ionist' +'iologi' +'inz' +'inward_' +'invoice' +'investition' +'investigations_' +'invest' +'inventor' +'invented_' +'invent' +'introduces_' +'inti' +'inters' +'interoperability_' +'interne_' +'internally_' +'interfere' +'interessierte' +'interdi' +'interconnect' +'interactive_' +'intensiver_' +'intensify' +'intellectuals_' +'intell' +'integrative' +'integrati' +'integr' +'int_' +'int' +'insurgen' +'insul' +'instruct' +'institutionelle' +'inste' +'inspirierende' +'inspire' 
+'inspe' +'insolvenc' +'insight_' +'inser' +'insel' +'inputs_' +'innu' +'innovativer' +'inners' +'innenpolitische_' +'innenpolitisch' +'inne_' +'inm' +'inland_' +'inko' +'injustice_' +'initi' +'iniste' +'inherit' +'inherently_' +'ingt_' +'informi' +'informelle' +'informationen_' +'informa' +'influen' +'infl' +'infizier' +'inferior_' +'infection_' +'infecti' +'inexpe' +'inertia' +'inequalities_' +'inen' +'ineff' +'indung' +'indices_' +'indications_' +'indexe' +'indefinite' +'inda' +'incremental' +'incorporati' +'incor' +'incomplete_' +'incompa' +'incline' +'inciden' +'inbe' +'inau' +'ination_' +'inapp' +'inanzierung' +'inan_' +'inali' +'inacc' +'imstande_' +'impul' +'impu' +'imprisoned_' +'impressi' +'impressed_' +'importi' +'importe' +'implying_' +'implizi' +'impli' +'implant' +'impf' +'imperialist' +'imperial_' +'imperfect_' +'impe' +'immun' +'imminent_' +'immigkeit' +'immerhin_' +'imma' +'imeter' +'imer_' +'iment' +'iman' +'imagination' +'image' +'illustrate' +'illig' +'ilis' +'ilian' +'ilet' +'iles' +'iler' +'ildet_' +'ila_' +'iki_' +'ikan' +'ihood' +'igte_' +'igte' +'igste' +'igert_' +'igenen_' +'igende_' +'igend' +'igat' +'igan' +'ify_' +'ifica' +'ific' +'iffe' +'ifer' +'ießungs' +'ieu' +'iertes_' +'ierte' +'ierli' +'ierenden_' +'ientierte' +'ient_' +'iell' +'iehungs' +'iegende' +'iegen' +'ieferung_' +'iefe' +'idylli' +'idos' +'ido' +'ideologische' +'identifizieren_' +'identi' +'iden_' +'iden' +'icu' +'ickt_' +'ickel' +'ichtliche' +'ichteten_' +'ichtete' +'icherte_' +'icherheit_' +'icherheit' +'ibus' +'ibly_' +'iben' +'ibel' +'ibe' +'iate' +'iar' +'iani' +'iana_' +'iam' +'ially_' +'iale' +'iad' +'höheres_' +'höchstwahrscheinlich_' +'häuser_' +'häufigste' +'härter' +'hysteri' +'hypothes' +'husband_' +'hurt_' +'hunde' +'humiliati' +'humanis' +'humane' +'huk' +'hub_' +'hter' +'hrte' +'hrop' +'hp' +'hov' +'hotele' +'hostilit' +'hostages_' +'hospi' +'horses_' +'horri' +'hormon' +'horizontal' +'horizon' +'honour' +'hones' +'holis' +'holder_' +'hochgradig_' +'hochentwickelte' +'hnya_' +'hns' +'hma' +'hlung_' +'hls' +'hlen' +'hitt' +'historischer_' +'hist' +'hion' +'hinweg' +'hinterla' +'hina' +'hilfe' +'highlights_' +'high' +'hig' +'hieß' +'hie' +'hid' +'hibit' +'hib' +'het_' +'hesitate_' +'hesita' +'herzlichen_' +'hervorgeh' +'hervorge' +'herstellen_' +'herrschende_' +'herrschen_' +'herrsche' +'herrlichen_' +'heroic_' +'hergestell' +'hered' +'heranzu' +'hene_' +'helpful' +'helme' +'hellen' +'helle_' +'helle' +'helicopter_' +'heitlich' +'heilige' +'heikle' +'hegemon' +'heftig' +'heel' +'hectare' +'heblich' +'heat' +'hear' +'headwa' +'haven_' +'hasn' +'harvest' +'harn' +'harmonisch' +'hap' +'hanging_' +'handlung' +'handelte_' +'handelbar' +'haltung_' +'haltig' +'haltestelle' +'halle_' +'half' +'hake_' +'hairdryer' +'hafter_' +'hafte' +'hadi' +'habitacion' +'habita' +'habet' +'güt' +'günstigste' +'günstigen_' +'gültig' +'gé' +'gäste' +'gänzlich_' +'gut' +'gust' +'guise' +'guilty_' +'guiding_' +'gue_' +'guaranteeing_' +'größtmögliche' +'grundlage_' +'großzügige' +'großartigen_' +'grow' +'groundwork_' +'grin' +'grim' +'grid_' +'gres' +'greife' +'greet' +'gray_' +'grave' +'grau' +'gratul' +'grass' +'grants_' +'grande' +'gram' +'grain_' +'graduated_' +'gradual_' +'grac' +'governor_' +'gove' +'gott' +'goldene' +'goa' +'gno' +'gnizing_' +'gnisse' +'gne' +'gnant_' +'gmatis' +'glück' +'glori' +'globe_' +'globalisierte' +'global' +'glied' +'gler' +'gleichg' +'gle_' +'glaubwürdige' +'glanc' +'gl' +'gische' +'gins' +'ginat' +'giganti' +'gift_' +'gift' +'ghts_' +'gha' +'ggf_' +'gger' +'gged' +'gf' 
+'geänderte' +'gezielt_' +'geze' +'gez' +'gewünschte_' +'gewü' +'gewöhnt_' +'gewährte' +'gewä' +'gewohnt_' +'gewisses_' +'gewerbliche' +'geweiht_' +'gewarnt_' +'gewann_' +'gewandt' +'gewalttätig' +'gewaltsam' +'gewaltigen_' +'gewaltige' +'getreten_' +'getrennt_' +'geteilte' +'geteilt_' +'gesunde_' +'gesucht_' +'gestric' +'gesteuert' +'gestell' +'gestalt' +'gesichert_' +'gesetzlichen_' +'gesetzliche_' +'gesetzlich_' +'geschütz' +'geschu' +'geschränkt' +'geschri' +'geschoss' +'geschn' +'geschmackvoll_' +'geschla' +'geschickt' +'geschichte' +'geschic' +'geschi' +'gescheiterten_' +'gescheitert' +'gerä' +'gert' +'germ' +'geringst' +'geringfügig_' +'geringe' +'gerichteten_' +'gerichte' +'gericht' +'geplante' +'geometri' +'geologi' +'geographic' +'geografisch' +'genügt_' +'gentur' +'generosity_' +'generali' +'genauen_' +'gemischte' +'gemeinschaftliche_' +'geme' +'gelöscht_' +'gelä' +'gelobt_' +'gelingen_' +'gelin' +'geliebt' +'geleistete' +'gelei' +'gelegenen_' +'gelangte' +'geladen_' +'gela' +'gekauft_' +'geka' +'gehörte' +'gehöre' +'gehend' +'geheimnis' +'gehe' +'geh' +'gegl' +'gegenübersteht_' +'gegebenenfalls_' +'gegebene' +'geführte' +'gefühl_' +'gefüg' +'gefälscht' +'gefährdete' +'gefundenen_' +'gefl' +'gefahren_' +'geeinigt' +'geehrter_' +'geda' +'gebun' +'gebro' +'gebnis' +'gebilligt_' +'gebieten_' +'geben' +'gay_' +'gathering' +'gathered_' +'gastronomy_' +'garten_' +'garde_' +'gant_' +'game' +'gamb' +'galow' +'galleries_' +'gale' +'gab' +'fürs' +'fürchtet' +'fünfzehn' +'führend_' +'führ' +'fühle' +'fügt_' +'fügen_' +'förm' +'fälsch' +'fusion_' +'funktions' +'funktional' +'funktion' +'fundamentalism_' +'fulfilled_' +'fulfill_' +'fulfil_' +'fug' +'fueling_' +'fting_' +'ftige' +'frühe' +'frustration' +'fruitful_' +'fruchtbare' +'front' +'froh_' +'frighten' +'frequen' +'fremden' +'freiz' +'freiwillige' +'frameworks_' +'frames_' +'fram' +'fraglich' +'foto' +'fosil_' +'fortunately_' +'fortunate_' +'fortschrittliche' +'fortschritt' +'fortgesetzte' +'fortgesetzt_' +'forsche' +'fors' +'formulation' +'formier' +'formelle_' +'forme' +'forgive' +'foreseeable_' +'foremost_' +'forecast' +'forbidden_' +'foodstuffs_' +'folgender' +'folgend' +'folde' +'flüsse_' +'flüge' +'flächen_' +'flus' +'fluctuations_' +'fluc' +'flow' +'fließ' +'fliehen_' +'flich' +'flex' +'flat' +'flam' +'fk' +'fizierten_' +'fixing_' +'fist' +'fishe' +'fische' +'firmen_' +'finishing_' +'finishes_' +'fing' +'finanzierten_' +'finanzier' +'finanziell_' +'filme' +'fill' +'fies_' +'fid' +'fici' +'fib' +'feuer' +'fett' +'festh' +'festge' +'fester_' +'fertiliz' +'fert' +'fers' +'fernseh' +'ferenz' +'fera' +'feminist' +'felde' +'feindliche_' +'fehlerhafte' +'fehlende_' +'fehl' +'feed' +'fear' +'faz' +'favori' +'favorable_' +'faun' +'fathers_' +'fastest_' +'fashioned_' +'farm' +'fang_' +'fana' +'familien' +'falt' +'fallend' +'fair' +'fahrten_' +'facult' +'faction_' +'faction' +'facet' +'fabricat' +'fabri' +'fa_' +'ezi' +'ezei' +'eze' +'extract_' +'extinction_' +'exte' +'expressions_' +'exportiert' +'exportieren' +'exporters_' +'exported_' +'exponenti' +'explosion' +'exploited_' +'explod' +'explizit' +'explanations_' +'expecting_' +'expectancy_' +'expansive' +'expans' +'expandier' +'exot' +'exklusive_' +'existen' +'exert' +'exer' +'excuse' +'exceeds_' +'exceeded_' +'exceed_' +'examining_' +'examination' +'ewicht' +'evolved_' +'evolve_' +'eviden' +'evaluated_' +'eva_' +'ev_' +'eut_' +'eus' +'europä' +'etung_' +'ets' +'etri' +'etliche' +'ethni' +'ethisch' +'ethics_' +'etet' +'estan' +'establishes_' +'establ' +'esst' +'essourcen_' +'essors_' 
+'espe' +'esp' +'esisch' +'esie' +'esh_' +'esar' +'esa_' +'esa' +'erzählt_' +'erzwingen_' +'erzw' +'erzig' +'erzielten_' +'erzieh' +'erzi' +'erzeugung_' +'erworben' +'erwirt' +'erweckt_' +'erwach' +'eruf' +'eru' +'ertr' +'erti' +'erteilt_' +'erteilen_' +'ersönlichkeit' +'ersto' +'erstmalig' +'ersorgung_' +'erschütter' +'erschweren_' +'erri' +'erreg' +'ero_' +'ernähr' +'ernsten_' +'erneuten_' +'ermöglichte' +'ermutigend' +'ermordet' +'ermittelt' +'ermitt' +'ermaßen_' +'erläutert' +'erlich' +'erkrank' +'erklärten_' +'erische' +'erin_' +'erhol' +'erho' +'erhebt_' +'erhaltene' +'ergreif' +'erfuhr' +'erforsch' +'erfasst_' +'erfassen_' +'eres_' +'erer' +'erenz' +'erend_' +'eren' +'ereit' +'ereignis' +'erbracht_' +'erate' +'erals' +'eradicati' +'erac' +'eption' +'epl' +'eou' +'envisage' +'environments_' +'entzi' +'entwickelnde' +'entwick' +'entum' +'entsp' +'entschä' +'entschl' +'ents' +'entrepreneurs_' +'entrepreneurial_' +'entrepr' +'entra' +'ento' +'entn' +'entla' +'entko' +'entitlement' +'ention' +'entia' +'enthusiastic' +'enthusiast' +'enthielt_' +'entg' +'entfern' +'entfalten_' +'enter' +'enteil' +'entdeckte' +'entailed_' +'entag' +'ensw' +'enrichment_' +'enpr' +'enormer_' +'ennen_' +'enne' +'enna_' +'enlarged_' +'enke' +'enische' +'enie' +'enhancing_' +'enhancement' +'engst' +'engines_' +'engere_' +'engagiert_' +'engag' +'energies' +'enerati' +'endors' +'endlos' +'endig' +'endg' +'encr' +'encounter' +'enchant' +'enberg_' +'enau' +'enan' +'enade' +'emulat' +'empör' +'empt' +'emphasized_' +'emphasises_' +'empfohlen_' +'empfinden' +'empfan' +'emitte' +'emit_' +'emis' +'eminent' +'emic' +'emergen' +'ementa' +'embraced_' +'embodie' +'emancipation_' +'elz' +'elu' +'elte_' +'elpr' +'elnde' +'elm' +'ellit' +'ellig' +'elimin' +'elektronische_' +'elektroni' +'eleg' +'electorate_' +'elderly_' +'ela_' +'ektors_' +'eiz' +'eiv' +'eise' +'eis' +'einzubringen_' +'einziger_' +'einzigartiger_' +'einzelstaatliche_' +'einzelstaatliche' +'einsetzt_' +'einschließ' +'einnimmt_' +'einn' +'einm' +'einleite' +'einlage' +'einka' +'einig_' +'einheitlich' +'einheit_' +'einheimische_' +'einhalten_' +'eingreif' +'eingesetzte' +'eingereichten_' +'einger' +'eingehende' +'eingefü' +'eingebrachte' +'einführt' +'einfließen' +'einfache' +'eindr' +'eilnehmer' +'eilig' +'eigt' +'eifen_' +'eichnungen_' +'eichnete' +'eichheit' +'eichen_' +'eiche' +'eibungen_' +'ehrliche' +'ehren' +'ehmend' +'ehm' +'ehemaliger_' +'egu' +'egr' +'egio' +'efficiently_' +'effi' +'effektive' +'eein' +'eed' +'educat' +'edoni' +'edly_' +'editorial_' +'editi' +'edingungen_' +'ediate_' +'ectiv' +'ect_' +'ecosystem_' +'economic' +'ecommerce_' +'eckung' +'echter_' +'echnologi' +'echni' +'ece' +'ec_' +'ebi' +'earthquake_' +'eare' +'eagle' +'ead' +'eab' +'eB' +'düstere_' +'dürft' +'dünne' +'dün' +'dó' +'dí' +'dè' +'dänische' +'dämpf' +'dynamisch_' +'dyn' +'dying_' +'durchgeführten_' +'durchführ' +'durable_' +'dura' +'duplicate' +'dule' +'dubio' +'dual_' +'dsch' +'drückt' +'drücke' +'drängt_' +'drink' +'dringender_' +'drige' +'dress_' +'dreim' +'dreie' +'drea' +'dran_' +'dramatische' +'drama_' +'drai' +'drafts' +'dozens_' +'dozen_' +'downs' +'downfall_' +'douche' +'doubl' +'dort' +'doppelten_' +'door' +'doom' +'dominieren_' +'dominance_' +'dom_' +'dokument_' +'doct' +'dock' +'doc' +'dne' +'dlin' +'divide' +'divert' +'diversification_' +'diversif' +'diver' +'dity' +'distributi' +'distinguish' +'distinctive_' +'distin' +'distanzier' +'distan' +'dissi' +'disqualif' +'disproportionate' +'disposition' +'displaced_' +'dispens' +'disparities_' +'disorders_' +'disor' 
+'diskriminier' +'diskreditier' +'disintegrati' +'discover' +'discontent' +'discl' +'discard' +'disar' +'disappoint' +'disappeared_' +'disadvantage_' +'disabl' +'dirty_' +'diri' +'direkter_' +'directors_' +'dir' +'diplomati' +'diplom' +'dingung' +'dinar' +'dimensionale' +'dimension' +'dilakukan_' +'diktat' +'dika' +'digte' +'diffus' +'differentiated_' +'differ_' +'diesjährigen_' +'diesel_' +'diesbezüglich_' +'diente_' +'dienstleistung' +'dictator_' +'dictator' +'dicta' +'dich' +'dice' +'dicat' +'dibandingkan_' +'dial_' +'diagnostizier' +'diagnosis_' +'dezentral' +'devise_' +'develop' +'devastat' +'deva' +'dev_' +'deutschs' +'deutliche' +'deutig' +'deterrence_' +'determina' +'detect_' +'detaine' +'destroying_' +'destotrotz_' +'destiny_' +'desti' +'destabilisieren' +'dest' +'desse' +'desperate_' +'desk' +'designi' +'designe' +'desert_' +'desert' +'descript' +'describing_' +'desc' +'derte' +'derse' +'derin' +'deput' +'depri' +'depressive' +'deplor' +'depict' +'depe' +'dent_' +'denkbar' +'demselben_' +'demonstrati' +'demoli' +'demokratische' +'demografische' +'democratiz' +'democratically_' +'demis' +'demics_' +'demi_' +'demagogue' +'dell' +'delight' +'delic' +'deleveraging_' +'deleted_' +'delet' +'deleg' +'delay' +'dei' +'deg' +'defizite' +'definite_' +'definierten_' +'deficien' +'defenses_' +'dedicati' +'decree' +'decoration_' +'decisively_' +'decen' +'dece' +'deca' +'debu' +'debat' +'deba' +'dealers' +'deaktiviert' +'deadlock_' +'daya_' +'day' +'davon' +'daughter_' +'dauern_' +'dauerhaft_' +'dauer_' +'datei_' +'dat_' +'darl' +'dark' +'darge' +'dank' +'dangerously_' +'dane' +'dance' +'dairy_' +'dai' +'dage' +'dae' +'dachte_' +'custom' +'curi' +'cup_' +'culos' +'culi' +'culati' +'cui' +'cture_' +'ctor_' +'cto' +'cteri' +'cruel_' +'crop_' +'crop' +'critics_' +'criticise_' +'cription' +'cred' +'creativity_' +'crea' +'cram' +'crafted_' +'covert' +'coverage_' +'courte' +'court' +'courag' +'counting_' +'counterpart' +'cosmo' +'corrupt' +'correspond' +'coordinate_' +'cool' +'cooking_' +'cookies_' +'convinc' +'conviction_' +'convey_' +'controvers' +'contro' +'contributes_' +'contraction' +'continuity_' +'continual' +'continental_' +'conti' +'contempt_' +'contemplate' +'container' +'consum' +'consultant' +'consolidate' +'console' +'consig' +'conservat' +'conscien' +'congratulat' +'confirms_' +'confine' +'conducti' +'condo' +'conditi' +'conceptual' +'concepts_' +'concentrating_' +'conceive' +'conceivable_' +'compromises_' +'comprise_' +'composition_' +'compos' +'complimentary_' +'competitors_' +'compet' +'communicati' +'commission_' +'commi' +'commerce' +'commentator' +'comitology_' +'combination' +'combin' +'colleg' +'collateral_' +'collaps' +'colla' +'coins_' +'cof' +'coexist' +'codec' +'cod_' +'cock_' +'clou' +'cloning_' +'cliente' +'clear' +'classification_' +'classif' +'clash_' +'clarifi' +'ckung_' +'ckten_' +'ckr' +'ckne' +'ckier' +'civiliz' +'civilisation' +'civ' +'circulation_' +'circula' +'circuit_' +'cip' +'ciona' +'ciation' +'ciar' +'chwor' +'chwi' +'chwe' +'chut' +'chuss' +'chur' +'chunk' +'chuh' +'chtliche' +'chtet_' +'chtern' +'chslung' +'chrono' +'chronis' +'chronic_' +'christ' +'chriften_' +'chreib' +'chooses_' +'choc' +'chnik' +'chni' +'chn' +'chme' +'chläge_' +'chli' +'chke' +'chisc' +'chip_' +'chinesischer_' +'chine' +'china_' +'chier' +'chicken_' +'chess' +'cherung' +'chere' +'chemis' +'chem_' +'charta_' +'characteris' +'chara' +'chanis' +'champions_' +'chambres_' +'chamber' +'cham' +'chain' +'chaff' +'ceu' +'cet_' +'certification' +'certificates_' +'certificate_' +'cents_' 
+'centrist_' +'centraliz' +'cent' +'cen_' +'cema' +'celebrati' +'celebrat' +'cele' +'ceiling_' +'cattle_' +'cations_' +'cater' +'catching_' +'catas' +'carte_' +'caro' +'cardi' +'cape' +'capacities_' +'cane_' +'campaign' +'calme_' +'calibration' +'cache_' +'bürger_' +'bünd' +'bücher' +'bü' +'byte_' +'byp' +'buyers_' +'bustl' +'burst' +'burgh' +'burg' +'bure' +'bundes' +'build' +'bug' +'buf' +'buen' +'bten' +'bte_' +'bsc' +'brut' +'brus' +'bruch' +'browse' +'brow' +'britischer_' +'bringen' +'brig' +'brew' +'brethren_' +'breiteren_' +'brechen' +'brav' +'brasilianische_' +'bras' +'brands_' +'bourne_' +'boundary_' +'boss' +'bos' +'borrowed_' +'bonus_' +'bomb' +'bodi' +'boden_' +'bnis' +'blü' +'blur' +'blood' +'blogs_' +'blogg' +'blockier' +'blin' +'bled_' +'blaue' +'blatt_' +'blasen' +'bisc' +'birthday_' +'birth' +'bird_' +'biom' +'biological_' +'biog' +'biofuel' +'bindung_' +'binary_' +'bilitati' +'bilis' +'bildete' +'bilder_' +'bilanz_' +'bil_' +'bike' +'biete_' +'bid' +'bibliot' +'bias_' +'bia_' +'bezwe' +'bezog' +'bezieh' +'bewunder' +'bewohne' +'bewirt' +'bewi' +'bewerbe' +'bewege' +'bewe' +'bewahr' +'bewaffnet' +'beur' +'beunruhigende' +'betriebs' +'betriebene' +'betr' +'betonte' +'beteiligten_' +'beteiligen_' +'beta' +'bestände_' +'bestr' +'besto' +'bestimmtes_' +'besti' +'bestens_' +'bestellt' +'bestellen_' +'besser' +'besorgniserregende' +'besie' +'besetzt_' +'beschriebenen_' +'beschrieb_' +'beschr' +'beschlossene' +'beschloss_' +'bescheiden' +'besche' +'besa' +'berühren_' +'beruhte_' +'beruhigen' +'berke' +'berichtete_' +'berh' +'bereits' +'bereichen_' +'berater' +'berada_' +'beobachtet_' +'benign_' +'bende' +'benar_' +'bemüh' +'belt_' +'belt' +'belongs_' +'belohn' +'belo' +'beln_' +'beliebt_' +'belegen_' +'beleben' +'bekunde' +'bekomm' +'beklagen_' +'bekl' +'beinhalten_' +'beider_' +'beherrscht_' +'behave' +'behauptete_' +'beharrt' +'begrüß' +'begrenzter' +'begrenzen_' +'beginn' +'begi' +'begegnet' +'befürworten_' +'befürworte_' +'befürchten_' +'befürchte' +'befu' +'befo' +'befindlichen_' +'befindliche' +'beeindruckt' +'beeindruckenden_' +'bedingt_' +'bedeutsam' +'bedeutender' +'bedeutend_' +'bedauere_' +'bedank' +'beauf' +'bearbeitung' +'bearbeitet' +'bear' +'beant' +'beans' +'beachtliche' +'beachte' +'beabsichtig' +'bbe' +'battles_' +'baths_' +'basierte' +'basel' +'baru' +'barre' +'barr' +'baro' +'barbe' +'barbari' +'bant' +'bans_' +'banker' +'bani' +'bana' +'bakar_' +'baj' +'baggage_' +'baden_' +'bacteria' +'backup_' +'backpack' +'backlash_' +'až_' +'año' +'axis_' +'ax' +'awards_' +'await' +'awa_' +'avoid' +'außergewöhnliche_' +'außerge' +'außenst' +'autoritäre' +'automatisier' +'automatischen_' +'authorita' +'authorisation_' +'authentic_' +'ausüb' +'auszusp' +'auszugeben_' +'auswirkt_' +'ausweiten' +'ausweis' +'auswa' +'austausch' +'aust' +'aussp' +'aussichten_' +'aussa' +'ausrüstung' +'ausrichte' +'ausreicht_' +'auslöste' +'ausländischer_' +'ausle' +'ausla' +'ausgewählte_' +'ausgewogen_' +'ausgewertet_' +'ausgetragen_' +'ausgerichteten_' +'ausgenommen_' +'ausgeglichene' +'ausgegli' +'ausgefü' +'ausgebe' +'ausgearbeitet' +'ausgab' +'ausfäll' +'auseinandersetz' +'ausdrückliche' +'ausdrück' +'ausar' +'aur' +'augen' +'aufzut' +'aufzust' +'aufzub' +'aufwer' +'auftrag' +'aufsch' +'aufs_' +'aufr' +'aufnahme_' +'aufkommende' +'aufk' +'aufhalten' +'aufgreif' +'aufgeworfen' +'aufgewe' +'aufgetr' +'aufgeben_' +'aufeinander_' +'auern_' +'audi' +'auction' +'auch' +'auben' +'atu' +'attraktiver_' +'attraktiv' +'attracted_' +'attacker' +'atrocities_' +'aton' +'atom' +'atmospher' +'ative' 
+'atische' +'atisch_' +'ationsa' +'athlet' +'ater_' +'aten' +'atel' +'atastrophe' +'assured_' +'assur' +'assu' +'associate' +'assert_' +'asse' +'assault' +'assassination' +'asis' +'asin' +'asce' +'artung' +'artist' +'artis' +'artikel_' +'artig' +'arriving_' +'arrange' +'arra' +'arom' +'armies_' +'arl' +'arke' +'aris_' +'arien_' +'arians_' +'argument' +'arguabl' +'arf_' +'arena_' +'ardin' +'archäologische' +'archiv' +'architektonische' +'archipelago' +'archi' +'arbeitung_' +'arbeitslos_' +'arbeitete' +'arabisch' +'appropriati' +'appropriate' +'appreciat' +'applicants_' +'applaud' +'appelliere_' +'apli' +'ao_' +'anzub' +'anzeig' +'anybody_' +'anwendungen_' +'antwortlich' +'antrags_' +'antivirus_' +'antim' +'antiken_' +'anticipate_' +'antibioti' +'anter' +'antag' +'anstrebt' +'ansieht_' +'anschließende' +'anschaue' +'anpassen_' +'annähernd_' +'anktionen_' +'ankl' +'anken_' +'ank_' +'anj' +'angriffe' +'angig' +'angga' +'angetrieben' +'angesp' +'angese' +'angesch' +'angesammelt' +'angenomme' +'angenehme_' +'angemessener' +'angeme' +'angehoben_' +'anga' +'anfü' +'anfängt_' +'anforderung' +'anesische' +'anerkenn' +'aner_' +'andin' +'andi' +'andeu' +'anchor' +'ance' +'analyti' +'analysing_' +'analy' +'analog_' +'anak_' +'ample_' +'amid' +'amending_' +'ambiguous_' +'ambient' +'ambience_' +'amba' +'altr' +'altet_' +'alternative' +'altern' +'alm' +'allo_' +'allmählich' +'alljährlich' +'alli' +'alleine_' +'allegedly_' +'alk' +'alive_' +'alität' +'alition' +'alisieren_' +'alin' +'align_' +'align' +'alienation_' +'alien_' +'alan_' +'aktualisiert' +'aktu' +'aktivitäten_' +'aktive' +'aktion_' +'akti' +'airspace_' +'aire' +'aikan' +'aid' +'ahlung' +'agung_' +'agt_' +'agrees_' +'agierende' +'aggebe' +'agendas_' +'agein' +'agar' +'aftermath_' +'afor' +'afghanischen_' +'afghanische_' +'affen_' +'affair_' +'aerospace_' +'advertise' +'adversaries_' +'adver' +'admissi' +'admira' +'administrator_' +'administrativ' +'administer' +'admin_' +'adjusting_' +'adic' +'ader' +'adan' +'activis' +'activ' +'acquis_' +'acquiring_' +'acqui' +'achtung' +'achte' +'acht' +'ached_' +'accueil_' +'accomodati' +'accommodate_' +'acco' +'accessibility_' +'accede' +'abzuw' +'abzus' +'abzielen_' +'abwechs' +'abundan' +'abstra' +'absorbi' +'absorb_' +'absol' +'absi' +'absent_' +'abschrecken' +'abort' +'abolition_' +'abolished_' +'abnehmen' +'abilität' +'abilities_' +'abili' +'abide_' +'abhalten_' +'abgeschl' +'abgescha' +'abgesch' +'aben' +'abduction' +'aban' +'aat_' +']]. 
_' +']] ' +'[ _' +'[' +'Zürich_' +'Zü' +'Zyp' +'Zyklus_' +'Zyklen_' +'Zwe' +'Zw' +'Zuwa' +'Zuw' +'Zuverlässigkeit' +'Zutaten_' +'Zuständ' +'Zusch' +'Zusammenschluss_' +'Zusammenhänge' +'Zusammen_' +'Zurückh' +'Zula' +'Zuhöre' +'Zuhause_' +'Zugeh' +'Zufriedenheit_' +'Zuflu' +'Zivilisation_' +'Zinssatz' +'Zin' +'Zimmerman' +'Zielvorgabe' +'Zeug' +'Zentren_' +'Zensur' +'Zem' +'Zellen' +'Zeitungs' +'Zeits' +'Zeitlinie_' +'Zeitge' +'Zahlungsv' +'Zahlungsaus' +'Zah' +'Yor' +'Yale_' +'YA' +'XWB_' +'XT' +'XII' +'XF' +'X1' +'Wünsch' +'Wäldern_' +'Wähler' +'Wähl' +'Wut' +'Wunde' +'Works_' +'Wolfs' +'Wolfgang_' +'Wolfensohn_' +'Wohnungen_' +'Wohnung_' +'Wohnb' +'Wohlstands_' +'Wohlergehen' +'Wohlbefinden_' +'Wochen' +'Wirtschaftsn' +'Wirtschaftsg' +'Wirtschaftsbe' +'Wirts' +'Wirks' +'Wire' +'Wirbelst' +'Wins' +'Wing' +'Winde' +'Wind_' +'Willens' +'Wille_' +'Wilders_' +'Wiedervereinigung' +'Wiederholung_' +'Wiederh' +'Wiederaufnahme_' +'Wieder_' +'Widget' +'Wichtiger' +'Wic' +'Whirlpool_' +'Whereas_' +'Whe' +'Wetter' +'Wettbewerbsvor' +'Wettbewerbsver' +'Wettbewerbspolitik_' +'Westeuropa_' +'Wesentliche' +'Wertschöpfung' +'Werts' +'Wert' +'Weltr' +'Weltor' +'Weltme' +'Weltkultur' +'Weltkrieges_' +'Welthandels' +'Weltg' +'Weltbevölkerung_' +'Weiße_' +'Weish' +'Weine_' +'Weile' +'Weigerung_' +'Wednesday_' +'Wechsel' +'Webserver' +'Way' +'Wasserkraft' +'Washing' +'Wanderung' +'Wallström_' +'Waldbrände' +'Wald_' +'Wake' +'Wahler' +'Wahlbe' +'Wagen_' +'Waffenstillstand_' +'Wachstumsraten_' +'Wachstumsrate_' +'WT' +'WS_' +'WM_' +'WEI' +'WAV_' +'Völkern_' +'Völkermord' +'Vö' +'Vé' +'Vä' +'Vá' +'Vulcan_' +'Vul' +'Vot' +'Vos' +'Vorstands' +'Vorsta' +'Vorst' +'Vorsitzende' +'Vorliebe' +'Vorla' +'Vorhersage' +'Vorgang' +'Vorgabe' +'Vorfall_' +'Vorder' +'Volvo_' +'Vollm' +'Volkswagen_' +'Volkspartei_' +'Volksabstimmung_' +'Vladimir_' +'Vizepräsident_' +'Vize' +'Vitamin' +'Virginia' +'Viol' +'Vik' +'Viewer_' +'View' +'Viertel' +'Vielf' +'Via' +'Verzögerung_' +'Verzweiflung' +'Verzug' +'Verwer' +'Verweise' +'Verw' +'Verträgen_' +'Vertrauens_' +'Vertragsver' +'Verteil' +'Verteidiger' +'Versäum' +'Verste' +'Versp' +'Verschwörung' +'Verschwi' +'Verschwendung_' +'Verschuld' +'Verschlechterung' +'Verschiedene_' +'Verschieb' +'Vers' +'Verpa' +'Verp' +'Vero' +'Vernichtung_' +'Verne' +'Vermeidung_' +'Verlaufe_' +'Verlangsamung_' +'Verkäufe' +'Verkehrst' +'Verkehrss' +'Verhandlung_' +'Vergleichss' +'Vergew' +'Verfassungsentwurf_' +'Verfall' +'Vere' +'Verbü' +'Verbr' +'Verbe' +'Verantwortlichen_' +'Veranstaltungs' +'Veran' +'Venus_' +'Vent' +'Venezia' +'Vec' +'Vaters_' +'Variablen_' +'VS' +'VIN' +'VA_' +'Uti' +'Urteile' +'Ursprünge_' +'Urlaube' +'Uribe_' +'Urheberrechts' +'Urbanis' +'Upgrade_' +'Unterwa' +'Untert' +'Unterschrift' +'Untersch' +'Unternehmensf' +'Unterl' +'Unterhaltung' +'Unterha' +'Unterbrechung' +'Untera' +'Unsinn_' +'Unrecht_' +'Unix_' +'Universitäts' +'Universal' +'Unit_' +'Unglücklicherweise_' +'Unglück_' +'Ungleichgewicht' +'Ungere' +'Une' +'Umweltver' +'Umweltschutz' +'Umwelts' +'Umweltpr' +'Umweltpolitik_' +'Umverteilung' +'Umstellung_' +'Umfragen_' +'UV_' +'URL' +'UNICEF_' +'Türke' +'Tyrol' +'Tyran' +'Tyr' +'Type_' +'Tusk' +'Turnier_' +'Tunis' +'Tunes_' +'Tsunami_' +'Tschad_' +'Träger_' +'Truste' +'Trojan' +'Trichet_' +'Trend' +'Tren' +'Treibhausgasemissionen_' +'Treibhausgas' +'Treasury_' +'Travel' +'Trau' +'Trap' +'Transit_' +'Transform' +'Transaktionen_' +'Trans_' +'Trainer_' +'Tow' +'Tours_' +'Tourist' +'Tool' +'Too' +'Tole' +'Tisch' +'Timor' +'Timo' +'Til' +'Tiger' +'Tiere' +'Tiera' +'Tic' 
+'Think' +'Thing' +'Therme' +'Therap' +'Ther' +'Theorien_' +'Theodore_' +'Thatcher_' +'Texte' +'Terrorismusbekämpfung_' +'Terroranschl' +'Territorium_' +'Terr' +'Terms_' +'Tendenzen_' +'Temperatur_' +'Temperatur' +'Televis' +'Telekommunikation_' +'Teilung' +'Teilnehmer' +'Technologies_' +'Technis' +'Technical_' +'Tea_' +'Taylor_' +'Tax_' +'Tax' +'Tatatabot_' +'Taste_' +'Tasche' +'Target_' +'Tale' +'Tagung' +'TZ' +'TW' +'TU_' +'TRO' +'TPP_' +'TEN' +'TA_' +'T2' +'Südtirol_' +'Südoste' +'Südostasien' +'Süd_' +'Säug' +'Sän' +'São_' +'Sz' +'Syriza_' +'Syndrom' +'Swim' +'Sustain' +'Surve' +'Surely_' +'Suprem' +'Sup' +'Sunnis_' +'Sunn' +'Summers_' +'Summer_' +'Summen_' +'Summ' +'Suf' +'Suda' +'Substantiv' +'Subsi' +'Subscri' +'Subs' +'Subm' +'Subjekt' +'Stücke_' +'Störun' +'Stärken_' +'Stuf' +'Studi' +'Ström' +'Stric' +'Strau' +'Strategic_' +'Strasse' +'Strafv' +'Stornierung' +'Stop' +'Stone' +'Stock' +'Stimmen' +'Stillstand_' +'Stie' +'Steven' +'Steuerh' +'Steuerer' +'Steuereinnahmen_' +'Stern' +'Stehende_' +'Stefan_' +'Stea' +'Statute_' +'Standpunkten_' +'Standpunkte_' +'Stammzellen' +'Stahl_' +'Stadtk' +'Stadi' +'Stabilitätspakt_' +'Staatsp' +'Staatsbürgerschaft_' +'Sprung' +'Sprin' +'Split' +'Spitzenpo' +'Spit' +'Spirit' +'Spezifikation' +'Spezialist' +'Spend' +'Spen' +'Spektrum' +'Speise' +'Speicherka' +'Spaß_' +'Spazier' +'Sparpolitik_' +'Sparmaßnahmen_' +'Sparen' +'Spam' +'Spalte' +'Sozialp' +'Sozialisten_' +'Sozialismus_' +'Sozialdemokraten_' +'Sowjet' +'Sonnenunterg' +'Sonderg' +'Sommers' +'Somalia_' +'Soft' +'Society_' +'Snowboard' +'Slu' +'Slovenia_' +'Sle' +'Skl' +'Skandal' +'Sitzungsperiode_' +'Sitz' +'Simon_' +'Silber' +'Siena_' +'Siedl' +'Sieben' +'Side' +'Sicherheitsbe' +'Sicherheit' +'Shut' +'Shows' +'Shop' +'Shinzo' +'Shell_' +'Shel' +'Sharia_' +'Sex' +'Seven_' +'Ses' +'Serv' +'Senkaku_' +'Senior' +'Senegal_' +'Sendung_' +'Semit' +'Self' +'Selbstz' +'Selbstvertrauen_' +'Selbstmord' +'Seiten' +'Seit' +'Sehen' +'Seeverkehr' +'Sechste' +'Sech' +'Seattle_' +'Seas' +'Screening' +'Schüt' +'Schönheit' +'Schön' +'Schö' +'Schwächung' +'Schwächen_' +'Schwinde' +'Schwimmbad_' +'Schwie' +'Schwester_' +'Schwellenmärkte' +'Schweigen_' +'Schwe' +'Schwarze' +'Schwachstelle' +'Schutzge' +'Schuss' +'Schuman' +'Schulter' +'Schuldenlast' +'Schuhputzmaschine_' +'Schröder' +'Schritten_' +'Schrei' +'School' +'Schock_' +'Schock' +'Schnitt' +'Schloss' +'Schlo' +'Schlecht' +'Schlacht' +'Schla' +'Schil' +'Schiene_' +'Schied' +'Schein' +'Schauspiele' +'Schatten' +'Schaff' +'Schaf' +'Schadens' +'Schad' +'Saš' +'Savo' +'Savi' +'Sauberkeit_' +'Sauber' +'Sard' +'Saraj' +'Sarah_' +'Santo' +'Sandstr' +'Sand_' +'Samo' +'Sammlungen_' +'Samb' +'Salva' +'Sali' +'Sak' +'Saint' +'Sahara_' +'Sachverhalte' +'Saal_' +'SSI' +'SPE' +'SOEs_' +'SMEs_' +'SL_' +'SIS_' +'SING' +'SDL_' +'SB' +'Rücküberweisung' +'Rücktritt_' +'Rückst' +'Rückhalt_' +'Rückführung_' +'Ryan_' +'Ry' +'Rural_' +'Run_' +'Run' +'Rum' +'Ruine' +'Ruhes' +'Rue' +'Roy' +'Row' +'Rotterdam_' +'Rotarier' +'Rost' +'Ros' +'Root' +'Roo' +'Ron_' +'Ron' +'Rohstoffpreise' +'Rohstoffe_' +'Robot' +'Robin' +'Risikobe' +'Rio' +'Rim' +'Richter' +'Rib' +'Rhythm' +'Rho' +'Rhin' +'Reze' +'Revi' +'Reve' +'Respons' +'Residen' +'Reservierung' +'Reservation' +'Republikan' +'Repräsentanten_' +'Repräsentant' +'Repression' +'Renzi_' +'Renten_' +'Rente_' +'Religions' +'Relevanz_' +'Relation' +'Reisez' +'Reisever' +'Reisetipp_' +'Reis' +'Reha' +'Register' +'Regierungspo' +'Regarding_' +'Refu' +'Reden_' +'Reco' +'Rechtssysteme' +'Rechtssystem' +'Rechtsst' +'Rechtssicherheit_' 
+'Rechtsr' +'Rechtschreib' +'Rechtsbe' +'Rechtsausschuss' +'Rechtsanw' +'Recherche' +'Rechenschaftspflicht_' +'Recep' +'Rebellion_' +'Realit' +'Realisierung_' +'Reading' +'Reac' +'Raums_' +'Raume' +'Rauchen_' +'Ration' +'Rating_' +'Ratifi' +'Rathaus' +'Rapid_' +'Rangliste' +'Ral' +'Raketen_' +'Rail_' +'Raci' +'RT_' +'RK' +'RANT' +'RAM_' +'RAC' +'Quoten_' +'Quellcode_' +'Quar' +'Quanti' +'Quant' +'Qualifikationen_' +'Qual' +'Quadrat' +'Qu' +'Qatar_' +'Qaddafi_' +'Qa' +'QUI' +'Pv' +'Push' +'Pump' +'Pull' +'Pub' +'Psycho' +'Psych' +'Präzisi' +'Präsidentschaftswahlen_' +'Präsentation' +'Prämien_' +'Präf' +'Provinz' +'Provider_' +'Prototyp' +'Proto' +'Proteine_' +'Protein' +'Protect' +'Propo' +'Propaganda_' +'Promenade_' +'Programmier' +'Profit_' +'Profil_' +'Professional_' +'Produkts' +'Produktionsst' +'Produktionspr' +'Product' +'Proc' +'Problemati' +'Privileg_' +'Privats' +'Privatis' +'Prior_' +'Priester' +'Prev' +'Prepa' +'Premierminister' +'Preisstabilität_' +'Preises_' +'Prakti' +'Prag' +'PowerP' +'Potter' +'Potsdam' +'Poten' +'Postgre' +'Postdienst' +'Posse' +'Position' +'Portugies' +'Portale' +'Porta' +'Population_' +'Pon' +'Politicians_' +'Pola' +'Plo' +'Plenar' +'Play_' +'Plata_' +'Plast' +'Planet_' +'Pizza' +'Pino' +'Pilote' +'Pilot' +'Pier' +'Pick' +'Picc' +'Pic' +'Photocopying_' +'Phoeni' +'Philippi' +'Pharma' +'Pflanz' +'Pfl' +'Pfeiler_' +'Pfad_' +'Peters' +'Perspective_' +'Persi' +'Perm' +'Perfe' +'Pension_' +'Pennsylvania_' +'Pend' +'Pemb' +'Pedro' +'Peak' +'Pax_' +'Paus' +'Pati' +'Patent_' +'Passw' +'Passport_' +'Passei' +'Passe' +'Pass_' +'Partner' +'Partic' +'Parti' +'Parm' +'Parlamentarier_' +'Parkplatz_' +'Parke' +'Paris' +'Parc' +'Paramet' +'Parallelen_' +'Parallel_' +'Paradox' +'Paolo_' +'Pani' +'Pand' +'Pan_' +'Palästinensischen_' +'Pakets_' +'Paint' +'Packag' +'PROGR' +'POL' +'PN' +'PLAYER' +'PIC' +'PAS' +'Oxford_' +'Ow' +'Outlook_' +'Ostse' +'Oster' +'Osborne_' +'Ortho' +'Ortega' +'Organisat' +'Organen_' +'Ordn' +'Order' +'Orden_' +'Optimi' +'Opportuni' +'Opfern_' +'Operation' +'Omni' +'Offi' +'Oe' +'Obersten_' +'Oberste_' +'Oberh' +'Obergrenze_' +'ORT' +'OM_' +'OFI' +'Nöt' +'Nö' +'Nutzungsbedingungen_' +'Nutze' +'Num' +'Now' +'Nove' +'Notwendig' +'Notfall' +'North' +'Normali' +'Nordi' +'Nomin' +'Nomad_' +'Nobody_' +'Nis' +'Nina_' +'Niko' +'Night_' +'Night' +'Niederla' +'Niedergang_' +'Nico' +'Nick' +'Nichte' +'Newsletter_' +'Nevada_' +'Nev' +'Neustart' +'Neuschwanstein_' +'Neuf' +'Neues_' +'Neuen' +'Neuausrichtung_' +'Nerv' +'Ner' +'Neo' +'Neighbo' +'Nego' +'Neb' +'Neapel_' +'Nazi' +'Naturwissenschaft' +'Naturpark' +'Naturk' +'Natural' +'Nationalst' +'Nationalpark_' +'Nationalpar' +'Namib' +'Nam' +'Nak' +'Nahrungs' +'Nahost_' +'Nahe' +'Nachw' +'Nachmittag_' +'Nachhaltige' +'Nachbarschaftspolitik_' +'Nachbarschaft_' +'Nachbarländer' +'Nachbar' +'NY' +'NIC' +'NG_' +'NGO_' +'NEC' +'ND_' +'NDE' +'Mütter_' +'Mün' +'Mönch' +'Möglicherweise_' +'Mythos_' +'MySpace_' +'Mutter' +'Muslim' +'Musical' +'Museums_' +'Museen_' +'Muse' +'Murdoch_' +'Mull' +'Mozart' +'Moz' +'Movielearn_' +'Movie_' +'Mosle' +'Moses_' +'Mosambik_' +'Mord' +'Mora' +'Montreal_' +'Monterrey_' +'Montai' +'Montag' +'Monster_' +'Mons' +'Molda' +'Modi_' +'Moderne_' +'Moder' +'Model' +'Moda' +'Mobiltelefon_' +'Mobilität_' +'Mittleren_' +'Mittelwe' +'Mitteleurop' +'Mitteil' +'Mitleid' +'Mitbe' +'Mitarbeiter' +'Mist' +'Missverständnis' +'Misstrauen_' +'Mission' +'Missb' +'Mira' +'Mir' +'Mins' +'Ministerrat_' +'Ministerpräsidenten_' +'Min_' +'Millions' +'Millia' +'Militära' +'Milchprodukt' +'Milch' +'Mikrof' 
+'Mike_' +'Metropoli' +'Metalle' +'Metal' +'Messa' +'Mercedes_' +'Menü' +'Menschenrechtsko' +'Mena' +'Memory_' +'Memori' +'Meli' +'Meister' +'Mein' +'Mehrzahl_' +'Mehrwertsteuer' +'Meeresf' +'Meere' +'Medina' +'Mechani' +'McK' +'McG' +'Maßstäbe' +'Maxi' +'Max_' +'Mauri' +'Mauer' +'Massenm' +'Massaker' +'Marí' +'Marta_' +'Mars_' +'Marr' +'Marqu' +'Marktzug' +'Marktt' +'Market' +'Mare' +'Mara' +'Malware_' +'Mali_' +'Maje' +'Mainstream' +'Main_' +'Mailand_' +'Mahlzeiten_' +'Magst_' +'Magazin_' +'Magazin' +'Machtver' +'Machts' +'MX' +'MT_' +'MS' +'MP4_' +'MOV_' +'MOS' +'MIN' +'MIDI_' +'MG' +'MENT' +'MB' +'MAT' +'MAN_' +'MAN' +'M4' +'Lüg' +'Lücke_' +'Lä' +'Luz' +'Lup' +'Luk' +'Lufthansa_' +'Ludwig_' +'Lucas_' +'Loyali' +'Louvre' +'Lohns' +'Logi' +'Lloyd' +'Livi' +'Liverpool_' +'Liter' +'Listen_' +'Lima_' +'Liese' +'Lieferu' +'Lied' +'Libye' +'Libyan' +'Leser_' +'Lesen_' +'Leo' +'Leno' +'Leiche' +'Lehrer' +'Lehman_' +'Legend' +'Leg' +'Lebensstandard_' +'Lebense' +'Lebanese_' +'Leave_' +'Laz' +'Lava' +'Laufzeit_' +'Laufen' +'Latvia' +'Latin' +'Lass' +'Larry_' +'Lannoye_' +'Langzeit' +'Landwirtschafts' +'Landw' +'Lampe' +'Lamaniten_' +'Lager' +'Lagen' +'Lady' +'Label_' +'Lab_' +'LR' +'LC_' +'Küsten_' +'Künst' +'Kün' +'Kühlschra' +'Kü' +'Körperschaft' +'Königreichs_' +'Köln' +'Käufer' +'Käuf' +'Kyr' +'Kyi' +'Kuwait_' +'Kurze' +'Kuro' +'Kurdish_' +'Kunde_' +'Kulissen_' +'Kuli' +'Kriterium_' +'Kriminelle' +'Krieg' +'Krem' +'Kreditvergabe_' +'Kreditv' +'Kreditnehmer' +'Kreditkarten' +'Kreditb' +'Kreditaufnahme_' +'Krebs_' +'Kreaturen' +'Kreat' +'Kraftwerk' +'Kow' +'Kosov' +'Korruptions' +'Kori' +'Koreans_' +'Koran' +'Kora' +'Kopiere' +'Kopfs' +'Koordin' +'Konzerne' +'Konvertier' +'Konvers' +'Kontextmenü' +'Konte' +'Konsultationen_' +'Konserv' +'Konferenzr' +'Konferenze' +'Kompetenz_' +'Kommuni' +'Kommissionsvorschlag_' +'Kommissionsmitglied' +'Kommissar' +'Kommentar_' +'Kolumbien_' +'Kolonie' +'Kollekti' +'Koizumi_' +'Kohlendioxid_' +'Koh' +'Kofinanzierung' +'Knowledge_' +'Kno' +'Kni' +'Klu' +'Klingon' +'Kleine' +'Klein_' +'Klausel' +'Klassifi' +'Klage_' +'Kinnock_' +'King' +'Kindes' +'Kinderar' +'Kin' +'Khomeini_' +'Kho' +'Khan' +'Khal' +'Keys' +'Kernel' +'Kenn' +'Kaukasus_' +'Katastrophenschutz' +'Katalo' +'Kaschmir' +'Karzai_' +'Kapitalmärkte_' +'Kapitalflu' +'Kanäle_' +'Kana' +'Kammer' +'Kamin' +'Kama' +'Kalk' +'Kaliningrad' +'Kale' +'Kade' +'Kaczyński_' +'Kabel' +'KW' +'KU' +'KMU_' +'KLM_' +'KING' +'KEI' +'KB' +'Jury' +'Juris' +'Jugendlicher_' +'Jugendherberg' +'Juden_' +'Journalist' +'Jord' +'Jong_' +'Jonas' +'Johnson' +'Johanne' +'Jog' +'Joan' +'Jim_' +'Jet' +'Jes' +'Jem_' +'Javi' +'Jaro' +'Jar' +'Japaner_' +'Jan' +'Jahrtausend' +'Jahrhunderte' +'Jahresz' +'Jag' +'Jacob_' +'JRE_' +'JE' +'Ivan' +'Iv' +'Ite' +'Italiens_' +'Italien' +'Issu' +'Iraker_' +'Investor' +'Internetverbindung_' +'Interneta' +'International' +'Intensität' +'Int' +'Insur' +'Insti' +'Inse' +'Innovation' +'Innenpoliti' +'Initiati' +'Inhalts' +'Inhaber_' +'Infrastructure_' +'Infos_' +'Informationss' +'Informationsa' +'Informati' +'Inflationsr' +'Infineon' +'Infektions' +'Infekt' +'Infe' +'Industriesta' +'Industrielle' +'Industrial_' +'Industri' +'Indoor_' +'Indo' +'Indiz' +'Individual' +'Indic' +'Increased_' +'Inco' +'Inclu' +'Inci' +'Improvi' +'Impres' +'Impfstoffe_' +'Immerhin_' +'Immer_' +'Ign' +'Ideologie_' +'Ideolog' +'Ideally_' +'Ideale' +'Ideal' +'Icon' +'Ibn_' +'IR_' +'INE' +'INCLUD' +'ILA' +'IFA' +'IES_' +'IEN' +'ICT' +'Hügel_' +'Hör' +'Höl' +'Höf' +'Hôtel_' +'Händler' +'Hypo' +'Hyg' +'Hydr' +'Hybri' +'Hv' 
+'Hungarian_' +'Hunderttausende' +'Hunderte' +'Hund' +'Hul' +'Hubschrauber_' +'Hua' +'House' +'Hotelzimmer_' +'Hot_' +'Horizonte' +'Horizont' +'Hond' +'Hom' +'Holy_' +'Hoff' +'Hochwasser' +'Hochgeschwindigkeits' +'His' +'Hinr' +'Hindus_' +'Himmel_' +'Hilfsmittel_' +'Hilfe' +'Highway' +'Hig' +'Heut' +'Het_' +'Herzog' +'Herv' +'Herrscher_' +'Herman' +'Herangehensweise_' +'Heilige' +'Hed' +'Hebr' +'Header' +'Hay' +'Hava' +'Haustür' +'Haushaltspolitik_' +'Haushaltsplan' +'Haushaltsmittel' +'Haushaltskon' +'Haushaltsausschusses_' +'Haushaltsaus' +'Hauptt' +'Hauptstr' +'Hauptp' +'Hauptau' +'Hatoyama_' +'Hass_' +'Harris_' +'Harr' +'Harm' +'Hariri' +'Hara' +'Happ' +'Hanse' +'Handl' +'Handelsa' +'Hande' +'Hamm' +'Halte' +'Hack' +'Haben' +'Haag' +'HR' +'HOT' +'HER' +'HEN' +'HC_' +'HAVEN_' +'H1' +'Gültigkeit_' +'Gül' +'Göttin' +'Gän' +'Gutes_' +'Gunsten_' +'Gues' +'Guardi' +'Guard' +'Größen_' +'Größ' +'Grundzüge' +'Grundwasser' +'Grundsätzlich' +'Grundsätzen_' +'Grundsatze' +'Grunds' +'Grundrechte' +'Grundl' +'Großka' +'Große' +'Grou' +'Griechen' +'Grey' +'Grenzwert' +'Gremien_' +'Gregori' +'Grego' +'Gramm' +'Graci' +'Gourmet_' +'Gothic_' +'Goth' +'Gos' +'Gore_' +'Good' +'Golfplätze' +'Golds' +'Gob' +'Glück' +'Gloucester' +'Glied' +'Glen' +'Gleichg' +'Gleichbehandlung_' +'Gle' +'Girl' +'Gio' +'Gibraltar_' +'Gia' +'Gewinne' +'Gewin' +'Gewerkschaft' +'Gewerbe' +'Gewalttaten_' +'Getränke' +'Getränk_' +'Gesundheitsz' +'Gesundheitsv' +'Gesundheitssystem' +'Gesundheitsm' +'Geste' +'Gesicht' +'Gesetz' +'Geschäftsv' +'Geschäftsbereich' +'Geschäften_' +'Geschw' +'Geschlecht' +'Geschirr' +'Geschichten_' +'Gesamth' +'Gesamtbe' +'Geräte' +'Germani' +'Germ' +'Gerichtsh' +'Georgi' +'George' +'Geogra' +'Genießen_' +'Generalsekret' +'Genauigkeit' +'Genau' +'Gen_' +'Gemäß' +'Gemeinschaftsin' +'Gelände_' +'Gele' +'Geldes_' +'Gei' +'Gehälter' +'Gehirn_' +'Gehirn' +'Geheimnis' +'Gehei' +'Gehalt_' +'Gegenden_' +'Gefüh' +'Gefängnisse' +'Gefahren' +'Gedächtnis_' +'Gedicht' +'Geburtstag' +'Geburten' +'Gebot' +'Gate_' +'Gastge' +'Garden' +'Garan' +'Ganz' +'Gamm' +'Gaming_' +'Gallery_' +'Galerie_' +'Gale' +'Gala' +'GW_' +'GUI' +'GUE_' +'GP_' +'GO_' +'GMT_' +'GMOs_' +'GL' +'GE_' +'Fülle' +'Fä' +'Fut' +'Fundament' +'Frühstücks' +'Frustration_' +'Frontier' +'Frist' +'Freud' +'Frequenz' +'Fremdenverkehr_' +'Fremdenfeindlichkeit_' +'Freizeita' +'Freilich' +'Freigabe_' +'Freedoms_' +'Fred_' +'Fred' +'François_' +'Franco' +'Fragment' +'Fragestunde_' +'Frage' +'Founde' +'Fotografie_' +'Fortschr' +'Fort_' +'Forschungss' +'Forschungsergebnisse_' +'Forschungsa' +'Formular' +'Formel_' +'Forest_' +'Fore' +'Football_' +'Fonta' +'Folglich_' +'Focus' +'Flut_' +'Flusse' +'Flus' +'Flie' +'Flemi' +'Flasche' +'Flam' +'Flagg' +'Fixed_' +'Fit' +'Fischereiabkommen_' +'Fische_' +'Firmware_' +'Firm' +'Firew' +'Finger_' +'Finanzwesen_' +'Finanzst' +'Finanzp' +'Finanzmittel_' +'Finanzmi' +'Finanzinstrument' +'Finanzb' +'Fina' +'Filme_' +'Filme' +'Fig' +'Fic' +'Feuerwe' +'Feststellung' +'Festiv' +'Feste' +'Fertigkeit' +'Ferna' +'Ferienhäuser_' +'Fed' +'Fatah_' +'Fans_' +'Familienzimmer_' +'Falle' +'Faktum_' +'Fahrzeug' +'Fahrt_' +'Fahrplan_' +'Fahren_' +'Fact' +'Faci' +'Fachk' +'Fabrik' +'Fab' +'FRE' +'FAQ' +'Extreme_' +'Extras_' +'Exten' +'Exporteure' +'Experts_' +'Experte' +'Experimente' +'Exper' +'Exo' +'Exist' +'Exhibit' +'Exekutiv' +'Except' +'Ew' +'Evidence_' +'Everest_' +'Even' +'Evangeli' +'Eurojust_' +'Eurocopter_' +'Euroc' +'Eurobonds_' +'EuroM' +'Eurasi' +'Eur' +'Euph' +'Establishment_' +'Essential_' +'Esse' +'Especially_' +'Erziehung_' 
+'Erzeugung' +'Erzeugnisse_' +'Erwähnung_' +'Erwägungen_' +'Erweiterungen_' +'Erwachsenen_' +'Erwach' +'Erträge_' +'Ertr' +'Erstelle' +'Erste_' +'Erstau' +'Ersparnissen_' +'Erscheinungsbild_' +'Ersatz' +'Err' +'Ernähr' +'Ernst_' +'Erleichterung_' +'Erleb' +'Erheb' +'Erha' +'Erh' +'Ergänz' +'Erg' +'Erfolgsgeschichte' +'Erfolg' +'Erbr' +'Eras' +'Equip' +'Equ' +'Entwicklungsziele' +'Entwicklungs_' +'Entstehung' +'Entspann' +'Entscheidungsprozess' +'Entführung' +'Entf' +'Enter' +'Enr' +'Englischen_' +'Englische' +'Energietechnologie' +'Energier' +'Energiepolitik_' +'Endp' +'Endl' +'Ende' +'Employ' +'Empfang' +'Emotion' +'Emirate' +'Embr' +'Email_' +'Elysées_' +'Ell' +'Elizabeth_' +'Elis' +'Elend_' +'Elemente' +'Eleganz_' +'Elefanten' +'Elde' +'Eisenbahnver' +'Einzelpersonen_' +'Einwi' +'Einwanderungspolitik_' +'Einwanderungs' +'Eint' +'Einsp' +'Einse' +'Einrei' +'Einmischung' +'Einmarsch' +'Einm' +'Einkommens_' +'Einkaufszentr' +'Einflüsse' +'Einfach_' +'Eindämmung' +'Einblick_' +'Einbindung_' +'Eid' +'Editor_' +'Edit_' +'Echtzeit_' +'Early_' +'EUR' +'ESM' +'ENI' +'EMAS_' +'Düsseldorf_' +'Dür' +'Dü' +'Dynast' +'Dynamik' +'Dutzende_' +'Dus' +'Durchschnitts' +'Durchführ' +'Durchb' +'Duomo_' +'Duff_' +'Dub' +'Ds_' +'Drücke' +'Drum' +'Drucker' +'Droh' +'Drittl' +'Dritte' +'Dringlichkeits' +'Dri' +'Dream' +'Drago' +'Dr' +'Doyle_' +'Downloads' +'Down' +'Dornik_' +'Dorn_' +'Dorf' +'Doppelzimmer_' +'Doo' +'Dominikan' +'Dolomit' +'Dolmetsch' +'Dokumentation_' +'Divers' +'DivX_' +'Div' +'Distributoren_' +'Disku' +'Diskriminierung' +'Directory_' +'Director' +'Directi' +'Diplomaten_' +'Diktatur_' +'Different_' +'Dienststelle' +'Dienstleistungssektor' +'Dienstleist' +'Diensta' +'Dienst' +'Dictionary_' +'Dichte' +'Dich' +'Dialogs_' +'Diabetes_' +'Deutsche' +'Deut' +'Detail_' +'Designs' +'Designer_' +'Desi' +'Deregul' +'Derartige_' +'Denkweise' +'Denis' +'Demokratisierung_' +'Demogra' +'Demagog' +'Delu' +'Delta_' +'Delo' +'Delegation' +'Dele' +'Dela' +'Deine' +'Deckmantel_' +'Death_' +'Daw' +'Datenschutz_' +'Datenbl' +'Date' +'Dasselbe_' +'Daseins' +'Das' +'Darwi' +'Darlehen' +'Darauf_' +'Dara' +'Dampfb' +'Damas' +'Dalma' +'Dai' +'Dafürhalten_' +'DVDs_' +'DK' +'DJ_' +'DI_' +'DIC' +'DES' +'DEN_' +'DC' +'DAX_' +'Cycl' +'Cyber' +'Curt' +'Cul' +'Cott' +'Cord' +'Copy' +'Cop' +'Cooper' +'Cool' +'Controller_' +'Conti' +'Constant' +'Conservati' +'Congress' +'Confedera' +'Condo' +'Conditions_' +'Conci' +'Concern' +'Computern_' +'Compr' +'Compani' +'Communication' +'Commen' +'Comi' +'Comfort' +'Combi' +'Colomb' +'Collect' +'Cohe' +'Coelho_' +'Coch' +'Cob' +'Clu' +'Close_' +'Clip' +'Clif' +'Cleverl' +'Cleaning_' +'Clean_' +'Classic' +'Clas' +'Clark' +'Circle_' +'Circ' +'Cind' +'Chrom' +'Christus_' +'Christopher_' +'Christo' +'Christie_' +'Christiani' +'Christen_' +'Christdemokraten_' +'Chr_' +'Chev' +'Cherno' +'Chechen' +'Charle' +'Chapel_' +'Chap' +'Champ' +'Ces' +'Center' +'Cav' +'Caucas' +'Castil' +'Cassi' +'Casio' +'Casino' +'Cash' +'Carrie_' +'Carp' +'Carlo' +'Carl_' +'Caribbean_' +'Care' +'Cardi' +'Capt' +'Canc' +'Canari' +'Canal' +'Canadian_' +'Campingpl' +'Camera' +'Cambodia_' +'Calendar_' +'Cairo' +'Caesar' +'COS' +'CONT' +'CHF_' +'CHE' +'CGI_' +'CET_' +'Bürgerkrieg_' +'Bürgerkrieg' +'Bürgerbe' +'Bündnis_' +'Bünd' +'Bücher_' +'Böge_' +'Buy' +'Busse' +'Busc' +'Bundestag_' +'Bundesstaaten_' +'Bundesp' +'Bum' +'Buddh' +'Buchungs' +'Buchst' +'Buchführung' +'Bucher_' +'Bry' +'Brunnen_' +'Bruc' +'Brow' +'Brooklyn_' +'Bronze' +'Broc' +'Britis' +'Brigade' +'Brian_' +'Brew' +'Bretton_' +'Bret' +'Brenner' +'Bremen_' 
+'Breitband' +'Brei' +'Bree' +'Brav' +'Braun' +'Branc' +'Box' +'Boul' +'Botschafter' +'Boston' +'Bosnien_' +'Bosnia_' +'Boots' +'Boot' +'Bond' +'Bombardier' +'Boeing' +'Boe' +'Bodensch' +'Bode' +'Boar' +'Blog' +'Blingee_' +'Blick' +'Blaž_' +'Birma' +'Biot' +'Biog' +'Binnenm' +'Bindungen_' +'Bin_' +'Billig' +'Billi' +'Bildern_' +'Bhutan_' +'Bezirks' +'Bey' +'Bewä' +'Bewu' +'Bewertungs' +'Bewert' +'Bewerber' +'Better_' +'Betrü' +'Beträge' +'Betreuer' +'Betracht' +'Besuchern_' +'Bestra' +'Besten' +'Bestell' +'Besonderheiten_' +'Besetzung_' +'Beschwerde_' +'Beschlussfassung' +'Beschlusse' +'Beschleunigung' +'Beschaff' +'Berufsbildung' +'Berufe' +'Bert' +'Berl' +'Berichterstattung' +'Berb' +'Beobachtungsst' +'Benz_' +'Belohnung_' +'Beliebt' +'Belgrade_' +'Belastung_' +'Belast' +'Belarus' +'Beitrittsl' +'Beitrittskandidaten_' +'Beis' +'Being_' +'Beine' +'Bein' +'Beihilfe' +'Behau' +'Behandlungs' +'Begrenzung_' +'Bege' +'Beförderungs' +'Befu' +'Befr' +'Bedürf' +'Bedienung_' +'Bede' +'Bec' +'Beauf' +'Beatri' +'Beachten_' +'Bayer' +'Bavarian_' +'Baute' +'Baustein' +'Baum_' +'Batt' +'Bath' +'Basket' +'Basis' +'Base' +'Barry_' +'Barro' +'Baro' +'Bargeld' +'Barbe' +'Bann' +'Banker' +'Bankensystem' +'Bande' +'Banc' +'Ballo' +'Bald_' +'Bajor_' +'Bahnstation_' +'Bah' +'Baghdad' +'Baden_' +'Bachelo' +'Bach_' +'Babys' +'BOJ_' +'BN' +'BJ' +'BF' +'BERKELEY_' +'Azu' +'Ax' +'Aw' +'Avatar' +'Außenbe' +'Autoren_' +'Autonomiebehörde_' +'Autobahn_' +'Ausübung' +'Auswärtige' +'Ausweg_' +'Auswe' +'Austausch' +'Aust' +'Ausspr' +'Ausschü' +'Ausschuß_' +'Ausscheiden' +'Ausscha' +'Auskunft' +'Ausgrenzung_' +'Ausgew' +'Ausgehend_' +'Ausgabe' +'Ausflu' +'Ausd' +'Aurora_' +'Aug' +'Aufträge_' +'Auft' +'Aufstände' +'Aufstand_' +'Aufse' +'Aufge' +'Auffü' +'Auffassungen_' +'Attraktionen_' +'Attent' +'Atta' +'Atlantis' +'Atla' +'Asylsuchende' +'Asylant' +'Assist' +'Asset' +'Asians_' +'Arzt' +'Aru' +'Articles_' +'Arsenal_' +'Ars' +'Arou' +'Armenian_' +'Ark' +'Argumen' +'Argentine' +'Argen' +'Arena' +'Are' +'Arbeitszeit_' +'Arbeitsver' +'Arbeitsrecht' +'Arbeitspro' +'Arbeitsp' +'Arbeitsgruppe_' +'Arbeitsg' +'Arbeitsbe' +'Arbeit' +'Apr' +'Appro' +'Anzeigen' +'Anzeige' +'Anz' +'Anyone_' +'Anwesenheit_' +'Anwendungsbereich_' +'Anwender_' +'Anwa' +'Antrags' +'Anteils' +'Anteile_' +'Ansä' +'Ansprüchen_' +'Ansprech' +'Anson' +'Anschrift' +'Anschließend_' +'Ansatzes_' +'Anreise' +'Anregungen_' +'Anpassungen_' +'Annex' +'Annahmen_' +'Anmerkungen_' +'Anmerkung_' +'Anleihe' +'Ankunft_' +'Anku' +'Anklage_' +'Ani' +'Anhörung_' +'Angr' +'Angeles_' +'Angel' +'Angebots' +'Angeb' +'Ane' +'Andria_' +'Andorra' +'Anden' +'Andalusien' +'Andalusia' +'Anda' +'Anbau_' +'Anato' +'Analys' +'Amu' +'Amo' +'Ambitionen_' +'Amazon_' +'Ama' +'Aly' +'Alum' +'Altern' +'Alta_' +'Alr' +'Alps_' +'Alpi' +'Alltags' +'Alkohol' +'Algor' +'Algerie' +'Algar' +'Alegr' +'Alber' +'Albanian' +'Alb' +'Aktuell' +'Aktualisierung_' +'Aktivisten_' +'Aktionär' +'Aktionsprogramm_' +'Akk' +'Aki' +'Aka' +'Airp' +'Airconditioning_' +'Aid_' +'Agrars' +'Agenturen_' +'Agent_' +'Afri' +'Advanced_' +'Administrat' +'Ade' +'Addis_' +'Ada' +'Active_' +'Acid' +'Acht' +'Achsen' +'Accord' +'Abzug_' +'Abwicklung_' +'Abweichung' +'Abtreibung' +'Abtei' +'Abstimmungs' +'Absti' +'Absolvent' +'Abso' +'Absen' +'Abschl' +'Abl' +'Abhängig' +'Abh' +'Abgeordnete' +'Abfälle_' +'Abdullah' +'Abbe' +'Aa' +'ATM' +'AR_' +'AP_' +'AO' +'AMS' +'AMR_' +'ALE_' +'AD_' +'ADE' +'ACI' +'ACCE' +'ABS' +'A2' +'=_' +';&' +'90er_' +'83' +'750' +'70er_' +'681' +'67' +'63' +'5th_' +'520' +'52' +'4th_' +'45' +'43' +'3G_' +'3G' 
+'39' +'370' +'37' +'270_' +'220_' +'21s' +'202' +'201' +'199' +'1972_' +'1961_' +'1960s_' +'1960' +'1959_' +'1950er_' +'1946_' +'1939_' +'1936_' +'1907_' +'171' +'170' +'16th_' +'145_' +'142' +'127_' +'124_' +'121_' +'117' +'116' +'105_' +'102' +'101' +'0er_' +'07' +'020' +'007' +'/+_' +'/ ' +'......' +'....' +'.'_' +'->' +',..._' +',- ' +',,_' +', (_' +', $_' +'++' +'* ' +'):_' +'), ' +')) (' +'))' +'() ._' +'': _' +'')' +'''.' +''''_' +'%\\' +'$ ' +'">- _' +'"...' +'". _' +'" ._' +'" -' +'!! _' +'!! !' +' …' +' ”' +' ’_' +' ­' +' £_' +' [...]' +' = {_' +' = ' +' ;' +' -> _' +' ***' +' ). _' +' (“_' +' (.' +' ('' +' &#_' +' !!' +'−' +'ي' +'ט' +'ג' +'ь' +'щ' +'ц' +'σ' +'ş' +'œ' +'ě' +'ę' +'ā' +'õ' +'ñ' +'¿' +'º' +'~' +'$' +'™' +'†' +'–' +'ن' +'ل' +'ف' +'ر' +'ר' +'נ' +'Ж' +'Д' +'υ' +'ν' +'λ' +'ś' +'ń' +'ù' +'ì' +'Ñ' +'É' +'Ã' +'Á' +'§' +'–' +'&' +'ー' +'‚' +'م' +'ק' +'ד' +'Я' +'П' +'О' +'Л' +'Е' +'А' +'π' +'κ' +'θ' +'β' +'ū' +'Ś' +'ō' +'ć' +'æ' +'Ê' +'Â' +'¼' +'·' +'¶' +'´' +'¥' +'`' +'@' +'#' +'' +'년' +'語' +'简' +'本' +'日' +'文' +'年' +'中' +'•' +'ṳ' +'ศ' +'พ' +'ा' +'र' +'ى' +'ه' +'ص' +'ت' +'ب' +'פ' +'ס' +'ן' +'ו' +'ֿ' +'В' +'ω' +'χ' +'δ' +'Ω' +'̤' +'ư' +'ů' +'ř' +'ľ' +'ė' +'ĕ' +'ą' +'û' +'À' +'½' +'¹' +'­' +'¤' +'¡' +'’' +'\' +':' +'' +'fi' +'黵' +'黃' +'鰀' +'鋘' +'鋓' +'遝' +'蒸' +'致' +'美' +'网' +'紙' +'熨' +'斗' +'応' +'女' +'味' +'友' +'信' +'介' +'丨' +'一' +'ャ' +'バ' +'チ' +'ジ' +'カ' +'ん' +'ら' +'め' +'●' +'▼' +'→' +'※' +'ớ' +'ọ' +'ị' +'ẽ' +'ẻ' +'ấ' +'ी' +'ि' +'य' +'ब' +'त' +'छ' +'आ' +'ِ' +'ك' +'غ' +'ع' +'د' +'ج' +'إ' +'،' +'צ' +'ל' +'ה' +'Қ' +'Ғ' +'Э' +'Ш' +'Ц' +'Х' +'Р' +'М' +'φ' +'ζ' +'γ' +'Χ' +'Τ' +'Ι' +'Ε' +'̯' +'̆' +'ː' +'ˈ' +'ɾ' +'ɛ' +'ɐ' +'ſ' +'ű' +'ŭ' +'ő' +'Ő' +'ŏ' +'ň' +'İ' +'ī' +'đ' +'Đ' +'ă' +'ý' +'ã' +'à' +'Ô' +'Ó' +'È' +'Å' +'¾' +'µ' +'³' +'°' +'¬' +'¢' +'' +'™' +'—' +'“' +'' +'^' +'—' +'²' +'£' +'<' diff --git a/tensor2tensor/utils/beam_search.py b/tensor2tensor/utils/beam_search.py index b42503cbf..3841b5953 100644 --- a/tensor2tensor/utils/beam_search.py +++ b/tensor2tensor/utils/beam_search.py @@ -81,8 +81,16 @@ def _expand_to_beam_size(tensor, beam_size): return tf.tile(tensor, tile_dims) +def get_state_shape_invariants(tensor): + """Returns the shape of the tensor but sets middle dims to None.""" + shape = tensor.shape.as_list() + for i in range(1, len(shape) - 1): + shape[i] = None + return tf.TensorShape(shape) + + def log_prob_from_logits(logits): - return logits - tf.reduce_logsumexp(logits, axis=2, keep_dims=True) + return logits - tf.reduce_logsumexp(logits, axis=2, keepdims=True) def compute_batch_indices(batch_size, beam_size): @@ -200,6 +208,10 @@ def beam_search(symbols_to_logits_fn, capturing observed from these operations, tensors, clients can make assumptions about which step is being recorded. + WARNING: Assumes 2nd dimension of tensors in `states` and not invariant, this + means that the shape of the 2nd dimension of these tensors will not be + available (i.e. set to None) inside symbols_to_logits_fn. + Args: symbols_to_logits_fn: Interface to the model, to provide logits. 
Should take [batch_size, decoded_ids] and return [batch_size, vocab_size] @@ -513,8 +525,7 @@ def _is_finished(i, unused_alive_seq, alive_log_probs, unused_finished_seq, tf.TensorShape([None, None, None]), finished_scores.get_shape(), finished_flags.get_shape(), - nest.map_structure( - lambda tensor: tf.TensorShape(tensor.shape), states), + nest.map_structure(get_state_shape_invariants, states), ], parallel_iterations=1, back_prop=False) diff --git a/tensor2tensor/utils/beam_search_test.py b/tensor2tensor/utils/beam_search_test.py index ec911f051..13a4e64c5 100644 --- a/tensor2tensor/utils/beam_search_test.py +++ b/tensor2tensor/utils/beam_search_test.py @@ -303,7 +303,8 @@ def symbols_to_logits(ids, _, states): states = { "state": tf.zeros((batch_size, 1)), } - states["state"]._shape = tf.TensorShape((None, 1)) + states["state"] = tf.placeholder_with_default( + states["state"], shape=(None, 1)) final_ids, _ = beam_search.beam_search( symbols_to_logits, @@ -352,7 +353,8 @@ def symbols_to_logits(ids, _, states): states = { "state": tf.zeros((batch_size, 1)), } - states["state"]._shape = tf.TensorShape((None, 1)) + states["state"] = tf.placeholder_with_default( + states["state"], shape=(None, 1)) final_ids, _ = beam_search.beam_search( symbols_to_logits, diff --git a/tensor2tensor/utils/bleu_hook.py b/tensor2tensor/utils/bleu_hook.py index 9a7985045..3974244ec 100644 --- a/tensor2tensor/utils/bleu_hook.py +++ b/tensor2tensor/utils/bleu_hook.py @@ -24,7 +24,6 @@ import re import sys import time -import glob import unicodedata # Dependency imports @@ -159,8 +158,10 @@ def property_chars(self, prefix): return "".join(six.unichr(x) for x in range(sys.maxunicode) if unicodedata.category(six.unichr(x)).startswith(prefix)) + uregex = UnicodeRegex() + def bleu_tokenize(string): r"""Tokenize a string following the official BLEU implementation. @@ -206,24 +207,38 @@ def bleu_wrapper(ref_filename, hyp_filename, case_sensitive=False): StepFile = collections.namedtuple("StepFile", "filename mtime ctime steps") + def _try_twice_tf_glob(pattern): - """tf.gfile.Glob may crash with + """Glob twice, first time possibly catching `NotFoundError`. + + tf.gfile.Glob may crash with + + ``` tensorflow.python.framework.errors_impl.NotFoundError: xy/model.ckpt-1130761_temp_9cb4cb0b0f5f4382b5ea947aadfb7a40; No such file or directory + ``` - Standard glob.glob does not have this bug, but does not hangle gs://... - So let's use tf.gfile.Glob twice to handle most concurrency problems. + Standard glob.glob does not have this bug, but does not handle multiple + filesystems (e.g. `gs://`), so we call tf.gfile.Glob, the first time possibly + catching the `NotFoundError`. + + Args: + pattern: str, glob pattern. + + Returns: + list matching filepaths. """ try: return tf.gfile.Glob(pattern) except tf.errors.NotFoundError: return tf.gfile.Glob(pattern) + def _read_stepfiles_list(path_prefix, path_suffix=".index", min_steps=0): """Return list of StepFiles sorted by step from files at path_prefix.""" stepfiles = [] - for filename in _try_twice_tf_glob(path_prefix + '*-[0-9]*' + path_suffix): + for filename in _try_twice_tf_glob(path_prefix + "*-[0-9]*" + path_suffix): basename = filename[:-len(path_suffix)] if len(path_suffix) else filename try: steps = int(basename.rsplit("-")[-1]) diff --git a/tensor2tensor/utils/cloud.py b/tensor2tensor/utils/cloud.py new file mode 100644 index 000000000..937c6ee46 --- /dev/null +++ b/tensor2tensor/utils/cloud.py @@ -0,0 +1,382 @@ +# coding=utf-8 +# Copyright 2017 The Tensor2Tensor Authors.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Launch on GCP.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import contextlib +import json +import multiprocessing.pool as mp +import os +import random +import signal +import socket +import subprocess as sp +import time + +from six.moves import input # pylint: disable=redefined-builtin +import tensorflow as tf + +TPU_IP = "10.240.%d.2" +TPU_PORT = 8470 +TPU_PROFILE_PORT = 8466 +TB_PORT = 6006 + +# TODO(rsepassi): +# --cloud_zone +# --cloud_project + + +class CloudState(object): + """Manage state across multiple trainer runs.""" + + def __init__(self): + self._tmp_dir = os.path.expanduser("~/.t2t/cloud_state") + tf.gfile.MakeDirs(self._tmp_dir) + + def cleanup(self, current_vm_name=None, current_tpu_name=None): + process_pids = os.listdir(self._tmp_dir) + for pid in process_pids: + try: + # Check if trainer pid is still running + os.kill(int(pid), 0) + except OSError: + # Trainer died ungracefully + pid_file = os.path.join(self._tmp_dir, pid) + with tf.gfile.Open(pid_file) as f: + info = json.loads(f.read()) + + # Kill possibly zombie tunnel process + try: + os.kill(info["tunnel_pid"], signal.SIGTERM) + except OSError: + pass + + # Delete VM and TPU if requested + del_vm = False + del_tpu = False + if info["delete_on_done"]: + if (info["vm_name"] != current_vm_name and + info["vm_name"] in zip(*list_vm_names_and_ips())[0]): + print("Old VM %s found. Delete?" % info["vm_name"]) + if confirm(): + del_vm = True + if (info["tpu_name"] != current_tpu_name and + info["tpu_name"] in zip(*list_tpu_names_and_ips())[0]): + print("Old TPU %s found. Delete?" % info["tpu_name"]) + if confirm(): + del_tpu = True + + results = [] + pool = mp.Pool(2) + if del_vm: + results.append(pool.apply_async(delete_vm, (info["vm_name"],))) + if del_tpu: + results.append(pool.apply_async(delete_tpu, (info["tpu_name"],))) + _ = [res.get() for res in results] + + # Remove the now cleaned up state file + tf.gfile.Remove(pid_file) + + def delete_current(self): + pid_file = os.path.join(self._tmp_dir, str(os.getpid())) + if tf.gfile.Exists(pid_file): + tf.gfile.Remove(pid_file) + + def add_current(self, tunnel_pid, vm_name, tpu_name, delete_on_done): + state = { + "tunnel_pid": tunnel_pid, + "vm_name": vm_name, + "tpu_name": tpu_name, + "delete_on_done": delete_on_done, + } + + with tf.gfile.Open(os.path.join(self._tmp_dir, str(os.getpid())), "w") as f: + f.write(json.dumps(state)) + + +@contextlib.contextmanager +def cloud_tpu(vm_name, tpu_name, delete_on_done=False): + """Gets or creates a VM and TPU instance, and forwards ports. + + Args: + vm_name: str, name of VM. + tpu_name: str, name of TPU instance. + delete_on_done: bool, whether to delete the instances when done. + + Yields: + master: str, grpc master pointing to the TPU instance. 
+ """ + state = CloudState() + # Read state from previous processes and possibly cleanup + state.cleanup(current_vm_name=vm_name, current_tpu_name=tpu_name) + + done_str = "" if delete_on_done else "NOT " + print("Will %sdelete VM and TPU instance on done." % done_str) + assert confirm() + _, tpu_ip = create_vm_tpu_pair(vm_name, tpu_name) + with tpu_tunnel(vm_name, tpu_ip) as (local_ports, tunnel_pid): + master = "grpc://localhost:%d" % local_ports["tpu"] + + state.add_current(tunnel_pid, vm_name, tpu_name, delete_on_done) + + yield master + + if delete_on_done: + pool = mp.Pool(2) + vm_res = pool.apply_async(delete_vm, (vm_name,)) + tpu_res = pool.apply_async(delete_tpu, (tpu_name,)) + vm_res.get() + tpu_res.get() + + # Cleanup state from this process + state.delete_current() + + +class Gcloud(object): + """gcloud command strings.""" + # Note these can be modified by set_versions + VM_VERSION = "tf-1-5" + TPU_VERSION = "1.5" + + @classmethod + def set_versions(cls, vm, tpu): + cls.VM_VERSION = vm + cls.TPU_VERSION = tpu + + @classmethod + def create_vm(cls): + create_vm_str = """ + gcloud compute instances create {name} \ + --machine-type=n1-standard-8 \ + --image-family=%s \ + --image-project=ml-images \ + --scopes=https://www.googleapis.com/auth/cloud-platform + """ % cls.VM_VERSION + return create_vm_str + + DELETE_VM = "gcloud compute instances delete {name} --quiet" + + @classmethod + def create_tpu(cls): + create_tpu_str = """ + gcloud alpha compute tpus create \ + {name} \ + --range={tpu_ip}/29 \ + --version=%s + """ % cls.TPU_VERSION + return create_tpu_str + + DELETE_TPU = "gcloud alpha compute tpus delete {name} --quiet" + + LIST_TPU = "gcloud alpha compute tpus list" + LIST_VM = "gcloud compute instances list" + + SSH_LOCAL_PORT_FORWARD = "-L {local_port}:{host}:{remote_port}" + SSH_TUNNEL = """ + gcloud compute ssh {name} -- -N + """ + + +@contextlib.contextmanager +def shell_background(cmd_, **kwargs): + """Run process in background, join on exit.""" + args = format_cmd(cmd_, **kwargs) + process = sp.Popen(args) + try: + yield process + finally: + if process.poll() is None: + process.terminate() + time.sleep(1) + if process.poll() is None: + process.kill() + time.sleep(1) + if process.poll() is None: + raise ValueError( + "Cannot kill process %d - please kill manually" % process.pid) + time.sleep(1) + + +def shell_output(cmd_, **kwargs): + return sp.check_output(format_cmd(cmd_, **kwargs)) + + +def shell_run(cmd_, **kwargs): + return sp.check_call(format_cmd(cmd_, **kwargs)) + + +def format_cmd(cmd_, **kwargs): + return cmd_.format(**kwargs).strip().split() + + +def create_vm(vm_name): + out = shell_output(Gcloud.create_vm(), name=vm_name) + return out.split("\n")[1:-1][0].split()[4] + + +def list_tpu_names_and_ips(): + list_out = shell_output(Gcloud.LIST_TPU) + lines = [l.split() for l in list_out.split("\n")[1:-1]] + names_and_ips = [(l[0].strip(), l[3].strip().split(":")[0]) for l in lines] + return names_and_ips + + +def list_vm_names_and_ips(): + list_out = shell_output(Gcloud.LIST_VM) + lines = [l.split() for l in list_out.split("\n")[1:-1]] + names_and_ips = [(l[0].strip(), l[4].strip()) for l in lines] + return names_and_ips + + +def unique_tpu_ip(tpu_names_and_ips): + inuse = [el[1].split(".")[2] for el in tpu_names_and_ips] + selection = random.choice(list(set(range(256)) - set(inuse))) + return TPU_IP % selection + + +def delete_tpu(tpu_name): + shell_run(Gcloud.DELETE_TPU, name=tpu_name) + + +def delete_vm(vm_name): + shell_run(Gcloud.DELETE_VM, name=vm_name) + + 
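For orientation, here is a minimal usage sketch of the `cloud_tpu` context manager defined above: it gets or creates the VM/TPU pair, tunnels the TPU port over SSH, and yields a local gRPC master address. The instance names and the commented-out trainer call below are illustrative only; the `cloud_tpu(vm_name, tpu_name, delete_on_done=...)` signature is the one from this file.
```python
# Sketch only: assumes tensor2tensor is installed and gcloud is configured.
from tensor2tensor.utils import cloud

def run_with_cloud_tpu():
  # Each TPU instance can only train one model at a time, so use distinct
  # names for concurrent jobs. These names are illustrative.
  vm_name = "my-t2t-vm"
  tpu_name = "my-t2t-tpu"
  with cloud.cloud_tpu(vm_name, tpu_name, delete_on_done=True) as master:
    # `master` is a local gRPC address tunneled to the TPU instance,
    # e.g. "grpc://localhost:12345"; pass it to the trainer as the master.
    print("TPU master: %s" % master)
    # train(master=master)  # hypothetical training entry point
```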
+def create_tpu(tpu_name, tpu_names_and_ips=None): + tpu_names_and_ips = tpu_names_and_ips or list_tpu_names_and_ips() + tpu_ip = unique_tpu_ip(tpu_names_and_ips) + + rounded_tpu_ip = tpu_ip + if rounded_tpu_ip.endswith("2"): + rounded_tpu_ip = rounded_tpu_ip[:-1] + "0" + + shell_run(Gcloud.create_tpu(), name=tpu_name, tpu_ip=rounded_tpu_ip) + return tpu_ip + + +@contextlib.contextmanager +def tpu_tunnel(vm_name, tpu_ip): + """Forward TPU and TPU profiling ports.""" + local_ports = { + "tpu": get_open_port(), + "tpu_profile": get_open_port(), + } + + tpu = format_cmd( + Gcloud.SSH_LOCAL_PORT_FORWARD, + local_port=local_ports["tpu"], + host=tpu_ip, + remote_port=TPU_PORT) + tpu_profile = format_cmd( + Gcloud.SSH_LOCAL_PORT_FORWARD, + local_port=local_ports["tpu_profile"], + host=tpu_ip, + remote_port=TPU_PROFILE_PORT) + + args = format_cmd(Gcloud.SSH_TUNNEL, name=vm_name) + tpu + tpu_profile + # Launch process running in background + with shell_background(" ".join(args)) as tunnel_process: + time.sleep(1) + if tunnel_process.poll() is not None: + raise ValueError("SSH failed") + tf.logging.info("Set up port fowarding. Local ports: %s", local_ports) + yield local_ports, tunnel_process.pid + + +def create_vm_tpu_pair(vm_name, tpu_name, reuse_if_exists=True): + """Create a VM and paired TPU instance. + + Args: + vm_name: str, name for VM. + tpu_name: str, name for TPU instance. + reuse_if_exists: bool, if True, this will act as a get or create. If False + and vm_name or tpu_name already exists, will error. + + Returns: + tuple: (vm_ip, tpu_ip) + + Raises: + ValueError: if instance exists but reuse_if_exists=False. + """ + vm_info = list_vm_names_and_ips() + tpu_info = list_tpu_names_and_ips() + + vm_names = zip(*vm_info)[0] + tpu_names = zip(*tpu_info)[0] + + make_vm = False + vm_ip = None + if vm_name in vm_names: + if not reuse_if_exists: + raise ValueError( + "VM %s already exists and reuse_if_exists=False" % vm_name) + tf.logging.info("VM %s already exists, reusing.", vm_name) + vm_ip = vm_info[vm_names.index(vm_name)][1] + else: + print("Creating VM %s" % vm_name) + assert confirm() + make_vm = True + + make_tpu = False + tpu_ip = None + if tpu_name in tpu_names: + if not reuse_if_exists: + raise ValueError( + "TPU instance %s already exists and reuse_if_exists=False" % tpu_name) + tf.logging.info("TPU %s already exists, reusing.", tpu_name) + tpu_ip = tpu_info[tpu_names.index(tpu_name)][1] + else: + print("Creating TPU instance %s" % tpu_name) + assert confirm() + make_tpu = True + + # Create VM and TPU in parallel + pool = mp.Pool(2) + vm_res = None + tpu_res = None + if make_vm: + vm_res = pool.apply_async(create_vm, (vm_name,)) + if make_tpu: + tpu_res = pool.apply_async(create_tpu, (tpu_name, tpu_info)) + if vm_res is not None: + vm_ip = vm_res.get() + if tpu_res is not None: + tpu_ip = tpu_res.get() + + tf.logging.info("VM (Name, IP): %s, %s", vm_name, vm_ip) + tf.logging.info("TPU (Name, IP): %s, %s", tpu_name, tpu_ip) + tf.logging.info( + "To delete the VM, run: %s", Gcloud.DELETE_VM.format(name=vm_name)) + tf.logging.info( + "To delete the TPU instance, run: %s", + Gcloud.DELETE_TPU.format(name=tpu_name)) + return vm_ip, tpu_ip + + +def get_open_port(): + s = socket.socket() + s.bind(("", 0)) + s.listen(1) + port = s.getsockname()[1] + s.close() + return port + + +def confirm(): + out = input("Confirm (Y/n)? 
> ") + return out == "Y" diff --git a/tensor2tensor/utils/decoding.py b/tensor2tensor/utils/decoding.py index 9f3f5a131..8133eeca4 100644 --- a/tensor2tensor/utils/decoding.py +++ b/tensor2tensor/utils/decoding.py @@ -29,6 +29,7 @@ from six.moves import input # pylint: disable=redefined-builtin from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators.problem import problem_hparams_to_features import tensorflow as tf FLAGS = tf.flags.FLAGS @@ -68,6 +69,7 @@ def log_decode_results(inputs, identity_output=False): """Log inference results.""" is_image = "image" in problem_name + decoded_inputs = None if is_image and save_images: save_path = os.path.join(model_dir, "%s_prediction_%d.jpg" % (problem_name, prediction_idx)) @@ -81,6 +83,7 @@ def log_decode_results(inputs, tf.logging.info("Inference results INPUT: %s" % decoded_inputs) decoded_targets = None + decoded_outputs = None if identity_output: decoded_outputs = "".join(map(str, outputs.flatten())) if targets is not None: @@ -93,7 +96,7 @@ def log_decode_results(inputs, tf.logging.info("Inference results OUTPUT: %s" % decoded_outputs) if targets is not None: tf.logging.info("Inference results TARGET: %s" % decoded_targets) - return decoded_outputs, decoded_targets + return decoded_inputs, decoded_outputs, decoded_targets def decode_from_dataset(estimator, @@ -138,9 +141,12 @@ def decode_from_dataset(estimator, parts = output_filepath.split(".") parts[-1] = "targets" target_filepath = ".".join(parts) + parts[-1] = "inputs" + input_filepath = ".".join(parts) output_file = tf.gfile.Open(output_filepath, "w") target_file = tf.gfile.Open(target_filepath, "w") + input_file = tf.gfile.Open(input_filepath, "w") problem_hparams = hparams.problems[problem_idx] # Inputs vocabulary is set to targets if there are no inputs in the problem, @@ -196,13 +202,14 @@ def decode_from_dataset(estimator, # Write out predictions if decode_to_file passed if decode_to_file: - for i, (decoded_output, decoded_target) in enumerate(decoded_outputs): + for i, (d_input, d_output, d_target) in enumerate(decoded_outputs): beam_score_str = "" if decode_hp.write_beam_scores: beam_score_str = "\t%.2f" % decoded_scores[i] output_file.write( - str(decoded_output) + beam_score_str + decode_hp.delimiter) - target_file.write(str(decoded_target) + decode_hp.delimiter) + str(d_output) + beam_score_str + decode_hp.delimiter) + target_file.write(str(d_target) + decode_hp.delimiter) + input_file.write(str(d_input) + decode_hp.delimiter) if (decode_hp.num_samples >= 0 and num_predictions >= decode_hp.num_samples): @@ -211,6 +218,7 @@ def decode_from_dataset(estimator, if decode_to_file: output_file.close() target_file.close() + input_file.close() tf.logging.info("Completed inference on %d samples." 
% num_predictions) # pylint: disable=undefined-loop-variable @@ -261,9 +269,9 @@ def input_fn(): for k, beam in enumerate(output_beams): tf.logging.info("BEAM %d:" % k) score = scores and scores[k] - decoded_outputs, _ = log_decode_results(result["inputs"], beam, - problem_name, None, - inputs_vocab, targets_vocab) + _, decoded_outputs, _ = log_decode_results(result["inputs"], beam, + problem_name, None, + inputs_vocab, targets_vocab) beam_decodes.append(decoded_outputs) if decode_hp.write_beam_scores: beam_scores.append(score) @@ -274,9 +282,9 @@ def input_fn(): else: decodes.append("\t".join(beam_decodes)) else: - decoded_outputs, _ = log_decode_results(result["inputs"], - result["outputs"], problem_name, - None, inputs_vocab, targets_vocab) + _, decoded_outputs, _ = log_decode_results( + result["inputs"], result["outputs"], problem_name, + None, inputs_vocab, targets_vocab) decodes.append(decoded_outputs) # Reversing the decoded inputs and outputs because they were reversed in @@ -472,23 +480,26 @@ def _interactive_input_fn(hparams): x = [num_samples, decode_length, len(input_ids)] + input_ids assert len(x) < const_array_size x += [0] * (const_array_size - len(x)) - yield { + features = { "inputs": np.array(x).astype(np.int32), } elif input_type == "image": input_path = input_string img = vocabulary.encode(input_path) - yield { + features = { "inputs": img.astype(np.int32), } elif input_type == "label": input_ids = [int(input_string)] x = [num_samples, decode_length, len(input_ids)] + input_ids - yield { + features = { "inputs": np.array(x).astype(np.int32), } else: raise Exception("Unsupported input type.") + for k, v in six.iteritems(problem_hparams_to_features(p_hparams)): + features[k] = np.array(v).astype(np.int32) + yield features def show_and_save_image(img, save_path): diff --git a/tensor2tensor/utils/diet.py b/tensor2tensor/utils/diet.py index 7ecfba693..19702338b 100644 --- a/tensor2tensor/utils/diet.py +++ b/tensor2tensor/utils/diet.py @@ -193,10 +193,10 @@ def update_variable(self, var, grad_var): beta2_pow = tf.pow(params.beta2, global_step) if params.factored_second_moment_accumulator and len(var.shape) == 2: vr_update = tf.assign(slots["adam_vr"], slots["adam_vr"] * params.beta2 + - tf.reduce_mean(grad_squared, 1, keep_dims=True) * + tf.reduce_mean(grad_squared, 1, keepdims=True) * (1.0 - params.beta2)) vc_update = tf.assign(slots["adam_vc"], slots["adam_vc"] * params.beta2 + - tf.reduce_mean(grad_squared, 0, keep_dims=True) * + tf.reduce_mean(grad_squared, 0, keepdims=True) * (1.0 - params.beta2)) with tf.control_dependencies([vr_update, vc_update]): vr = tf.sqrt(slots["adam_vr"] / (1.0 - beta2_pow)) + params.epsilon diff --git a/tensor2tensor/utils/flags.py b/tensor2tensor/utils/flags.py index 23da5ed3c..f5d28e9f3 100644 --- a/tensor2tensor/utils/flags.py +++ b/tensor2tensor/utils/flags.py @@ -74,21 +74,22 @@ "Whether to use the '-test' data for EVAL (and PREDICT).") flags.DEFINE_integer("keep_checkpoint_max", 20, "How many recent checkpoints to keep.") -flags.DEFINE_bool("experimental_optimize_placement", False, - "Optimize ops placement with experimental session options.") +flags.DEFINE_bool("enable_graph_rewriter", False, + "Enable graph optimizations that are not on by default.") flags.DEFINE_integer("keep_checkpoint_every_n_hours", 10000, "Number of hours between each checkpoint to be saved. " "The default value 10,000 hours effectively disables it.") flags.DEFINE_integer("save_checkpoints_secs", 0, "Save checkpoints every this many seconds. 
" "Default=0 means save checkpoints each x steps where x " - "depends on iterations_per_loop and local_eval_frequency.") + "is max(iterations_per_loop, local_eval_frequency).") flags.DEFINE_bool("log_device_placement", False, "Whether to log device placement.") # Distributed training flags -flags.DEFINE_integer("local_eval_frequency", 2000, - "Run evaluation every this steps during local training.") +flags.DEFINE_integer("local_eval_frequency", 1000, + "Save checkpoints and run evaluation every N steps during " + "local training.") flags.DEFINE_bool("locally_shard_to_cpu", False, "Use CPU as a sharding device running locally. This allows " "to test sharded model construction on a machine with 1 GPU.") diff --git a/tensor2tensor/utils/optimize.py b/tensor2tensor/utils/optimize.py index c0cbf5677..a497d56bd 100644 --- a/tensor2tensor/utils/optimize.py +++ b/tensor2tensor/utils/optimize.py @@ -33,19 +33,27 @@ def optimize(loss, learning_rate, hparams, use_tpu=False): """Minimize loss.""" loss = weight_decay_and_noise(loss, hparams, learning_rate) loss = tf.identity(loss, name="total_loss") - log_variable_sizes() + log_variable_sizes(verbose=hparams.summarize_vars) diet_vars = [ v for v in tf.global_variables() if v.dtype == dtypes.float16_ref ] - log_variable_sizes(diet_vars, "Diet Variables") - opt = ConditionalOptimizer(hparams.optimizer, learning_rate, hparams) + log_variable_sizes( + diet_vars, "Diet Variables", verbose=hparams.summarize_vars) + opt = ConditionalOptimizer(hparams.optimizer, learning_rate, hparams, use_tpu) if use_tpu: opt = tf.contrib.tpu.CrossShardOptimizer(opt) tf.summary.scalar("learning_rate", learning_rate) - opt_summaries = ["loss"] + opt_summaries = ["loss", "global_gradient_norm"] if hparams.summarize_grads: - opt_summaries.extend(["gradients", "gradient_norm", "global_gradient_norm"]) + tf.logging.info("Summarizing gradients") + opt_summaries.extend(["gradients", "gradient_norm"]) + + if hparams.clip_grad_norm: + tf.logging.info("Clipping gradients, norm: %0.5f", hparams.clip_grad_norm) + if hparams.grad_noise_scale: + tf.logging.info("Adding noise to gradients, noise scale: %0.5f", + hparams.grad_noise_scale) train_op = tf.contrib.layers.optimize_loss( name="training", @@ -63,7 +71,13 @@ def optimize(loss, learning_rate, hparams, use_tpu=False): class ConditionalOptimizer(tf.train.Optimizer): """Conditional optimizer.""" - def __init__(self, optimizer_name, lr, hparams): + def __init__(self, optimizer_name, lr, hparams, use_tpu=False): + if optimizer_name == "Adam" and use_tpu: + # LazyAdamOptimizer does not work on TPU + optimizer_name = "TrueAdam" + + tf.logging.info("Using optimizer %s", optimizer_name) + if optimizer_name == "Adam": # We change the default epsilon for Adam and re-scale lr. # Using LazyAdam as it's much faster for large vocabulary embeddings. 
@@ -74,9 +88,10 @@ def __init__(self, optimizer_name, lr, hparams): epsilon=hparams.optimizer_adam_epsilon) elif optimizer_name == "Momentum": self._opt = tf.train.MomentumOptimizer( - lr, momentum=hparams.optimizer_momentum_momentum) + lr, + momentum=hparams.optimizer_momentum_momentum, + use_nesterov=hparams.optimizer_momentum_nesterov) elif optimizer_name == "YellowFin": - tf.logging.info("Init YellowFin Optimizer.") self._opt = yellowfin.YellowFinOptimizer( learning_rate=lr, momentum=hparams.optimizer_momentum_momentum) elif optimizer_name == "TrueAdam": @@ -86,8 +101,7 @@ def __init__(self, optimizer_name, lr, hparams): beta2=hparams.optimizer_adam_beta2, epsilon=hparams.optimizer_adam_epsilon) elif optimizer_name == "Adafactor": - self._opt = AdafactorOptimizer( - lr / 500.0, epsilon=hparams.optimizer_adam_epsilon) + self._opt = AdafactorOptimizer(lr / 500.0) else: self._opt = tf.contrib.layers.OPTIMIZER_CLS_NAMES[optimizer_name](lr) @@ -131,66 +145,83 @@ def piecewise_learning_rate(step, boundaries, values): step, boundaries, values, name="piecewise_lr") -def learning_rate_decay(hparams, num_worker_replicas=1): - """Inverse-decay learning rate until warmup_steps, then decay.""" - if hparams.learning_rate_decay_scheme == "piecewise": - return piecewise_learning_rate(tf.train.get_or_create_global_step(), +def learning_rate_decay(hparams, warmup_steps=0): + """Learning rate decay multiplier.""" + scheme = hparams.learning_rate_decay_scheme + warmup_steps = tf.to_float(warmup_steps) + global_step = tf.to_float(tf.train.get_or_create_global_step()) + + if not scheme or scheme == "none": + return tf.constant(1.) + + tf.logging.info("Applying learning rate decay: %s.", scheme) + + if scheme == "exp": + decay_steps = hparams.learning_rate_decay_steps + p = (global_step - warmup_steps) / decay_steps + if hparams.learning_rate_decay_staircase: + p = tf.floor(p) + return tf.pow(hparams.learning_rate_decay_rate, p) + + if scheme == "piecewise": + return piecewise_learning_rate(global_step, hparams.learning_rate_boundaries, hparams.learning_rate_multiples) - warmup_steps = tf.to_float( - hparams.learning_rate_warmup_steps * num_worker_replicas) - num_train_steps = hparams.train_steps - step = tf.to_float(tf.train.get_or_create_global_step()) - - if hparams.learning_rate_decay_scheme == "noam": + if scheme == "noam": return 5000.0 * hparams.hidden_size**-0.5 * tf.minimum( - (step + 1) * warmup_steps**-1.5, (step + 1)**-0.5) - elif hparams.learning_rate_decay_scheme == "exp100k": - return 0.94**(step // 100000) - elif hparams.learning_rate_decay_scheme == "cosine": + (global_step + 1) * warmup_steps**-1.5, (global_step + 1)**-0.5) + + if scheme == "cosine": cycle_steps = hparams.learning_rate_cosine_cycle_steps - return 0.5 * (1 + tf.cos(np.pi * (step % cycle_steps) / cycle_steps)) - elif hparams.learning_rate_decay_scheme == "cyclelinear10x": + cycle_position = global_step % (2 * cycle_steps) + cycle_position = cycle_steps - tf.abs(cycle_steps - cycle_position) + return 0.5 * (1 + tf.cos(np.pi * cycle_position / cycle_steps)) + + if scheme == "cyclelinear10x": # Cycle the rate linearly by 10x every warmup_steps, up and down. - cycle_steps = hparams.learning_rate_warmup_steps - cycle_position = step % (2 * cycle_steps) + cycle_steps = warmup_steps + cycle_position = global_step % (2 * cycle_steps) cycle_position = tf.to_float( # Normalize to the interval [-1, 1]. cycle_position - cycle_steps) / float(cycle_steps) cycle_position = 1.0 - tf.abs(cycle_position) # 0 to 1 and back to 0. 
return (cycle_position + 0.1) * 3.0 # 10x difference each cycle (0.3-3). - inv_base = tf.exp(tf.log(0.01) / warmup_steps) - inv_decay = inv_base**(warmup_steps - step) - if hparams.learning_rate_decay_scheme == "sqrt": - decay = _sqrt_decay(step - warmup_steps) - elif hparams.learning_rate_decay_scheme == "exp": - total_steps = num_train_steps - warmup_steps - assert num_train_steps > hparams.learning_rate_warmup_steps - assert hparams.learning_rate_minimum is not None, "Must specify final LR" - total_steps = num_train_steps - hparams.learning_rate_warmup_steps - decay_needed = hparams.learning_rate_minimum / hparams.learning_rate - decay_rate = decay_needed**(1.0 / total_steps) - tf.logging.info("Decay rate: %f. LR %f -> %f", decay_rate, - hparams.learning_rate, hparams.learning_rate_minimum) - decay = _exp_decay_after(step, decay_rate, - hparams.learning_rate_warmup_steps) - return decay - elif hparams.learning_rate_decay_scheme == "exp10k": - decay = _exp_decay_after(step - warmup_steps, 0.9995, - num_train_steps - warmup_steps - 10000) - elif hparams.learning_rate_decay_scheme == "exp50k": - decay = _exp_decay_after(step - warmup_steps, 0.99995, - num_train_steps - warmup_steps - 50000) - elif hparams.learning_rate_decay_scheme == "exp500k": - decay = _exp_decay_after(step - warmup_steps, 0.9999955, - num_train_steps - warmup_steps - 500000) - elif hparams.learning_rate_decay_scheme == "none": - decay = tf.constant(1.0) + if scheme == "sqrt": + return _sqrt_decay(global_step - warmup_steps) + + raise ValueError("Unrecognized learning rate decay scheme: %s" % + hparams.learning_rate_decay_scheme) + + +def learning_rate_warmup(warmup_steps, warmup_schedule="exp"): + """Learning rate warmup multiplier.""" + if not warmup_steps: + return tf.constant(1.) + + tf.logging.info("Applying %s learning rate warmup for %d steps", + warmup_schedule, warmup_steps) + + warmup_steps = tf.to_float(warmup_steps) + global_step = tf.to_float(tf.train.get_or_create_global_step()) + + if warmup_schedule == "exp": + return tf.exp(tf.log(0.01) / warmup_steps)**(warmup_steps - global_step) else: - raise ValueError("Unrecognized learning rate decay scheme: %s" % - hparams.learning_rate_decay_scheme) - return tf.where(step < warmup_steps, inv_decay, decay) + assert warmup_schedule == "linear" + start = tf.constant(0.35) + return ((tf.constant(1.) 
- start) / warmup_steps) * global_step + start + + +def learning_rate_decay_with_warmup(hparams, num_worker_replicas=1): + """Learning rate decay rate with warmup based on hparams.""" + warmup_steps = hparams.learning_rate_warmup_steps * num_worker_replicas + warmup = learning_rate_warmup(warmup_steps) + + decay = learning_rate_decay(hparams, warmup_steps) + + global_step = tf.train.get_or_create_global_step() + return tf.where(global_step < warmup_steps, warmup, decay) def weight_decay_and_noise(loss, hparams, learning_rate, var_list=None): @@ -198,7 +229,7 @@ def weight_decay_and_noise(loss, hparams, learning_rate, var_list=None): if var_list is None: var_list = tf.trainable_variables() - decay_vars = [v for v in var_list if len(v.shape.as_list()) > 1] + decay_vars = [v for v in var_list] noise_vars = [v for v in var_list if "/body/" in v.name] weight_decay_loss = weight_decay(hparams.weight_decay, decay_vars) @@ -218,6 +249,9 @@ def weight_noise(noise_rate, learning_rate, var_list): if not noise_rate: return [tf.no_op()] + tf.logging.info("Applying weight noise scaled by learning rate, " + "noise_rate: %0.5f", noise_rate) + noise_ops = [] for v in var_list: @@ -231,16 +265,19 @@ def weight_noise(noise_rate, learning_rate, var_list): return noise_ops -def weight_decay(decay_rate, var_list): +def weight_decay(decay_rate, var_list, skip_biases=True): """Apply weight decay to vars in var_list.""" if not decay_rate: return 0. + tf.logging.info("Applying weight decay, decay_rate: %0.5f", decay_rate) + weight_decays = [] for v in var_list: - # Weight decay - is_bias = len(v.shape.as_list()) <= 1 - if not is_bias: + # Weight decay. + # This is a heuristic way to detect biases that works for main tf.layers. + is_bias = len(v.shape.as_list()) == 1 and v.name.endswith("bias:0") + if not (skip_biases and is_bias): with tf.device(v.device): v_loss = tf.nn.l2_loss(v) weight_decays.append(v_loss) @@ -248,12 +285,13 @@ def weight_decay(decay_rate, var_list): return tf.add_n(weight_decays) * decay_rate -def log_variable_sizes(var_list=None, tag=None): +def log_variable_sizes(var_list=None, tag=None, verbose=False): """Log the sizes and shapes of variables, and the total size. Args: var_list: a list of variables; defaults to trainable_variables tag: a string; defaults to "Trainable Variables" + verbose: bool, if True, log every weight; otherwise, log total size only. """ if var_list is None: var_list = tf.trainable_variables() @@ -268,15 +306,20 @@ def log_variable_sizes(var_list=None, tag=None): for v_name in sorted(list(name_to_var)): v = name_to_var[v_name] v_size = int(np.prod(np.array(v.shape.as_list()))) - tf.logging.info("Weight %s\tshape %s\tsize %d", - v.name[:-2].ljust(80), - str(v.shape).ljust(20), v_size) + if verbose: + tf.logging.info("Weight %s\tshape %s\tsize %d", + v.name[:-2].ljust(80), + str(v.shape).ljust(20), v_size) total_size += v_size tf.logging.info("%s Total size: %d", tag, total_size) def get_variable_initializer(hparams): """Get variable initializer from hparams.""" + if not hparams.initializer: + return None + + tf.logging.info("Using variable initializer: %s", hparams.initializer) if hparams.initializer == "orthogonal": return tf.orthogonal_initializer(gain=hparams.initializer_gain) elif hparams.initializer == "uniform": @@ -295,61 +338,97 @@ def get_variable_initializer(hparams): class AdafactorOptimizer(tf.train.Optimizer): """Optimizer that implements the Adafactor algorithm. 
- Adafactor is similar to Adam, but seeks to reduce the memory - requirements due to the moment estimates. The auxiliary memory - requirements for an `AxB` weight matrix are `A+B` for Adafactor, - versus `2AB` for Adam. - - Adam is described in [Kingma et al., 2014](http://arxiv.org/abs/1412.6980) - ([pdf](http://arxiv.org/pdf/1412.6980.pdf)). - - The differences are as follows: - - 1. No momentum - this removes the first-moment estimate. - 2. For an AxB weight matrix, instead of keeping a full AxB second-moment - estimate matrix, Adafactor keeps only the row and column means of that - estimate matrix, and estimate the full second-moment estimate matrix - from on the fly, based on the means. - 3. Adafactor uses a variable decay rate for the second-moment estaimtes - - faster decay at the start of training and slower decay later. This - elimnates the awkwardness in Adam related to having biased moment - estimates at the start of training. - - For non-2d variables: - We initialize - ``` - t <- 0 - v <- zeros(shape(var)) - ``` - - The update rule is as follows: - ``` - t <- t + 1 - decay_horizon = min(t, t * relative_decay_horizon + absolute_decay_horizon) - decay_rate = 1 - 1 / decay_horizon - v <- decay_rate * v + (1 - decay_rate) * grad^2 - var <- var - lr * grad / (sqrt(v) + epsilon) - ``` - - For 2d variables: - We initialize - ``` - t <- 0 + Adafactor is similar to RMSProp (ADAM, etc.), but takes advantage of the + structure of weight matrices to use less memory and to be more resilient to + sudden large gradients. + + The RMSProp algorithm works on each component independently as follows: + w -= grad * learning_rate / sqrt(estimated_mean_square_grad) + + learning_rate is the desired update magnitude, and + estimated_mean_square_grad is computed by exponential smoothing of the + square of the gradient. + + Adafactor addresses two shortcomings of RMSProp: + + 1. In RMSProp (ADAM, etc), maintaining estimated_mean_square_grad requires + memory equal to the number of parameters. This can be an impediment to + training large models on GPU/TPU systems with limited memory. + + Adafactor uses less memory. + For an AxB weight matrix, instead of keeping a full AxB + estimated_mean_square_grad matrix, Adafactor keeps only + exponentially-smoothed row and column means, and bases its estimates on + those means. Thus the memory requirements drop from `2AB` to `A+B`. + + 2. Depending on the decay rate of the exponential smoothing, we run into one + of two problems. + + If the decay rate is high (short memory), we see the problem described + here - worse final quality: + On the Convergence of Adam and Beyond + https://openreview.net/forum?id=ryQu7f-RZ + + If the decay rate is low (long memory), then the estimate does not adjust + rapidly to suddenly large gradients, and the model diverges. + Suddenly large gradients (which we will call anomalies), may happen either + due to weird training data, or because the model has just learned something + important and can now rush to exploit it. Momentum (as in ADAM) can help + prevent divergence, but it also requires more memory. Gradient clipping + can also help prevent divergence, but it is irritating in that setting + the right threshold depends on the knowing the scale of the gradients. + + Adafactor uses a relatively long memory (setting the decay rate to + step_num^-0.8), but detects and corrects for anomalies. 
An anomaly + is detected if the mean-square gradient for the current step + (across the entire weight matrix) is much greater than the historical + average. When this occurs, we increase estimated_mean_square_grad + for the current step for all weights in the matrix. Note: it is important + to detect anomalies based on entire matrices, rather than individual + weights, since any individual weight may legitimately have a pattern + of many small gradients and occasional very large ones. + + HYPERPARAMETERS: + learning_rate: desired magnitude of variable updates. a scalar - can be a + constant, but more likely should have a warmup and then decay + proportionally to rsqrt(step_num) + epsilon: 1e-20 - a small floating point value to avoid division by zero. + horizon_exponent: 0.8 - a value between 0 and 1 - The effective decay + horizon of the second-moment estimator is step_num^horizon_exponent. + anomaly_threshold: 2.0 - a value greater than 1. Suppress anomalies + where the mean-square-gradients for a step exceed the long-term average + by at least this factor. + + ALGORITHM: + + We initialize + ``` + t <- 0 + if var is 2-dimensional: v_r <- zeros([num_rows]) v_c <- zeros([num_cols]) - ``` - - The update rule is as follows: - ``` - t <- t + 1 - decay_horizon = min(t, t * relative_decay_horizon + absolute_decay_horizon) - decay_rate = 1 - 1 / decay_horizon - v_r <- decay_rate * v_r + (1 - decay_rate) * reduce_mean(grad^2, 1) - v_c <- decay_rate * v_c + (1 - decay_rate) * reduce_mean(grad^2, 0) - approx_v = expand_dims(v_r, 1) * expand_dims(v_c, 0) / reduce_mean(v_c) - var <- var - lr * grad / (sqrt(approx_v) + epsilon) - ``` - + else: + v <- zeros(shape(var)) + ``` + + The update rule is as follows: + ``` + t <- t + 1 + decay_rate = 1 - t ^ (-horizon_exponent) + grad_squared = tf.square(grad) + epsilon + if var is 2-dimensional: + v_r <- decay_rate * v_r + (1 - decay_rate) * reduce_mean(grad_squared, 1) + v_c <- decay_rate * v_c + (1 - decay_rate) * reduce_mean(grad_squared, 0) + anomaly_factor = max(1.0, + reduce_mean(grad_squared) / reduce_mean(v_r) / anomaly_threshold) + est_v = anomaly_factor * outer_prod(v_r, v_c) / reduce_mean(v_r) + else: + v <- decay_rate * v + (1 - decay_rate) * grad_squared + anomaly_factor = max(1.0, + reduce_mean(grad_squared) / reduce_mean(v) / anomaly_threshold) + est_v = v * anomaly_factor + var <- var - lr * grad / sqrt(est_v) + ``` TODO(noam): write a paper. TODO(noam): we should also apply the 2d logic to the two final dimensions. of >2d convolutional kernels. @@ -357,9 +436,9 @@ class AdafactorOptimizer(tf.train.Optimizer): def __init__(self, learning_rate=0.001, - epsilon=1e-8, - relative_decay_horizon=0.2, - absolute_decay_horizon=100.0, + epsilon=1e-20, + horizon_exponent=0.8, + anomaly_threshold=2.0, use_locking=False, name="Adafactor"): """Construct a new Adafactor optimizer. @@ -369,27 +448,17 @@ def __init__(self, Args: learning_rate: A Tensor or a floating point value. The learning rate. epsilon: A small constant for numerical stability. - relative_decay_horizon: a floating point value <= 1 - absolute_decay_horizon: a floating point value (representing a step count) + horizon_exponent: a floating point value between 0 and 1 + anomaly_threshold: a floating point value >= 1.0 use_locking: If True use locks for update operations. name: Optional name for the operations created when applying gradients. Defaults to "AdafactorOptimizer". 
""" super(AdafactorOptimizer, self).__init__(use_locking, name) self._lr = learning_rate - self._relative_decay_horizon = relative_decay_horizon - self._absolute_decay_horizon = absolute_decay_horizon self._epsilon = epsilon - - def _prepare(self): - global_step = tf.to_float(tf.train.get_or_create_global_step()) + 1.0 - decay_horizon = tf.minimum(global_step, - global_step * self._relative_decay_horizon + - self._absolute_decay_horizon) - self._mixing_rate = 1.0 / decay_horizon - self._decay_rate = 1.0 - self._mixing_rate - self._epsilon = tf.to_float(self._epsilon) - self._lr = tf.to_float(self._lr) + self._horizon_exponent = horizon_exponent + self._anomaly_threshold = anomaly_threshold def _should_use_factored_second_moment_estimate(self, shape): """Should we use a factored second moment estimator. @@ -418,30 +487,78 @@ def _apply_dense(self, grad, var): return self._resource_apply_dense(grad, var) def _resource_apply_dense(self, grad, var): + grad_squared = tf.square(grad) + self._epsilon + grad_squared_mean = tf.reduce_mean(grad_squared) + lr = tf.to_float(self._lr) + global_step = tf.to_float(tf.train.get_or_create_global_step()) + 1.0 + # HACK: Make lr and global_step dependent on grad. + # This confounds the XLA rewriter and keeps it from fusing computations + # across different variables. This fusion is a bad for HBM usage, since + # it causes the gradients to persist in memory. + lr += grad_squared_mean * 1e-30 + global_step += grad_squared_mean * 1e-30 + # END HACK + mixing_rate = tf.pow(global_step, -self._horizon_exponent) + decay_rate = 1.0 - mixing_rate shape = var.get_shape().as_list() - grad_squared = tf.square(grad) updates = [] if self._should_use_factored_second_moment_estimate(shape): + grad_squared_row_mean = tf.reduce_mean(grad_squared, 1) + grad_squared_col_mean = tf.reduce_mean(grad_squared, 0) vr = self.get_slot(var, "vr") - new_vr = (self._decay_rate * vr + - self._mixing_rate * tf.reduce_mean(grad_squared, 1)) + new_vr = (decay_rate * vr + mixing_rate * grad_squared_row_mean) vc = self.get_slot(var, "vc") - new_vc = (self._decay_rate * vc + - self._mixing_rate * tf.reduce_mean(grad_squared, 0)) + new_vc = (decay_rate * vc + mixing_rate * grad_squared_col_mean) vr_update = tf.assign(vr, new_vr, use_locking=self._use_locking) vc_update = tf.assign(vc, new_vc, use_locking=self._use_locking) updates = [vr_update, vc_update] - vr = tf.sqrt(new_vr) + self._epsilon - vc = tf.sqrt(new_vc) + self._epsilon - vc /= tf.reduce_mean(vc) - denom = tf.expand_dims(vr, 1) * tf.expand_dims(vc, 0) + long_term_mean = tf.reduce_mean(new_vr) + anomaly_factor = self._anomaly_factor(grad_squared_mean, long_term_mean) + # This is the computation we should do. + # est_v = (tf.expand_dims(new_vr, 1) * tf.expand_dims(new_vc, 0) + # * anomaly_factor / long_term_mean) + # subtrahend = grad * lr / tf.sqrt(est_v) + # Instead we do the following, which is mathematically equivalent. 
+ r_factor = lr * tf.rsqrt(new_vr * anomaly_factor / long_term_mean) + c_factor = tf.rsqrt(new_vc) + subtrahend = ( + grad * tf.expand_dims(r_factor, 1) * tf.expand_dims(c_factor, 0)) else: v = self.get_slot(var, "v") - new_v = (self._decay_rate * v + self._mixing_rate * grad_squared) + new_v = decay_rate * v + mixing_rate * grad_squared v_update = tf.assign(v, new_v, use_locking=self._use_locking) updates = [v_update] - denom = tf.sqrt(new_v) + self._epsilon - subtrahend = self._lr * grad / denom + long_term_mean = tf.reduce_mean(new_v) + anomaly_factor = self._anomaly_factor(grad_squared_mean, long_term_mean) + # This is the computation we should do. + # est_v = (new_v * anomaly_factor) + # subtrahend = grad * lr / tf.sqrt(est_v) + # Instead we do the following, which is mathematically equivalent. + subtrahend = grad * (lr / tf.sqrt(anomaly_factor)) * tf.rsqrt(new_v) var_update = tf.assign_sub(var, subtrahend, use_locking=self._use_locking) updates = [var_update] + updates return tf.group(*updates) + + def _anomaly_factor(self, grad_squared_mean, long_term_mean): + """Multiplier for second-moment estimator, due to short-term anomalies. + + A step may have gradients with magnitudes much larger than the long-term + average. This can cause the model to diverge. In these cases, we want to + temporarily increase the second-moment estimators to reflect that these + steps are anomalous. + + It is important to make these calculations on whole weight matrices, rather + than on individual parameters, since we want to allow individual parameters + to have occasional large updates. + + Args: + grad_squared_mean: A scalar. The mean square gradient on the variable + for the current step. + long_term_mean: A scalar. The mean of the long-term second-moment + estimator. + Returns: + a scalar that should be multiplied into the second-moment-estimator for + this step.
+ """ + ratio = grad_squared_mean / long_term_mean + return tf.maximum(1.0, ratio / self._anomaly_threshold) diff --git a/tensor2tensor/utils/t2t_model.py b/tensor2tensor/utils/t2t_model.py index 8e5a76d67..0623a975e 100644 --- a/tensor2tensor/utils/t2t_model.py +++ b/tensor2tensor/utils/t2t_model.py @@ -29,6 +29,7 @@ import six from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators.problem import problem_hparams_to_features from tensor2tensor.layers import common_layers from tensor2tensor.utils import beam_search from tensor2tensor.utils import decoding @@ -107,8 +108,8 @@ def __init__(self, self._original_hparams = hparams self.set_mode(mode) - self._decode_hparams = copy.copy( - decode_hparams or decoding.decode_hparams()) + self._decode_hparams = copy.copy(decode_hparams or + decoding.decode_hparams()) self._data_parallelism = data_parallelism or eu.Parallelism([""]) self._num_datashards = self._data_parallelism.n self._ps_devices = self._data_parallelism.ps_devices @@ -149,7 +150,6 @@ def model_fn_sharded(self, sharded_features): dp = self._data_parallelism summarize_features(sharded_features, num_shards=dp.n) datashard_to_features = self._to_features_per_datashard(sharded_features) - if self.use_body_sharded: # MoE models override body_sharded transformed_features = dp(self.bottom, datashard_to_features) @@ -157,8 +157,8 @@ def model_fn_sharded(self, sharded_features): self._to_single_features_dict(transformed_features)) body_out, losses = self._normalize_body_output(body_out) if "training" in losses: - # If body has returned the training loss, the body outputs are - # considered the logits and no further work is done. + tf.logging.info("Skipping T2TModel top and loss because training loss " + "returned from body") sharded_logits = body_out else: sharded_logits = dp(self.top, body_out, datashard_to_features) @@ -189,10 +189,13 @@ def model_fn(self, features): transformed_features = self.bottom(features) with tf.variable_scope("body"): + tf.logging.info("Building model body") body_out = self.body(transformed_features) output, losses = self._normalize_body_output(body_out) if "training" in losses: + tf.logging.info("Skipping T2TModel top and loss because training loss " + "returned from body") logits = output else: logits = self.top(output, features) @@ -213,12 +216,16 @@ def bottom(self, features): self._problem_hparams.input_modality): do_reuse = input_modality.name in all_previous_modalities with tf.variable_scope(input_modality.name, reuse=do_reuse): + tf.logging.info("Transforming feature '%s' with %s.bottom", key, + input_modality.name) transformed_features[key] = input_modality.bottom(features[key]) all_previous_modalities.append(input_modality.name) # Transform the targets (for autoregressive models) target_modality = self._problem_hparams.target_modality with tf.variable_scope(target_modality.name): + tf.logging.info("Transforming 'targets' with %s.targets_bottom", + target_modality.name) transformed_features["targets"] = target_modality.targets_bottom( features["targets"]) @@ -256,6 +263,8 @@ def top(self, body_output, features): target_modality = self._problem_hparams.target_modality with tf.variable_scope(target_modality.name): + tf.logging.info("Transforming body output with %s.top", + target_modality.name) last_only = ( target_modality.top_is_pointwise and self.hparams.mode == tf.estimator.ModeKeys.PREDICT and @@ -285,20 +294,32 @@ def loss(self, logits, features): def optimize(self, loss, num_async_replicas=1): """Return a training op 
minimizing loss."""
-    use_tpu = self.hparams.use_tpu
-    lr = self.hparams.learning_rate * optimize.learning_rate_decay(self.hparams)
+    tf.logging.info("Base learning rate: %f", self.hparams.learning_rate)
+    lr = self.hparams.learning_rate
+    decay_rate = optimize.learning_rate_decay_with_warmup(self.hparams)
+    lr *= decay_rate
+    if self.hparams.learning_rate_minimum:
+      lr_min = float(self.hparams.learning_rate_minimum)
+      tf.logging.info("Applying learning rate minimum: %f", lr_min)
+      lr = tf.maximum(lr, tf.to_float(lr_min))
+    if num_async_replicas > 1:
+      tf.logging.info("Dividing learning rate by num_async_replicas: %d",
+                      num_async_replicas)
       lr /= math.sqrt(float(num_async_replicas))
-    train_op = optimize.optimize(loss, lr, self.hparams, use_tpu=use_tpu)
+    train_op = optimize.optimize(
+        loss, lr, self.hparams, use_tpu=common_layers.is_on_tpu())
     return train_op
 
   def set_mode(self, mode):
     """Set hparams with the given mode."""
+    tf.logging.info("Setting T2TModel mode to '%s'", mode)
     hparams = copy.copy(self._original_hparams)
     hparams.add_hparam("mode", mode)
     # When not in training mode, set all forms of dropout to zero.
     if mode != tf.estimator.ModeKeys.TRAIN:
       for key in hparams.values():
         if key.endswith("dropout"):
+          tf.logging.info("Setting hparams.%s to 0.0", key)
           setattr(hparams, key, 0.0)
     self._hparams = hparams
 
@@ -352,27 +373,15 @@ def eval_autoregressive(self, features=None, decode_length=50):
       losses: a dictionary: {loss-name (string): floating point `Scalar`}.
           Contains a single key "training".
     """
-    _, logits, losses = self._slow_greedy_infer(
-        features, decode_length=decode_length)
-    return logits, losses
+    results = self._slow_greedy_infer(features, decode_length=decode_length)
+    return results["logits"], results["losses"]
 
   def _fill_problem_hparams_features(self, features):
-    if features is None:
-      return
-
-    input_space_id, target_space_id = 0, 0
-    if self._problem_hparams:
-      input_space_id = self._problem_hparams.input_space_id
-      target_space_id = self._problem_hparams.target_space_id
-
-    if "problem_choice" not in features:
-      features["problem_choice"] = tf.constant(0, name="problem_choice")
-    if "input_space_id" not in features:
-      features["input_space_id"] = tf.constant(
-          input_space_id, name="input_space_id")
-    if "target_space_id" not in features:
-      features["target_space_id"] = tf.constant(
-          target_space_id, name="target_space_id")
+    if features is not None:
+      for k, v in six.iteritems(
+          problem_hparams_to_features(self._problem_hparams)):
+        if k not in features:
+          features[k] = tf.constant(v, name=k)
 
   def infer(self,
             features=None,
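The learning-rate handling in the `optimize()` hunk above reduces to a few lines of arithmetic: take the base rate from hparams, multiply it by the warmup/decay factor returned by `optimize.learning_rate_decay_with_warmup(hparams)`, optionally clamp it to a configured floor, and scale it down when training with several asynchronous replicas. The standalone sketch below restates that logic for readers skimming the diff; the helper name and the explicit `decay_factor` argument are illustrative stand-ins, not part of this change.

```
import math

import tensorflow as tf


def scaled_learning_rate(base_lr, decay_factor, lr_minimum=None,
                         num_async_replicas=1):
  """Illustrative sketch of the schedule applied in T2TModel.optimize."""
  # decay_factor stands in for the value of the warmup/decay schedule.
  lr = base_lr * decay_factor
  if lr_minimum:
    # Never let the effective rate drop below the configured floor.
    lr = tf.maximum(lr, tf.to_float(float(lr_minimum)))
  if num_async_replicas > 1:
    # Asynchronous workers effectively enlarge each step, so scale down.
    lr /= math.sqrt(float(num_async_replicas))
  return lr
```

With `learning_rate_minimum` unset, the clamp is skipped entirely, mirroring the conditional in the hunk above.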
@@ -393,7 +402,17 @@ def infer(self,
         the preference for longer translations.
 
     Returns:
-      samples: an integer `Tensor`.
+      A dict of decoding results {
+          "outputs": integer `Tensor` of decoded ids of shape
+              [batch_size, <= decode_length] if beam_size == 1 or
+              [batch_size, top_beams, <= decode_length]
+          "scores": decoding log probs from the beam search,
+              None if using greedy decoding (beam_size=1)
+      }
+      if slow greedy decoding is used then the dict will also contain {
+          "logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
+          "losses": a dictionary: {loss-name (string): floating point `Scalar`}
+      }
     """
     with self._eager_var_store.as_default():
       # TODO(rsepassi): Make decoding work with real-valued model outputs
@@ -411,12 +430,13 @@ def infer(self,
       beam_size = 1  # No use to run beam-search for a single class.
       if beam_size == 1:
         tf.logging.info("Greedy Decoding")
-        samples, _, _ = self._greedy_infer(features, decode_length)
+        results = self._greedy_infer(features, decode_length)
       else:
         tf.logging.info("Beam Decoding with beam size %d" % beam_size)
-        samples = self._beam_decode(
+        results = self._beam_decode(
             features, decode_length, beam_size, top_beams, alpha)
-      return samples
+
+      return results
 
   def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha):
     """Beam search decoding.
@@ -515,15 +535,13 @@ def symbols_to_logits_fn(ids):
       features["inputs"] = inputs_old
 
     # Return `top_beams` decodings (also remove initial id from the beam search)
-    return_scores = True  # TODO(lukaszkaiser): make it work multi-problem.
+    # TODO(lukaszkaiser): make it work multi-problem.
     if top_beams == 1:
-      if return_scores:
-        return {"outputs": ids[:, 0, 1:], "scores": scores}
-      return ids[:, 0, 1:]
+      samples = ids[:, 0, 1:]
     else:
-      if return_scores:
-        return {"outputs": ids[:, :top_beams, 1:], "scores": scores}
-      return ids[:, :top_beams, 1:]
+      samples = ids[:, :top_beams, 1:]
+
+    return {"outputs": samples, "scores": scores}
 
   def _greedy_infer(self, features, decode_length):
     """A greedy inference method.
@@ -535,9 +553,14 @@ def _greedy_infer(self, features, decode_length):
       decode_length: an integer. How many additional timesteps to decode.
 
     Returns:
-      samples: an integer `Tensor`.
-      logits: `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
-      losses: a dictionary: {loss-name (string): floating point `Scalar`}
+      A dict of decoding results {
+          "outputs": integer `Tensor` of decoded ids of shape
+              [batch_size, <= decode_length] if beam_size == 1 or
+              [batch_size, top_beams, <= decode_length]
+          "scores": None
+          "logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
+          "losses": a dictionary: {loss-name (string): floating point `Scalar`}
+      }
     """
     return self._slow_greedy_infer(features, decode_length)
 
@@ -551,9 +574,14 @@ def _slow_greedy_infer(self, features, decode_length):
       decode_length: an integer. How many additional timesteps to decode.
 
     Returns:
-      samples: an integer `Tensor`.
-      logits: `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
-      losses: a dictionary: {loss-name (string): floating point `Scalar`}
+      A dict of decoding results {
+          "outputs": integer `Tensor` of decoded ids of shape
+              [batch_size, <= decode_length] if beam_size == 1 or
+              [batch_size, top_beams, <= decode_length]
+          "scores": None
+          "logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
+          "losses": a dictionary: {loss-name (string): floating point `Scalar`}
+      }
     """
     if not features:
       features = {}
@@ -673,7 +701,12 @@ def fn_not_eos():
           features["partial_targets"])[1]
       result = tf.slice(result, [0, partial_target_length, 0, 0],
                         [-1, -1, -1, -1])
-    return result, logits, losses
+    return {
+        "outputs": result,
+        "scores": None,
+        "logits": logits,
+        "losses": losses,
+    }
 
   def sample(self, features):
     """Run the model and extract samples.
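Because `infer()` and the greedy helpers now hand back a dict instead of a bare tensor of samples, calling code has to unpack the result. A minimal sketch of the new calling convention, assuming `model` is an already-constructed `T2TModel` subclass and `features` is a prepared feature dict (both are placeholders, not defined in this diff):

```
# `model` and `features` are placeholders assumed to be built elsewhere.
results = model.infer(
    features, decode_length=50, beam_size=4, top_beams=1, alpha=0.6)

outputs = results["outputs"]    # [batch, <= decode_length] decoded ids.
scores = results["scores"]      # Beam-search log probs; None for greedy decoding.

# Only the slow greedy path populates these two entries.
logits = results.get("logits")  # [batch, time, 1, 1, vocab_size] or None.
losses = results.get("losses")  # e.g. {"training": <scalar>} or None.
```

Greedy decoding (`beam_size=1`) routes through `_slow_greedy_infer`, so it is the only path that returns `logits` and `losses`.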
@@ -714,9 +747,10 @@ def _shard_features(self, features): # pylint: disable=missing-docstring v_shape = [1] if v_shape == [1]: v = tf.tile(v, [self._num_datashards]) - sharded_features[k] = self._data_parallelism( - tf.identity, - tf.split(v, self._num_datashards, 0)) + sharded_features[k] = self._data_parallelism(tf.identity, + tf.split( + v, self._num_datashards, + 0)) return sharded_features def _to_features_per_datashard(self, features): @@ -782,14 +816,16 @@ def estimator_model_fn(cls, """ _create_dummy_vars() hparams = copy.deepcopy(hparams) - hparams.use_tpu = use_tpu # Instantiate model data_parallelism = None if not use_tpu and config: data_parallelism = config.data_parallelism - model = cls(hparams, mode, data_parallelism=data_parallelism, - decode_hparams=decode_hparams) + model = cls( + hparams, + mode, + data_parallelism=data_parallelism, + decode_hparams=decode_hparams) # PREDICT mode if mode == tf.estimator.ModeKeys.PREDICT: @@ -827,18 +863,16 @@ def estimator_model_fn(cls, # TRAIN mode assert mode == tf.estimator.ModeKeys.TRAIN - num_async_replicas = ( - 1 if (use_tpu or not config) - else config.t2t_device_info["num_async_replicas"]) - return model.estimator_spec_train(loss, - num_async_replicas=num_async_replicas) + num_async_replicas = (1 if (use_tpu or not config) else + config.t2t_device_info["num_async_replicas"]) + return model.estimator_spec_train( + loss, num_async_replicas=num_async_replicas) def estimator_spec_train(self, loss, num_async_replicas=1): """Construct EstimatorSpec for TRAIN mode.""" - use_tpu = self.hparams.use_tpu train_op = self.optimize(loss, num_async_replicas=num_async_replicas) - if use_tpu: + if common_layers.is_on_tpu(): _remove_summaries() # summaries not currently working on TPU return tf.contrib.tpu.TPUEstimatorSpec( tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op) @@ -846,25 +880,21 @@ def estimator_spec_train(self, loss, num_async_replicas=1): return tf.estimator.EstimatorSpec( tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op) - def estimator_spec_eval(self, - features, - logits, - labels, - loss): + def estimator_spec_eval(self, features, logits, labels, loss): """Construct EstimatorSpec for EVAL mode.""" hparams = self.hparams - use_tpu = hparams.use_tpu if not hasattr(hparams, "problem_instances"): raise NotImplementedError(_no_problem_err("estimator_spec_eval")) problem = hparams.problem_instances[0] - if use_tpu: + if common_layers.is_on_tpu(): eval_metrics_fn = _create_tpu_eval_metrics_fn(problem, hparams) _remove_summaries() return tf.contrib.tpu.TPUEstimatorSpec( tf.estimator.ModeKeys.EVAL, - eval_metrics=(eval_metrics_fn, [logits, labels]), loss=loss) + eval_metrics=(eval_metrics_fn, [logits, labels]), + loss=loss) else: eval_metrics_fns = metrics.create_evaluation_metrics([problem], hparams) eval_metrics = {} @@ -883,20 +913,20 @@ def estimator_spec_predict(self, features): infer_out = self.infer( features, beam_size=decode_hparams.beam_size, - top_beams=( - decode_hparams.beam_size if decode_hparams.return_beams else 1), + top_beams=(decode_hparams.beam_size + if decode_hparams.return_beams else 1), alpha=decode_hparams.alpha, decode_length=decode_hparams.extra_length) if isinstance(infer_out, dict): - # Beam searching outputs = infer_out["outputs"] scores = infer_out["scores"] else: outputs = infer_out scores = None - batched_problem_choice = (features["problem_choice"] * tf.ones( - (common_layers.shape_list(features["inputs"])[0],), dtype=tf.int32)) + batched_problem_choice = ( + features["problem_choice"] 
* tf.ones( + (common_layers.shape_list(features["inputs"])[0],), dtype=tf.int32)) predictions = { "outputs": outputs, "scores": scores, @@ -1017,13 +1047,6 @@ def _remove_summaries(): assert not g.get_collection(key) -def _clip_gradients_by_norm(grads_and_vars, clip_gradients): - """Clips gradients by global norm.""" - gradients, variables = zip(*grads_and_vars) - clipped_gradients, _ = tf.clip_by_global_norm(gradients, clip_gradients) - return list(zip(clipped_gradients, variables)) - - def _del_dict_nones(d): for k in list(d.keys()): if d[k] is None: diff --git a/tensor2tensor/utils/trainer_lib.py b/tensor2tensor/utils/trainer_lib.py index abf01d428..039b06e68 100644 --- a/tensor2tensor/utils/trainer_lib.py +++ b/tensor2tensor/utils/trainer_lib.py @@ -47,10 +47,7 @@ def create_session_config(log_device_placement=False, else: if enable_graph_rewriter: rewrite_options = rewriter_config_pb2.RewriterConfig() - rewrite_options.optimizers.append("pruning") - rewrite_options.optimizers.append("constfold") - rewrite_options.optimizers.append("arithmetic") - rewrite_options.optimizers.append("layout") + rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON graph_options = tf.GraphOptions(rewrite_options=rewrite_options) else: graph_options = tf.GraphOptions( @@ -87,7 +84,7 @@ def create_run_config(master="", num_shards=8, log_device_placement=False, save_checkpoints_steps=1000, - save_checkpoints_secs=0, + save_checkpoints_secs=None, keep_checkpoint_max=20, keep_checkpoint_every_n_hours=10000, num_gpus=1, @@ -114,19 +111,19 @@ def create_run_config(master="", enable_graph_rewriter=enable_graph_rewriter, gpu_mem_fraction=gpu_mem_fraction, use_tpu=use_tpu) - session_config = tf.ConfigProto( - allow_soft_placement=True, log_device_placement=log_device_placement) run_config_args = { "master": master, "model_dir": model_dir, "session_config": session_config, "save_summary_steps": 100, "save_checkpoints_steps": save_checkpoints_steps, - "save_checkpoints_secs": save_checkpoints_secs, "keep_checkpoint_max": keep_checkpoint_max, "keep_checkpoint_every_n_hours": keep_checkpoint_every_n_hours, "tf_random_seed": random_seed, } + if save_checkpoints_secs: + del run_config_args["save_checkpoints_steps"] + run_config_args["save_checkpoints_secs"] = save_checkpoints_secs run_config_cls = tf.contrib.learn.RunConfig # If using TPU, use TPU RunConfig, add TPUConfig, and add additional args @@ -135,7 +132,7 @@ def create_run_config(master="", tpu_config = tf.contrib.tpu.TPUConfig( iterations_per_loop=iterations_per_loop, num_shards=num_shards, - per_host_input_for_training=(num_shards <= 8), + per_host_input_for_training=True, initial_infeed_sleep_secs=tpu_infeed_sleep_secs) run_config_args["tpu_config"] = tpu_config @@ -175,8 +172,9 @@ def create_estimator(model_name, model_name, hparams, decode_hparams=decode_hparams, use_tpu=use_tpu) if use_tpu: - batch_size = hparams.tpu_batch_size_per_shard - batch_size *= run_config.tpu_config.num_shards + problem = hparams.problem_instances[0] + batch_size = (problem.tpu_batch_size_per_shard(hparams) * + run_config.tpu_config.num_shards) return tf.contrib.tpu.TPUEstimator( model_fn=model_fn, model_dir=run_config.model_dir, @@ -203,16 +201,19 @@ def create_hooks(use_tfdbg=False, use_dbgprofile=False, dbgprofile_kwargs=None, if use_dbgprofile: # Recorded traces can be visualized with chrome://tracing/ # The memory/tensor lifetime is also profiled + tf.logging.info("Using ProfilerHook") defaults = dict(save_steps=10, show_dataflow=True, show_memory=True) 
defaults.update(dbgprofile_kwargs) train_monitors.append(tf.contrib.hooks.ProfilerHook(**defaults)) if use_validation_monitor: + tf.logging.info("Using ValidationMonitor") train_monitors.append( tf.contrib.learn.monitors.ValidationMonitor( hooks=eval_hooks, **validation_monitor_kwargs)) if use_early_stopping: + tf.logging.info("Using EarlyStoppingHook") hook = metrics_hook.EarlyStoppingHook(**early_stopping_kwargs) # Adding to both training and eval so that eval aborts as well train_monitors.append(hook) @@ -243,6 +244,7 @@ def create_experiment(run_config, # HParams hparams.add_hparam("data_dir", data_dir) hparams.add_hparam("train_steps", train_steps) + hparams.add_hparam("eval_steps", eval_steps) add_problem_hparams(hparams, problem_name) # Estimator @@ -286,10 +288,13 @@ def create_experiment(run_config, every_n_steps=min_eval_frequency) # In-process eval (and possible early stopping) - local_schedules = ["train_and_evaluate", "continuous_train_and_eval"] + if schedule == "continuous_train_and_eval" and min_eval_frequency: + tf.logging.warn("ValidationMonitor only works with " + "--schedule=train_and_evaluate") use_validation_monitor = ( - schedule in local_schedules and min_eval_frequency) + schedule == "train_and_evaluate" and min_eval_frequency) # Distributed early stopping + local_schedules = ["train_and_evaluate", "continuous_train_and_eval"] use_early_stopping = ( schedule not in local_schedules and eval_early_stopping_steps) train_monitors, eval_hooks = create_hooks( diff --git a/tensor2tensor/visualization/TransformerVisualization.ipynb b/tensor2tensor/visualization/TransformerVisualization.ipynb index bec758327..91ae49ea1 100644 --- a/tensor2tensor/visualization/TransformerVisualization.ipynb +++ b/tensor2tensor/visualization/TransformerVisualization.ipynb @@ -15,33 +15,25 @@ { "cell_type": "code", "execution_count": 1, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ - "from __future__ import absolute_import\n", - "from __future__ import division\n", - "from __future__ import print_function\n", - "\n", - "import json\n", + "import os\n", "\n", "import tensorflow as tf\n", - "import numpy as np\n", "\n", - "from tensor2tensor.utils import t2t_model\n", - "from tensor2tensor.utils import decoding\n", - "from tensor2tensor.utils import devices\n", + "from tensor2tensor import problems\n", + "from tensor2tensor.bin import t2t_decoder # To register the hparams set\n", + "from tensor2tensor.utils import registry\n", "from tensor2tensor.utils import trainer_lib\n", - "from tensor2tensor.visualization import attention\n" + "from tensor2tensor.visualization import attention\n", + "from tensor2tensor.visualization import visualization" ] }, { "cell_type": "code", "execution_count": 2, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [ { "data": { @@ -73,376 +65,118 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Data" + "## HParams" ] }, { "cell_type": "code", "execution_count": 3, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/usr/local/google/home/llion/t2t_train/translate_ende_wmt32k/transformer-transformer_base_single_gpu\n" - ] - } - ], + "metadata": {}, + "outputs": [], "source": [ - "import os\n", "# PUT THE MODEL YOU WANT TO LOAD HERE!\n", - "\n", - "PROBLEM = 'translate_ende_wmt32k'\n", - "MODEL = 'transformer'\n", - "HPARAMS = 'transformer_base_single_gpu'\n", - "\n", - "DATA_DIR=os.path.expanduser('~/t2t_data')\n", - 
"TRAIN_DIR=os.path.expanduser('~/t2t_train/%s/%s-%s' % (PROBLEM, MODEL, HPARAMS))\n", - "print(TRAIN_DIR)\n", - "\n", - "FLAGS = tf.flags.FLAGS\n", - "FLAGS.problems = PROBLEM\n", - "FLAGS.hparams_set = HPARAMS\n", - "FLAGS.data_dir = DATA_DIR\n", - "FLAGS.model = MODEL\n", - "\n", - "FLAGS.schedule = 'train_and_evaluate'" + "CHECKPOINT = os.path.expanduser('~/t2t_train/translate_ende_wmt32k/transformer-transformer_base_single_gpu')" ] }, { "cell_type": "code", "execution_count": 4, - "metadata": { - "collapsed": false, - "scrolled": true - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "INFO:tensorflow:datashard_devices: ['gpu:0']\n", - "INFO:tensorflow:caching_devices: None\n", - "INFO:tensorflow:batching_scheme = {'min_length': 0, 'window_size': 720, 'shuffle_queue_size': 270, 'boundaries': [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 22, 24, 26, 28, 30, 33, 36, 39, 42, 46, 50, 55, 60, 66, 72, 79, 86, 94, 103, 113, 124, 136, 149, 163, 179, 196, 215, 236], 'max_length': 1000000000, 'batch_sizes': [240, 180, 180, 180, 144, 144, 144, 120, 120, 120, 90, 90, 90, 90, 80, 72, 72, 60, 60, 48, 48, 48, 40, 40, 36, 30, 30, 24, 24, 20, 20, 18, 18, 16, 15, 12, 12, 10, 10, 9, 8, 8]}\n", - "INFO:tensorflow:Updated batching_scheme = {'min_length': 0, 'window_size': 720, 'shuffle_queue_size': 270, 'boundaries': [], 'max_length': 1000000000, 'batch_sizes': [1]}\n", - "INFO:tensorflow:Reading data files from /usr/local/google/home/llion/t2t_data/translate_ende_wmt32k-dev*\n" - ] - } - ], - "source": [ - "hparams = trainer_lib.create_hparams(FLAGS.hparams_set, data_dir=FLAGS.data_dir, problem_name=PROBLEM)\n", - "hparams.use_fixed_batch_size = True\n", - "hparams.batch_size = 1\n", - "\n", - "# SET EXTRA HYPER PARAMS HERE!\n", - "#hparams.null_slot = True\n", - "\n", - "mode = tf.estimator.ModeKeys.EVAL\n", - "\n", - "problem = hparams.problem_instances[0]\n", - "inputs, target = problem.input_fn(\n", - " mode=mode,\n", - " hparams=hparams,\n", - " data_dir=DATA_DIR)\n", - "\n", - "features = inputs\n", - "features['targets'] = target" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ - "def encode(string):\n", - " subtokenizer = hparams.problems[0].vocabulary['inputs']\n", - " return [subtokenizer.encode(string) + [1] + [0]]\n", - "\n", - "def decode(ids):\n", - " return hparams.problems[0].vocabulary['targets'].decode(np.squeeze(ids))\n", - "\n", - "def to_tokens(ids):\n", - " ids = np.squeeze(ids)\n", - " subtokenizer = hparams.problems[0].vocabulary['targets']\n", - " tokens = []\n", - " for _id in ids:\n", - " if _id == 0:\n", - " tokens.append('')\n", - " elif _id == 1:\n", - " tokens.append('')\n", - " else:\n", - " tokens.append(subtokenizer._subtoken_id_to_subtoken_string(_id))\n", - " return tokens" + "# HParams\n", + "problem_name = 'translate_ende_wmt32k'\n", + "data_dir = os.path.expanduser('~/t2t_data/')\n", + "model_name = \"transformer\"\n", + "hparams_set = \"transformer_base_single_gpu\"" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "# Model" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "INFO:tensorflow:datashard_devices: ['gpu:0']\n", - "INFO:tensorflow:caching_devices: None\n", - "INFO:tensorflow:Doing model_fn_body took 1.881 sec.\n", - "INFO:tensorflow:This model_fn took 2.023 sec.\n" - ] - } - ], 
- "source": [ - "decode_hparams = decoding.decode_hparams(FLAGS.decode_hparams)\n", - "model_fn = t2t_model.T2TModel.make_estimator_model_fn(\n", - " MODEL,\n", - " hparams,\n", - " decode_hparams=decode_hparams)\n", - "est_spec = model_fn(features, target, mode)" + "## Visualization" ] }, { "cell_type": "code", - "execution_count": 7, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "INFO:tensorflow:datashard_devices: ['gpu:0']\n", - "INFO:tensorflow:caching_devices: None\n", - "INFO:tensorflow:Beam Decoding with beam size 4\n", - "INFO:tensorflow:Doing model_fn_body took 1.393 sec.\n", - "INFO:tensorflow:This model_fn took 1.504 sec.\n" - ] - } - ], - "source": [ - "with tf.variable_scope(tf.get_variable_scope(), reuse=True):\n", - " beam_out = model_fn(features, target, tf.contrib.learn.ModeKeys.INFER)" - ] - }, - { - "cell_type": "markdown", + "execution_count": 5, "metadata": {}, - "source": [ - "# Session" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": { - "collapsed": false - }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "INFO:tensorflow:Restoring parameters from /usr/local/google/home/llion/t2t_train/translate_ende_wmt32k/transformer-transformer_base_single_gpu/model.ckpt-1\n", - "INFO:tensorflow:Starting standard services.\n", - "INFO:tensorflow:Starting queue runners.\n", - "INFO:tensorflow:Saving checkpoint to path /usr/local/google/home/llion/t2t_train/translate_ende_wmt32k/transformer-transformer_base_single_gpu/model.ckpt\n" + "INFO:tensorflow:Setting T2TModel mode to 'eval'\n", + "INFO:tensorflow:Setting hparams.layer_prepostprocess_dropout to 0.0\n", + "INFO:tensorflow:Setting hparams.symbol_dropout to 0.0\n", + "INFO:tensorflow:Setting hparams.attention_dropout to 0.0\n", + "INFO:tensorflow:Setting hparams.dropout to 0.0\n", + "INFO:tensorflow:Setting hparams.relu_dropout to 0.0\n", + "INFO:tensorflow:Using variable initializer: uniform_unit_scaling\n", + "INFO:tensorflow:Transforming feature 'inputs' with symbol_modality_33708_512.bottom\n", + "INFO:tensorflow:Transforming 'targets' with symbol_modality_33708_512.targets_bottom\n", + "INFO:tensorflow:Building model body\n", + "WARNING:tensorflow:From /tmp/t2t/tensor2tensor/layers/common_layers.py:512: calling reduce_mean (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "keep_dims is deprecated, use keepdims instead\n", + "INFO:tensorflow:Transforming body output with symbol_modality_33708_512.top\n", + "WARNING:tensorflow:From /tmp/t2t/tensor2tensor/layers/common_layers.py:1707: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "\n", + "Future major versions of TensorFlow will allow gradients to flow\n", + "into the labels input on backprop by default.\n", + "\n", + "See tf.nn.softmax_cross_entropy_with_logits_v2.\n", + "\n", + "INFO:tensorflow:Greedy Decoding\n" ] - }, - { - "data": { - "text/plain": [ - "[]" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" } ], "source": [ - "sv = tf.train.Supervisor(\n", - " logdir=TRAIN_DIR,\n", - " global_step=tf.Variable(0, dtype=tf.int64, trainable=False, name='global_step'))\n", - "sess = sv.PrepareSession(config=tf.ConfigProto(allow_soft_placement=True))\n", - "sv.StartQueueRunners(\n", - " sess,\n", - " 
tf.get_default_graph().get_collection(tf.GraphKeys.QUEUE_RUNNERS))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Visualization" + "visualizer = visualization.AttentionVisualizer(hparams_set, model_name, data_dir, problem_name, beam_size=1)" ] }, { "cell_type": "code", - "execution_count": 9, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# Get the attention tensors from the graph.\n", - "# This need to be done using the training graph since the inference uses a tf.while_loop\n", - "# and you cant fetch tensors from inside a while_loop.\n", - "\n", - "enc_atts = []\n", - "dec_atts = []\n", - "encdec_atts = []\n", - "\n", - "for i in range(hparams.num_hidden_layers):\n", - " enc_att = tf.get_default_graph().get_operation_by_name(\n", - " \"body/model/parallel_0/body/encoder/layer_%i/self_attention/multihead_attention/dot_product_attention/attention_weights\" % i).values()[0]\n", - " dec_att = tf.get_default_graph().get_operation_by_name(\n", - " \"body/model/parallel_0/body/decoder/layer_%i/self_attention/multihead_attention/dot_product_attention/attention_weights\" % i).values()[0]\n", - " encdec_att = tf.get_default_graph().get_operation_by_name(\n", - " \"body/model/parallel_0/body/decoder/layer_%i/encdec_attention/multihead_attention/dot_product_attention/attention_weights\" % i).values()[0]\n", - "\n", - " enc_atts.append(enc_att)\n", - " dec_atts.append(dec_att)\n", - " encdec_atts.append(encdec_att)" - ] - }, - { - "cell_type": "markdown", + "execution_count": 6, "metadata": {}, - "source": [ - "## Test translation from the dataset" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": { - "collapsed": false, - "scrolled": false - }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "INFO:tensorflow:global_step/sec: 0\n", - "Input: For example, during the 2008 general election in Florida, 33% of early voters were African-Americans, who accounted however for only 13% of voters in the State.\n", - "Gold: Beispielsweise waren bei den allgemeinen Wahlen 2008 in Florida 33% der Wähler, die im Voraus gewählt haben, Afro-Amerikaner, obwohl sie nur 13% der Wähler des Bundesstaates ausmachen.\n", - "Gold out: So waren 33 den allgemeinen Wahlen im in der a 33 % der Frühjungdie nur Land die wurden, die ro- Amerikaner, die sie nur 13 % der Wähler im Staates staats betra.\n", - "INFO:tensorflow:Recording summary at step 250000.\n" + "INFO:tensorflow:Create CheckpointSaverHook.\n", + "INFO:tensorflow:Restoring parameters from /usr/local/google/home/llion/t2t_train/translate_ende_wmt32k/transformer-transformer_base_single_gpu/model.ckpt-1\n" ] } ], "source": [ - "inp, out, logits = sess.run([inputs['inputs'], target, est_spec.predictions['predictions']])\n", + "tf.Variable(0, dtype=tf.int64, trainable=False, name='global_step')\n", "\n", - "print(\"Input: \", decode(inp[0]))\n", - "print(\"Gold: \", decode(out[0]))\n", - "logits = np.squeeze(logits[0])\n", - "tokens = np.argmax(logits, axis=1)\n", - "print(\"Gold out: \", decode(tokens))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Visualize Custom Sentence" + "sess = tf.train.MonitoredTrainingSession(\n", + " checkpoint_dir=CHECKPOINT,\n", + " save_summaries_secs=0,\n", + ")" ] }, { "cell_type": "code", - "execution_count": 11, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "eng = \"I have three dogs.\"" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": { - 
"collapsed": false - }, + "execution_count": 7, + "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Ich habe drei Hunde.\n" + "INFO:tensorflow:Saving checkpoints for 1 into /usr/local/google/home/llion/t2t_train/translate_ende_wmt32k/transformer-transformer_base_single_gpu/model.ckpt.\n" ] } ], "source": [ - "inp_ids = encode(eng)\n", - "beam_decode = sess.run(beam_out.predictions['outputs'], {\n", - " inputs['inputs']: np.expand_dims(np.expand_dims(inp_ids, axis=2), axis=3),\n", - "})\n", - "trans = decode(beam_decode[0])\n", - "print(trans)" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "output_ids = beam_decode\n", - "\n", - "# Get attentions\n", - "np_enc_atts, np_dec_atts, np_encdec_atts = sess.run([enc_atts, dec_atts, encdec_atts], {\n", - " inputs['inputs']: np.expand_dims(np.expand_dims(inp_ids, axis=2), axis=3),\n", - " target: np.expand_dims(np.expand_dims(output_ids, axis=2), axis=3),\n", - "})" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "data": { - "application/javascript": [ - "IPython.OutputArea.prototype._should_scroll = function(lines) {\n", - " return false;\n", - "}" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "%%javascript\n", - "IPython.OutputArea.prototype._should_scroll = function(lines) {\n", - " return false;\n", - "}" + "input_sentence = \"I have two dogs.\"\n", + "output_string, inp_text, out_text, att_mats = visualizer.get_vis_data_from_string(sess, input_sentence)\n", + "print(output_string)" ] }, { @@ -462,22 +196,16 @@ " - Double clicking a color will hide all other colors, double clicking on a color when it’s the only head showing will show all the heads again.\n", "- You can hover over a word to see the individual attention weights for just that position.\n", " - Hovering over the words on the left will show what that position attended to.\n", - " - Hovering over the words on the right will show what positions attended to it.\n" + " - Hovering over the words on the right will show what positions attended to it." ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true, - "scrolled": true - }, + "metadata": {}, "outputs": [], "source": [ - "inp_text = to_tokens(inp_ids)\n", - "out_text = to_tokens(output_ids)\n", - "\n", - "attention.show(inp_text, out_text, np_enc_atts, np_dec_atts, np_encdec_atts)" + "attention.show(inp_text, out_text, *att_mats)" ] } ], diff --git a/tensor2tensor/visualization/visualization.py b/tensor2tensor/visualization/visualization.py new file mode 100644 index 000000000..e92ca518c --- /dev/null +++ b/tensor2tensor/visualization/visualization.py @@ -0,0 +1,202 @@ +# coding=utf-8 +# Copyright 2017 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+"""Shared code for visualizing transformer attentions."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+# Dependency imports
+
+import numpy as np
+
+# To register the hparams set
+from tensor2tensor import models  # pylint: disable=unused-import
+from tensor2tensor import problems
+from tensor2tensor.utils import registry
+from tensor2tensor.utils import trainer_lib
+
+import tensorflow as tf
+
+EOS_ID = 1
+
+
+class AttentionVisualizer(object):
+  """Helper object for creating Attention visualizations."""
+
+  def __init__(
+      self, hparams_set, model_name, data_dir, problem_name, beam_size=1):
+    inputs, targets, samples, att_mats = build_model(
+        hparams_set, model_name, data_dir, problem_name, beam_size=beam_size)
+
+    # Fetch the problem
+    ende_problem = problems.problem(problem_name)
+    encoders = ende_problem.feature_encoders(data_dir)
+
+    self.inputs = inputs
+    self.targets = targets
+    self.att_mats = att_mats
+    self.samples = samples
+    self.encoders = encoders
+
+  def encode(self, input_str):
+    """Input str to features dict, ready for inference."""
+    inputs = self.encoders['inputs'].encode(input_str) + [EOS_ID]
+    batch_inputs = np.reshape(inputs, [1, -1, 1, 1])  # Make it 4D.
+    return batch_inputs
+
+  def decode(self, integers):
+    """List of ints to str."""
+    integers = list(np.squeeze(integers))
+    return self.encoders['inputs'].decode(integers)
+
+  def decode_list(self, integers):
+    """List of ints to list of str."""
+    integers = list(np.squeeze(integers))
+    return self.encoders['inputs'].decode_list(integers)
+
+  def get_vis_data_from_string(self, sess, input_string):
+    """Constructs the data needed for visualizing attentions.
+
+    Args:
+      sess: A tf.Session object.
+      input_string: The input sentence to be translated and visualized.
+
+    Returns:
+      Tuple of (
+          output_string: The translated sentence.
+          input_list: Tokenized input sentence.
+          output_list: Tokenized translation.
+          att_mats: Tuple of attention matrices; (
+              enc_atts: Encoder self attention weights.
+                A list of `num_layers` numpy arrays of size
+                (batch_size, num_heads, inp_len, inp_len)
+              dec_atts: Decoder self attention weights.
+                A list of `num_layers` numpy arrays of size
+                (batch_size, num_heads, out_len, out_len)
+              encdec_atts: Encoder-Decoder attention weights.
+                A list of `num_layers` numpy arrays of size
+                (batch_size, num_heads, out_len, inp_len)
+          )
+      )
+    """
+    encoded_inputs = self.encode(input_string)
+
+    # Run inference graph to get the translation.
+    out = sess.run(self.samples, {
+        self.inputs: encoded_inputs,
+    })
+
+    # Run the decoded translation through the training graph to get the
+    # attention tensors.
+    att_mats = sess.run(self.att_mats, {
+        self.inputs: encoded_inputs,
+        self.targets: np.reshape(out, [1, -1, 1, 1]),
+    })
+
+    output_string = self.decode(out)
+    input_list = self.decode_list(encoded_inputs)
+    output_list = self.decode_list(out)
+
+    return output_string, input_list, output_list, att_mats
+
+
+def build_model(hparams_set, model_name, data_dir, problem_name, beam_size=1):
+  """Build the graph required to fetch the attention weights.
+
+  Args:
+    hparams_set: HParams set to build the model with.
+    model_name: Name of model.
+    data_dir: Path to directory containing training data.
+    problem_name: Name of problem.
+    beam_size: (Optional) Number of beams to use when decoding a translation.
+        If set to 1 (default) then greedy decoding is used.
+
+  Returns:
+    Tuple of (
+        inputs: Input placeholder to feed in ids to be translated.
+        targets: Targets placeholder used to feed in the translation when
+            fetching attention weights.
+        samples: Tensor representing the ids of the translation.
+        att_mats: Tensors representing the attention weights.
+    )
+  """
+  hparams = trainer_lib.create_hparams(
+      hparams_set, data_dir=data_dir, problem_name=problem_name)
+  translate_model = registry.model(model_name)(
+      hparams, tf.estimator.ModeKeys.EVAL)
+
+  inputs = tf.placeholder(tf.int32, shape=(1, None, 1, 1), name='inputs')
+  targets = tf.placeholder(tf.int32, shape=(1, None, 1, 1), name='targets')
+  translate_model({
+      'inputs': inputs,
+      'targets': targets,
+  })
+
+  # Must be called after building the training graph, so that the dict will
+  # have been filled with the attention tensors. BUT before creating the
+  # inference graph, otherwise the dict will be filled with tensors from
+  # inside a tf.while_loop used for decoding, which are marked unfetchable.
+  att_mats = get_att_mats(translate_model)
+
+  with tf.variable_scope(tf.get_variable_scope(), reuse=True):
+    samples = translate_model.infer({
+        'inputs': inputs,
+    }, beam_size=beam_size)['outputs']
+
+  return inputs, targets, samples, att_mats
+
+
+def get_att_mats(translate_model):
+  """Gets the tensors representing the attentions from a built model.
+
+  The attentions are stored in a dict on the Transformer object while building
+  the graph.
+
+  Args:
+    translate_model: Transformer object to fetch the attention weights from.
+
+  Returns:
+    Tuple of attention matrices; (
+        enc_atts: Encoder self attention weights.
+          A list of `num_layers` numpy arrays of size
+          (batch_size, num_heads, inp_len, inp_len)
+        dec_atts: Decoder self attention weights.
+          A list of `num_layers` numpy arrays of size
+          (batch_size, num_heads, out_len, out_len)
+        encdec_atts: Encoder-Decoder attention weights.
+          A list of `num_layers` numpy arrays of size
+          (batch_size, num_heads, out_len, inp_len)
+    )
+  """
+  enc_atts = []
+  dec_atts = []
+  encdec_atts = []
+
+  prefix = 'transformer/body/'
+  postfix = '/multihead_attention/dot_product_attention'
+
+  for i in range(translate_model.hparams.num_hidden_layers):
+    enc_att = translate_model.attention_weights[
+        '%sencoder/layer_%i/self_attention%s' % (prefix, i, postfix)]
+    dec_att = translate_model.attention_weights[
+        '%sdecoder/layer_%i/self_attention%s' % (prefix, i, postfix)]
+    encdec_att = translate_model.attention_weights[
+        '%sdecoder/layer_%i/encdec_attention%s' % (prefix, i, postfix)]
+    enc_atts.append(enc_att)
+    dec_atts.append(dec_att)
+    encdec_atts.append(encdec_att)
+
+  return enc_atts, dec_atts, encdec_atts
diff --git a/tensor2tensor/visualization/visualization_test.py b/tensor2tensor/visualization/visualization_test.py
new file mode 100644
index 000000000..b4d6b96f6
--- /dev/null
+++ b/tensor2tensor/visualization/visualization_test.py
@@ -0,0 +1,109 @@
+# coding=utf-8
+# Copyright 2017 The Tensor2Tensor Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for visualization library.
+
+  IF ANY OF THESE TESTS BREAK PLEASE UPDATE THE CODE IN THE VIZ NOTEBOOK
+******************************************************************************
+
+Any fixes you have to make to this test or visualization.py to fix this test
+might have to be reflected in the visualization notebook, for example if the
+name of the hparams_set changes.
+
+If you need help testing the changes please contact llion@.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+
+from tensor2tensor.utils import trainer_lib
+from tensor2tensor.visualization import visualization
+import tensorflow as tf
+
+
+def get_data_dir():
+  pkg, _ = os.path.split(__file__)
+  pkg, _ = os.path.split(pkg)
+  return os.path.join(pkg, 'test_data')
+
+
+problem_name = 'translate_ende_wmt32k'
+model_name = 'transformer'
+hparams_set = 'transformer_base_single_gpu'
+
+
+class VisualizationTest(tf.test.TestCase):
+
+  def setUp(self):
+    super(VisualizationTest, self).setUp()
+    self.data_dir = get_data_dir()
+
+  def test_build_model_greedy(self):
+    inputs, targets, outputs, _ = visualization.build_model(
+        hparams_set, model_name, self.data_dir, problem_name, beam_size=1)
+
+    self.assertAllEqual((1, None, 1, 1), inputs.shape.as_list())
+    self.assertAllEqual((1, None, 1, 1), targets.shape.as_list())
+    self.assertAllEqual((None, None), outputs.shape.as_list())
+
+  def test_build_model_beam(self):
+    inputs, targets, outputs, _ = visualization.build_model(
+        hparams_set, model_name, self.data_dir, problem_name, beam_size=8)
+
+    self.assertAllEqual((1, None, 1, 1), inputs.shape.as_list())
+    self.assertAllEqual((1, None, 1, 1), targets.shape.as_list())
+    self.assertAllEqual((None, None), outputs.shape.as_list())
+
+  def test_get_vis_data_from_string(self):
+    visualizer = visualization.AttentionVisualizer(
+        hparams_set, model_name, self.data_dir, problem_name, beam_size=8)
+
+    input_sentence = 'I have two dogs.'
+    with self.test_session() as sess:
+      sess.run(tf.global_variables_initializer())
+      _, inp_text, out_text, att_mats = (
+          visualizer.get_vis_data_from_string(sess, input_sentence))
+
+    self.assertAllEqual(
+        [u'I_', u'have_', u'two_', u'dogs_', u'._', u'<EOS>'], inp_text)
+
+    hparams = trainer_lib.create_hparams(
+        hparams_set, data_dir=self.data_dir, problem_name=problem_name)
+
+    enc_atts, dec_atts, encdec_atts = att_mats
+
+    self.assertAllEqual(hparams.num_hidden_layers, len(enc_atts))
+
+    enc_atts = enc_atts[0]
+    dec_atts = dec_atts[0]
+    encdec_atts = encdec_atts[0]
+
+    batch_size = 1
+    num_heads = hparams.num_heads
+    inp_len = len(inp_text)
+    out_len = len(out_text)
+
+    self.assertAllEqual(
+        (batch_size, num_heads, inp_len, inp_len), enc_atts.shape)
+    self.assertAllEqual(
+        (batch_size, num_heads, out_len, out_len), dec_atts.shape)
+    self.assertAllEqual(
+        (batch_size, num_heads, out_len, inp_len), encdec_atts.shape)
+
+if __name__ == '__main__':
+  tf.test.main()
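Taken together, the new `visualization` module is meant to be driven the way the updated notebook drives it: construct an `AttentionVisualizer`, restore a trained checkpoint, and hand the resulting attention matrices to `attention.show`. A condensed sketch along those lines follows; the data and checkpoint paths are placeholders for a model trained separately.

```
import os

import tensorflow as tf

from tensor2tensor.visualization import attention
from tensor2tensor.visualization import visualization

# Placeholder paths; point these at your own data and trained checkpoint.
data_dir = os.path.expanduser('~/t2t_data')
checkpoint_dir = os.path.expanduser(
    '~/t2t_train/translate_ende_wmt32k/transformer-transformer_base_single_gpu')

visualizer = visualization.AttentionVisualizer(
    'transformer_base_single_gpu', 'transformer', data_dir,
    'translate_ende_wmt32k', beam_size=1)

# Mirrors the notebook: give the MonitoredTrainingSession a global step.
tf.Variable(0, dtype=tf.int64, trainable=False, name='global_step')

with tf.train.MonitoredTrainingSession(
    checkpoint_dir=checkpoint_dir, save_summaries_secs=0) as sess:
  output_string, inp_text, out_text, att_mats = (
      visualizer.get_vis_data_from_string(sess, 'I have two dogs.'))
  print(output_string)

# The attention matrices come back as plain numpy arrays, so the
# visualization can be rendered after the session has closed.
attention.show(inp_text, out_text, *att_mats)
```

This mirrors the notebook cells above, minus the Jupyter-specific display setup.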