/**
 * Reads a word-topic assignment (tassign) file and rebuilds the corpus and the
 * topic-assignment matrix z. Line i of the file describes document i as
 * whitespace-separated "wordId:topicId" pairs, e.g. "0:3 5:1 42:3".
 *
 * @param tassignFile path to the tassign file
 * @return true on success, false if the file is malformed or cannot be read
 */
protected boolean readTAssignFile(String tassignFile) {
    // try-with-resources closes the reader even on the early returns below
    try (BufferedReader reader =
        new BufferedReader(new InputStreamReader(new FileInputStream(tassignFile), "UTF-8"))) {

      // One assignment line per document: allocate z and a fresh corpus of M docs.
      z = new short[M][];
      data = new LDADataset(M);
      data.V = V;
      for (int i = 0; i < M; i++) {
        String line = reader.readLine();
        if (line == null) {
          System.out.println("Invalid tassign file: expected " + M + " lines but found only " + i);
          return false;
        }
        StringTokenizer tknr = new StringTokenizer(line, " \t\r\n");

        int length = tknr.countTokens();

        ArrayList<Integer> words = new ArrayList<>();
        ArrayList<Integer> topics = new ArrayList<>();
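        // words.get(j) pairs with topics.get(j): the j-th word of document i
        // and the topic assigned to it.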

        for (int j = 0; j < length; j++) {
          String token = tknr.nextToken();

          // Each token must have the form "wordId:topicId".
          StringTokenizer tknr2 = new StringTokenizer(token, ":");
          if (tknr2.countTokens() != 2) {
            System.out.println("Invalid word-topic assignment line: " + line);
            return false;
          }

          words.add(Integer.parseInt(tknr2.nextToken()));
          topics.add(Integer.parseInt(tknr2.nextToken()));
        } // end for each topic assignment

        // allocate and add new document to the corpus
        Document doc = new Document(words);
        data.setDoc(doc, i);

        // assign values for z (stored as short[] rather than boxed Integers)
        z[i] = new short[topics.size()];
        for (int j = 0; j < topics.size(); j++) {
          // Unbox the Integer to int, then narrow to short.
          z[i][j] = (short) (int) topics.get(j);
        }
      } // end for each doc

    } catch (Exception e) {
      System.out.println("Error while loading model: " + e.getMessage());
      e.printStackTrace();
      return false;
    }
    return true;
  }
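
  // Usage sketch (illustrative, not part of the original class): the field
  // names M, V, z, and data, the Model class, its readOthersFile helper, and
  // the "model-final.tassign"/"model-final.others" file names follow JGibbLDA
  // conventions and are assumptions here, not guaranteed by this snippet.
  //
  //   Model model = new Model();
  //   // M (number of documents) and V (vocabulary size) must be set before
  //   // parsing the tassign file, e.g. from the companion .others file:
  //   model.readOthersFile(dir + File.separator + "model-final.others");
  //   if (!model.readTAssignFile(dir + File.separator + "model-final.tassign")) {
  //     System.out.println("Failed to load topic assignments");
  //   }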