public List_adp Load( Xomp_mgr_db mgr_db, String machine_name, List_adp list, int list_idx, int list_len) { List_adp rv = List_adp_.New(); // add remaining pages from old pool to new_pool; for (int i = list_idx; i < list_len; ++i) { rv.Add((Xomp_page_itm) list.Get_at(i)); } // load pages into new pool Xomp_lock_mgr lock_mgr = mgr_db.Lock_mgr(); int uid_db = lock_mgr.Uid_prv__get(machine_name); if (uid_db == Xomp_lock_mgr__fsys.Uid__eos) return rv; // assert that uids must be incrementally larger; handle one machine reaching end, // and putting -1 in queue; int uid_new = 0; try { uid_new = this.Load_from_db(rv, uid_db); } finally { lock_mgr.Uid_prv__rls(machine_name, uid_new); } if (show_msg__fetched_pool) Gfo_usr_dlg_.Instance.Note_many( "", "", "fetched new pool: old=~{0} new=~{1}", uid_db, uid_new); return rv; }
private int Load_from_db(List_adp list, int uid_prv) { // prepare for page_tbl String sql = String_.Format( String_.Concat_lines_nl_skip_last // ANSI.Y ( "SELECT mp.xomp_uid", ", pp.page_id", ", pp.page_namespace", ", pp.page_title", ", pp.page_text_db_id", "FROM xomp_page mp", " JOIN <page_db>page pp ON mp.page_id = pp.page_id", "WHERE mp.xomp_uid > {0}", "AND mp.page_status = 0", "LIMIT {1}"), uid_prv, num_pages_per_load); this.attach_mgr.Conn_links_( new Db_attach_itm("page_db", wiki.Data__core_mgr().Db__core().Conn())); sql = attach_mgr.Resolve_sql(sql); // run page_tbl int rv = -1; Xomp_text_db_loader text_db_loader = new Xomp_text_db_loader(wiki); attach_mgr.Attach(); Db_rdr rdr = make_conn.Stmt_sql(sql).Exec_select__rls_auto(); try { while (rdr.Move_next()) { rv = rdr.Read_int("xomp_uid"); int text_db_id = rdr.Read_int("page_text_db_id"); Xomp_page_itm ppg = new Xomp_page_itm(rdr.Read_int("page_id")); ppg.Init_by_page( rdr.Read_int("page_namespace"), rdr.Read_bry_by_str("page_title"), text_db_id); list.Add(ppg); text_db_loader.Add(text_db_id, ppg); } } finally { rdr.Rls(); } attach_mgr.Detach(); text_db_loader.Load(); return rv; }
// Returns every import_step row mapped to the given task through step_map.
public Xobc_import_step_itm[] Select_by_task_id(int task_id) {
    List_adp steps = List_adp_.New();
    String sql = Db_sql_.Make_by_fmt(String_.Ary
    ( "SELECT s.*"
    , "FROM import_step s"
    , " JOIN step_map sm ON s.step_id = sm.step_id"
    , "WHERE sm.task_id = {0}"
    ), task_id);
    Db_rdr rdr = conn.Stmt_sql(sql).Exec_select__rls_auto();
    try {
        while (rdr.Move_next())
            steps.Add(New_itm(rdr));
    }
    finally {
        rdr.Rls(); // always release the reader
    }
    return (Xobc_import_step_itm[])steps.To_ary_and_clear(Xobc_import_step_itm.class);
}
private byte Select_fsdb_itms(List_adp list) { list.Clear(); boolean pages_found = false, links_found = false; DataRdr rdr = Xob_xfer_regy_tbl.Select_by_tier_page(bldr_conn, tier_id_val, page_id_val, select_interval); try { while (rdr.MoveNextPeer()) { pages_found = true; // at least one page found; set true Xodb_tbl_oimg_xfer_itm itm = Xodb_tbl_oimg_xfer_itm.new_rdr_(rdr); if (itm.Lnki_page_id() == page_id_val // same page_id && itm.Lnki_id() <= lnki_id_val // ... but lnki_id < last ) continue; // ... ignore; note that select is by page_id, not page_id + link_id; needed // else restarts would not resume exactly at same point; links_found = true; list.Add(itm); } } finally { rdr.Rls(); } if (pages_found && !links_found) return Select_rv_next_page; // pages found, but all links processed else if (!pages_found) return Select_rv_stop; // no more pages found else return Select_rv_process; // pages and links found }