diff --git a/nevergrad/benchmark/test_experiments.py b/nevergrad/benchmark/test_experiments.py
index b9c475a223..b75a63adca 100644
--- a/nevergrad/benchmark/test_experiments.py
+++ b/nevergrad/benchmark/test_experiments.py
@@ -29,9 +29,11 @@
 @testing.parametrized(**{name: (name, maker) for name, maker in experiments.registry.items()})
 def test_experiments_registry(name: str, maker: tp.Callable[[], tp.Iterator[experiments.Experiment]]) -> None:
     # "mav" is not availablefor now.
-    if name == "conformant_planning" or name == "neuro_planning":
+    if name == "conformant_planning" or name == "neuro_planning" or "compiler" in name:
         raise SkipTest("This is user parametric and can not be tested.")
-
+    if "emulator" in name:
+        raise SkipTest("Emulators not included in CI.")
+
     # Our PGAN is not well accepted by circleci.
     if "_pgan" in name and os.environ.get("CIRCLECI", False):
         raise SkipTest("Too slow in CircleCI")
diff --git a/nevergrad/optimization/differentialevolution.py b/nevergrad/optimization/differentialevolution.py
index 86586b3cc1..b9e8ab6c30 100644
--- a/nevergrad/optimization/differentialevolution.py
+++ b/nevergrad/optimization/differentialevolution.py
@@ -101,7 +101,8 @@ def __init__(
         if isinstance(self._config.popsize, int):
             self.llambda = self._config.popsize
         else:
-            self.llambda = max(30, self.num_workers, pop_choice[self._config.popsize])
+            self.llambda = max(30, pop_choice[self._config.popsize])
+            self.llambda = max(self.llambda, self.num_workers)
         # internals
         if budget is not None and budget < 60:
             warnings.warn(
@@ -197,7 +198,9 @@ def _internal_tell_candidate(self, candidate: p.Parameter, loss: tp.FloatLoss) -
         if uid not in self.population:  # parent was removed, revert to tell_not_asked
             self._internal_tell_not_asked(candidate, loss)
             return
-        self._uid_queue.tell(uid)  # only add to queue if not a "tell_not_asked" (from a removed parent)
+        if uid in self._uid_queue.asked:  # if taken from queue in multiple asks, add back only once
+            self._uid_queue.asked.discard(uid)
+            self._uid_queue.tell(uid)
         parent = self.population[uid]
         mo_adapt = self._config.multiobjective_adaptation and self.num_objectives > 1
         mo_adapt &= candidate._losses is not None  # can happen with bad constraints
@@ -275,8 +278,8 @@ class DifferentialEvolution(base.ConfiguredOptimizer):
     F2: float
         differential weight #2
     popsize: int, "standard", "dimension", "large"
-        size of the population to use. "standard" is max(num_workers, 30), "dimension" max(num_workers, 30, dimension +1)
-        and "large" max(num_workers, 30, 7 * dimension).
+        size of the population to use. "standard" is 30, "dimension" max(30, dimension +1) and "large"
+        max(30, 7 * dimension). Set to be at least num_workers so that base vectors are distinct.
     multiobjective_adaptation: bool
         Automatically adapts to handle multiobjective case. This is a very basic **experimental** version,
         activated by default because the non-multiobjective implementation is performing very badly.
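Note on the `differentialevolution.py` hunks above: the two-step `max` is algebraically identical to the removed one-liner, since `max(30, num_workers, pop_choice[...]) == max(max(30, pop_choice[...]), num_workers)`; it only separates the nominal population size from the at-least-`num_workers` clamp that the updated docstring now describes. The behavioral fix is the `_uid_queue` guard. A minimal sketch of why re-queuing must happen at most once per outstanding parent uid, using a simplified stand-in (`ToyUidQueue` is hypothetical, not nevergrad's actual `UidQueue`):

```python
from collections import deque

class ToyUidQueue:
    """Simplified, hypothetical stand-in for the population uid queue."""
    def __init__(self):
        self.told = deque()  # uids ready to serve as parents for new asks
        self.asked = set()   # uids currently out for evaluation

    def ask(self, uid):
        self.asked.add(uid)  # mark the parent as "in flight"

    def tell(self, uid):
        self.told.append(uid)  # make the parent available again

queue = ToyUidQueue()
for _ in range(3):  # three parallel asks draw the same parent lineage
    queue.ask("parent-1")

# Patched behavior: add the uid back only once, on the first matching tell.
for _ in range(3):
    if "parent-1" in queue.asked:
        queue.asked.discard("parent-1")
        queue.tell("parent-1")

assert list(queue.told) == ["parent-1"]  # queued once, not three times
```

Without the guard, the three matching tells would enqueue the same parent three times, biasing subsequent asks toward that lineage.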
diff --git a/nevergrad/optimization/experimentalvariants.py b/nevergrad/optimization/experimentalvariants.py
index 511f0ba495..06e04b72af 100644
--- a/nevergrad/optimization/experimentalvariants.py
+++ b/nevergrad/optimization/experimentalvariants.py
@@ -357,6 +357,7 @@
 MixDeterministicRL = ConfPortfolio(optimizers=[DiagonalCMA, PSO, GeneticDE]).set_name(
     "MixDeterministicRL", register=True
 )
+
 SpecialRL = Chaining([MixDeterministicRL, TBPSA], ["half"]).set_name("SpecialRL", register=True)
 NoisyRL1 = Chaining([MixDeterministicRL, NoisyOnePlusOne], ["half"]).set_name("NoisyRL1", register=True)
 NoisyRL2 = Chaining(
diff --git a/nevergrad/optimization/optimizerlib.py b/nevergrad/optimization/optimizerlib.py
index 55003a67fd..534e40ce5e 100644
--- a/nevergrad/optimization/optimizerlib.py
+++ b/nevergrad/optimization/optimizerlib.py
@@ -1596,7 +1596,7 @@ def __init__(
         num = len(optimizers)
         self.optims: tp.List[base.Optimizer] = []
         sub_budget = None if budget is None else budget // num + (budget % num > 0)
-        sub_workers = 1
+        sub_workers = num_workers
         if distribute_workers:
             sub_workers = num_workers // num + (num_workers % num > 0)
         for opt in optimizers:
@@ -1673,7 +1673,7 @@ def enable_pickling(self) -> None:
         opt.enable_pickling()
 
 
-ParaPortfolio = ConfPortfolio(optimizers=[CMA, TwoPointsDE, PSO, SQP, ScrHammersleySearch]).set_name(
+ParaPortfolio = ConfPortfolio(optimizers=[CMA, TwoPointsDE, PSO, ScrHammersleySearch]).set_name(
     "ParaPortfolio", register=True
 )
 ASCMADEthird = ConfPortfolio(optimizers=[CMA, LhsDE], warmup_ratio=0.33).set_name(
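Note on the portfolio hunk at line 1596: the change only matters when `distribute_workers` is False, where each sub-optimizer was previously configured with a single worker even though the portfolio may ask it several times in parallel. A rough illustration of the arithmetic, with hypothetical numbers not taken from the patch:

```python
def ceil_div(a: int, n: int) -> int:
    # same idiom as the patch: a // n + (a % n > 0)
    return a // n + (a % n > 0)

num, budget, num_workers = 3, 100, 8  # hypothetical portfolio settings

sub_budget = ceil_div(budget, num)          # 34 evaluations per sub-optimizer
split_workers = ceil_div(num_workers, num)  # 3 each when distribute_workers=True

# distribute_workers=False: each sub-optimizer used to get sub_workers = 1,
# so it assumed sequential operation even under parallel asks; the patch
# hands it the full worker count instead.
sub_workers_old, sub_workers_new = 1, num_workers  # 1 -> 8

print(sub_budget, split_workers, sub_workers_old, sub_workers_new)
```

The `ParaPortfolio` hunk drops `SQP` from the optimizer list; the ASCMADEthird definition below it is unchanged context.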