Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

TM activeCells_ is an SDR #442

Open
wants to merge 7 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -133,9 +133,7 @@ using namespace nupic::algorithms::connections;

py_HTM.def("getActiveCells", [](const HTM_t& self)
{
auto activeCells = self.getActiveCells();

return py::array_t<nupic::UInt32>(activeCells.size(), activeCells.data());
return self.getActiveCells();
});

py_HTM.def("activateDendrites", [](HTM_t &self, bool learn) {
Expand Down
75 changes: 33 additions & 42 deletions src/nupic/algorithms/TemporalMemory.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,10 @@ void TemporalMemory::initialize(
connections = Connections(static_cast<CellIdx>(numberOfColumns() * cellsPerColumn_), connectedPermanence_);
rng_ = Random(seed);

auto cellsDims = getColumnDimensions(); //nD column dimensions (eg 10x100)
cellsDims.push_back(getCellsPerColumn()); //add n+1-th dimension for cellsPerColumn (eg. 10x100x8)
activeCells_.initialize(cellsDims);

maxSegmentsPerCell_ = maxSegmentsPerCell;
maxSynapsesPerSegment_ = maxSynapsesPerSegment;
iteration_ = 0;
Expand Down Expand Up @@ -294,7 +298,7 @@ static void growSynapses(Connections &connections,
}

static void activatePredictedColumn(
vector<CellIdx> &activeCells,
SDR &activeCellsSDR,
vector<CellIdx> &winnerCells,
Connections &connections,
Random &rng,
Expand All @@ -310,6 +314,7 @@ static void activatePredictedColumn(
const SynapseIdx maxSynapsesPerSegment,
const bool learn) {
auto activeSegment = columnActiveSegmentsBegin;
auto& activeCells = activeCellsSDR.getSparse();
do {
const CellIdx cell = connections.cellForSegment(*activeSegment);
activeCells.push_back(cell);
Expand All @@ -333,6 +338,8 @@ static void activatePredictedColumn(
} while (++activeSegment != columnActiveSegmentsEnd &&
connections.cellForSegment(*activeSegment) == cell);
} while (activeSegment != columnActiveSegmentsEnd);

activeCellsSDR.setSparse(activeCells); //update SDR
}

static Segment createSegment(Connections &connections, //TODO remove, use TM::createSegment
Expand Down Expand Up @@ -361,7 +368,7 @@ static Segment createSegment(Connections &connections, //TODO remove, use TM::c
}

static void
burstColumn(vector<CellIdx> &activeCells,
burstColumn(SDR &activeCellsSDR,
vector<CellIdx> &winnerCells,
Connections &connections,
Random &rng,
Expand All @@ -381,12 +388,17 @@ burstColumn(vector<CellIdx> &activeCells,
const SegmentIdx maxSegmentsPerCell,
const SynapseIdx maxSynapsesPerSegment,
const bool learn) {

{
auto& activeCells = activeCellsSDR.getSparse();
// Calculate the active cells.
const CellIdx start = column * cellsPerColumn;
const CellIdx end = start + cellsPerColumn;
for (CellIdx cell = start; cell < end; cell++) {
activeCells.push_back(cell);
}
activeCellsSDR.setSparse(activeCells);
}

const auto bestMatchingSegment =
std::max_element(columnMatchingSegmentsBegin, columnMatchingSegmentsEnd,
Expand Down Expand Up @@ -466,10 +478,10 @@ void TemporalMemory::activateCells(const SDR &activeColumns, const bool learn) {


vector<bool> prevActiveCellsDense(numberOfCells() + extra_, false);
for (CellIdx cell : activeCells_) {
for (CellIdx cell : activeCells_.getSparse()) {
prevActiveCellsDense[cell] = true;
}
activeCells_.clear();
activeCells_.zero();

const vector<CellIdx> prevWinnerCells = std::move(winnerCells_);

Expand Down Expand Up @@ -531,12 +543,12 @@ void TemporalMemory::activateDendrites(const bool learn,
{
NTA_CHECK( extraActive.size == extra_ );
NTA_CHECK( extraWinners.size == extra_ );
NTA_CHECK( extraActive.dimensions == extraWinners.dimensions);
#ifdef NTA_ASSERTIONS_ON
SDR both(extraActive.dimensions);
both.intersection(extraActive, extraWinners);
NTA_ASSERT(both == extraWinners) << "ExtraWinners must be a subset of ExtraActive";
#endif
NTA_CHECK( extraActive.dimensions == extraWinners.dimensions);
#ifdef NTA_ASSERTIONS_ON
SDR both(extraActive.dimensions);
both.intersection(extraActive, extraWinners);
NTA_ASSERT(both == extraWinners) << "ExtraWinners must be a subset of ExtraActive";
#endif
}
else
{
Expand All @@ -548,13 +560,12 @@ void TemporalMemory::activateDendrites(const bool learn,
if( segmentsValid_ )
return;

for(const auto &active : extraActive.getSparse()) {
NTA_ASSERT( active < extra_ );
activeCells_.push_back( static_cast<CellIdx>(active + numberOfCells()) );
}
SDR dendriteInputs({ activeCells_.size + extraActive.size });
dendriteInputs.concatenate(activeCells_.flatten(), extraActive.flatten());
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

oh, thank you! I had totally forgotten about this problem.


for(const auto &winner : extraWinners.getSparse()) {
NTA_ASSERT( winner < extra_ );
winnerCells_.push_back( static_cast<CellIdx>(winner + numberOfCells()) );
winnerCells_.push_back( static_cast<CellIdx>(winner + numberOfCells()) ); //TODO winnerCells_ also SDR
}

const size_t length = connections.segmentFlatListLength();
Expand All @@ -563,7 +574,7 @@ void TemporalMemory::activateDendrites(const bool learn,
numActivePotentialSynapsesForSegment_.assign(length, 0);
connections.computeActivity(numActiveConnectedSynapsesForSegment_,
numActivePotentialSynapsesForSegment_,
activeCells_);
dendriteInputs.getSparse());

// Active segments, connected synapses.
activeSegments_.clear();
Expand Down Expand Up @@ -619,7 +630,7 @@ void TemporalMemory::compute(const SDR &activeColumns, const bool learn) {
}

void TemporalMemory::reset(void) {
activeCells_.clear();
activeCells_.zero();
winnerCells_.clear();
activeSegments_.clear();
matchingSegments_.clear();
Expand All @@ -644,10 +655,7 @@ UInt TemporalMemory::columnForCell(const CellIdx cell) const {


SDR TemporalMemory::cellsToColumns(const SDR& cells) const {
auto correctDims = getColumnDimensions(); //nD column dimensions (eg 10x100)
correctDims.push_back(getCellsPerColumn()); //add n+1-th dimension for cellsPerColumn (eg. 10x100x8)

NTA_CHECK(cells.dimensions == correctDims)
NTA_CHECK(cells.dimensions == activeCells_.dimensions)
<< "cells.dimensions must match TM's (column dims x cellsPerColumn) ";

SDR cols(getColumnDimensions());
Expand Down Expand Up @@ -676,24 +684,16 @@ vector<CellIdx> TemporalMemory::cellsForColumn(CellIdx column) {
return cellsInColumn;
}

vector<CellIdx> TemporalMemory::getActiveCells() const { return activeCells_; }

// Overload: write the currently active cells into a caller-provided SDR.
// The caller must pre-size the SDR to cover every cell in the TM; the
// active-cell indices are then stored in sparse form.
void TemporalMemory::getActiveCells(SDR &activeCells) const
{
// Output SDR must span all cells (columns x cellsPerColumn).
NTA_CHECK( activeCells.size == numberOfCells() );
// Delegate to the index-returning overload and store as sparse indices.
activeCells.setSparse( getActiveCells() );
}
SDR TemporalMemory::getActiveCells() const { return activeCells_; }


SDR TemporalMemory::getPredictiveCells() const {

NTA_CHECK( segmentsValid_ )
<< "Call TM.activateDendrites() before TM.getPredictiveCells()!";

auto correctDims = getColumnDimensions();
correctDims.push_back(getCellsPerColumn());
SDR predictive(correctDims);

SDR predictive(activeCells_.dimensions); //match TM's dimensions, same as active cells
auto& predictiveCells = predictive.getSparse();

for (auto segment = activeSegments_.cbegin(); segment != activeSegments_.cend();
Expand Down Expand Up @@ -851,10 +851,7 @@ void TemporalMemory::save(ostream &outStream) const {
}
outStream << endl;

outStream << activeCells_.size() << " ";
for (CellIdx cell : activeCells_) {
outStream << cell << " ";
}
activeCells_.save(outStream);
outStream << endl;

outStream << winnerCells_.size() << " ";
Expand Down Expand Up @@ -934,13 +931,7 @@ void TemporalMemory::load(istream &inStream) {
inStream >> columnDimensions_[i];
}

UInt numActiveCells;
inStream >> numActiveCells;
for (UInt i = 0; i < numActiveCells; i++) {
CellIdx cell;
inStream >> cell;
activeCells_.push_back(cell);
}
activeCells_.load(inStream);

if (version < 2) {
UInt numPredictiveCells;
Expand Down
9 changes: 3 additions & 6 deletions src/nupic/algorithms/TemporalMemory.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -305,12 +305,9 @@ using namespace nupic::algorithms::connections;
size_t numberOfCells(void) const { return connections.numCells(); }

/**
* Returns the indices of the active cells.
*
* @returns (std::vector<CellIdx>) Vector of indices of active cells.
* @return SDR with indices of active cells.
*/
vector<CellIdx> getActiveCells() const; //TODO remove
void getActiveCells(sdr::SDR &activeCells) const;
sdr::SDR getActiveCells() const;

/**
* @return SDR with indices of the predictive cells.
Expand Down Expand Up @@ -621,7 +618,7 @@ using namespace nupic::algorithms::connections;
SynapseIdx maxSynapsesPerSegment_;

private:
vector<CellIdx> activeCells_;
sdr::SDR activeCells_;
vector<CellIdx> winnerCells_;
bool segmentsValid_;
vector<Segment> activeSegments_;
Expand Down
12 changes: 9 additions & 3 deletions src/nupic/regions/TMRegion.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -239,15 +239,21 @@ void TMRegion::compute() {
out = getOutput("bottomUpOut");
if (out && (out->hasOutgoingLinks() || LogItem::isDebug())) {
SDR& sdr = out->getData().getSDR();
tm_->getActiveCells(sdr); //active cells
if (args_.orColumnOutputs) { //output as columns
sdr = tm_->cellsToColumns(sdr);
SDR cols = tm_->cellsToColumns(tm_->getActiveCells());
sdr.setSparse(cols.getSparse());
} else { //output as cells
SDR cells = tm_->getActiveCells();
sdr.setSparse(cells.getSparse());
}
NTA_DEBUG << "compute " << *out << std::endl;
}
out = getOutput("activeCells");
if (out && (out->hasOutgoingLinks() || LogItem::isDebug())) {
tm_->getActiveCells(out->getData().getSDR());
SDR& sdr = out->getData().getSDR();
SDR cells = tm_->getActiveCells();
sdr.setSparse(cells.getSparse());

NTA_DEBUG << "compute " << *out << std::endl;
}
out = getOutput("predictedActiveCells");
Expand Down
3 changes: 3 additions & 0 deletions src/nupic/types/Sdr.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -426,6 +426,9 @@ namespace sdr {
SDR::setDenseInplace();
}

// Return a 1-D view of this SDR with all dimensions collapsed to {size}.
// Reshape is a non-copying wrapper around *this, so the returned view is
// only valid while this SDR is alive — NOTE(review): confirm callers do not
// let the view outlive the source SDR.
Reshape SparseDistributedRepresentation::flatten() const
{ return Reshape(*this, {size} ); }
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

do we want Reshape here, or would a plain SDR constructed with {size} plus setSparse be enough?

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That would work too, but it would copy the data. The Reshape class does not copy data.


bool SparseDistributedRepresentation::operator==(const SparseDistributedRepresentation &sdr) const {
// Check attributes
if( sdr.size != size or dimensions.size() != sdr.dimensions.size() )
Expand Down
4 changes: 4 additions & 0 deletions src/nupic/types/Sdr.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,8 @@ using SDR_sparse_t = std::vector<ElemSparse>;
using SDR_coordinate_t = std::vector<std::vector<UInt>>;
using SDR_callback_t = std::function<void()>;

class Reshape; // Forward Declaration.

/**
* SparseDistributedRepresentation class
* Also known as "SDR" class
Expand Down Expand Up @@ -519,6 +521,8 @@ class SparseDistributedRepresentation : public Serializable
void concatenate(std::vector<const SparseDistributedRepresentation*> inputs,
UInt axis = 0u);

Reshape flatten() const;

/**
* Print a human readable version of the SDR.
*/
Expand Down
23 changes: 11 additions & 12 deletions src/test/unit/algorithms/TemporalMemoryTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ TEST(TemporalMemoryTest, ActivateCorrectlyPredictiveCells) {
ASSERT_EQ(expectedActiveCells, tm.getPredictiveCells().getSparse());
tm.compute(activeColumns, true);

EXPECT_EQ(expectedActiveCells, tm.getActiveCells());
EXPECT_EQ(expectedActiveCells, tm.getActiveCells().getSparse());
}

/**
Expand All @@ -141,7 +141,7 @@ TEST(TemporalMemoryTest, BurstUnpredictedColumns) {

tm.compute(activeColumns, true);

EXPECT_EQ(burstingCells, tm.getActiveCells());
EXPECT_EQ(burstingCells, tm.getActiveCells().getSparse());
}

/**
Expand Down Expand Up @@ -176,15 +176,14 @@ TEST(TemporalMemoryTest, ZeroActiveColumns) {
tm.connections.createSynapse(segment, previousActiveCells[2], 0.5f);
tm.connections.createSynapse(segment, previousActiveCells[3], 0.5f);
tm.compute(previousActiveColumns, true);
ASSERT_FALSE(tm.getActiveCells().empty());
ASSERT_FALSE(tm.getActiveCells().getSum() == 0);
ASSERT_FALSE(tm.getWinnerCells().empty());
tm.activateDendrites();
ASSERT_FALSE(tm.getPredictiveCells().getSum() == 0);

SDR empty({32});
empty.setSparse(SDR_sparse_t{});
const SDR empty({32});
EXPECT_NO_THROW(tm.compute(empty, true)) << "failed with empty compute";
EXPECT_TRUE(tm.getActiveCells().empty());
EXPECT_TRUE(tm.getActiveCells().getSparse().empty());
EXPECT_TRUE(tm.getWinnerCells().empty());
tm.activateDendrites();
EXPECT_TRUE(tm.getPredictiveCells().getSum() == 0);
Expand Down Expand Up @@ -1087,7 +1086,7 @@ TEST(TemporalMemoryTest, AddSegmentToCellWithFewestSegments) {
tm.compute(previousActiveColumns, true);
tm.compute(activeColumns, true);

ASSERT_EQ(activeCells, tm.getActiveCells());
ASSERT_EQ(activeCells, tm.getActiveCells().getSparse());

EXPECT_EQ(3ul, tm.connections.numSegments());
EXPECT_EQ(1ul, tm.connections.segmentsForCell(0).size());
Expand Down Expand Up @@ -1483,7 +1482,7 @@ void serializationTestVerify(TemporalMemory &tm) {

// Verify the correct cells were activated.
EXPECT_EQ((vector<UInt>{4, 8, 9, 10, 11, 12, 13, 14, 15}),
tm.getActiveCells());
tm.getActiveCells().getSparse());
const vector<UInt> winnerCells = tm.getWinnerCells();
ASSERT_EQ(3ul, winnerCells.size());
EXPECT_EQ(4ul, winnerCells[0]);
Expand Down Expand Up @@ -1644,8 +1643,8 @@ TEST(TemporalMemoryTest, testExtraActive) {
/* extra */ (UInt)(columns.size * 12u));
auto tm_dimensions = tm.getColumnDimensions();
tm_dimensions.push_back( tm.getCellsPerColumn() );
SDR extraActive( tm_dimensions );
SDR extraWinners( tm_dimensions );
SDR extraActive( tm_dimensions);
SDR extraWinners(tm_dimensions);

// Look at the pattern.
for(UInt trial = 0; trial < 20; trial++) {
Expand All @@ -1654,8 +1653,8 @@ TEST(TemporalMemoryTest, testExtraActive) {
// Calculate TM output
tm.compute(x, true, extraActive, extraWinners);
// update the external 'hints' for the next iteration
tm.getActiveCells( extraActive );
tm.getWinnerCells( extraWinners );
extraActive = tm.getActiveCells();
tm.getWinnerCells(extraWinners);
}
if( trial >= 19 ) {
ASSERT_LT( tm.anomaly, 0.05f );
Expand Down