convolveND.h
Go to the documentation of this file.
00013 #ifndef DLRNUMERIC_CONVOLVEND_H
00014 #define DLRNUMERIC_CONVOLVEND_H
00015
00016 #include <iostream>
00017 #include <string>
00018 #include <dlrCommon/exception.h>
00019 #include <dlrNumeric/arrayND.h>
00020 #include <dlrNumeric/convolutionStrategy.h>
00021
00022
00023 namespace dlr {
00024
00025 namespace numeric {
00026
00027
/**
 * Convolves a 1D filter kernel with an N-dimensional signal along a
 * single axis.  Only strategy DLR_CONVOLVE_PAD_RESULT and ROI
 * DLR_CONVOLVE_ROI_SAME are currently implemented; other values make
 * the implementation throw NotImplementedException.
 *
 * Template argument AccumulatorType is the type in which the
 * sum-of-products is accumulated; OutputType is the element type of
 * the returned array.
 *
 * @param kernel The 1D filter coefficients (reversed internally, so
 * this is true convolution, not correlation).
 * @param signal The N-dimensional input array.
 * @param axis The dimension of signal along which to convolve.
 * @param strategy How to handle the borders of the result; defaults
 * to DLR_CONVOLVE_PAD_RESULT (border elements left at zero).
 * @param roi Which region of the convolution to return; defaults to
 * DLR_CONVOLVE_ROI_SAME (result has the same shape as signal).
 * @return An ArrayND with the same shape as signal.
 */
template <class OutputType, class AccumulatorType,
          class KernelType, class SignalType, size_t Dimension>
ArrayND<Dimension, OutputType>
convolve(const Array1D<KernelType>& kernel,
         const ArrayND<Dimension, SignalType>& signal,
         size_t axis,
         ConvolutionStrategy strategy = DLR_CONVOLVE_PAD_RESULT,
         ConvolutionROI roi = DLR_CONVOLVE_ROI_SAME);
00037
00038
00039 }
00040
00041 }
00042
00043
00044
00045
00046
00047
00048 namespace dlr {
00049
00050 namespace numeric {
00051
00052 namespace privateCode {
00053
00054 inline bool
00055 convolveNDUpdatePosition(Array1D<size_t>& position,
00056 Array1D<size_t> const& lowerBound,
00057 Array1D<size_t> const& upperBound)
00058 {
00059 for(size_t ii = position.size() - 1; ii < position.size(); --ii) {
00060 if(++(position[ii]) < upperBound[ii]) {
00061 return true;
00062 }
00063 position[ii] = lowerBound[ii];
00064 }
00065 return false;
00066 }
00067
00068 }
00069
00070
00072 template <class OutputType, class AccumulatorType,
00073 class KernelType, class SignalType, size_t Dimension>
00074 ArrayND<Dimension, OutputType>
00075 convolve(const Array1D<KernelType>& kernel,
00076 const ArrayND<Dimension, SignalType>& signal,
00077 size_t axis,
00078 ConvolutionStrategy strategy = DLR_CONVOLVE_PAD_RESULT,
00079 ConvolutionROI roi = DLR_CONVOLVE_ROI_SAME)
00080 {
00081 Array1D<AccumulatorType> reversedKernel(kernel.size());
00082 for(size_t ii = 0; ii < kernel.size(); ++ii) {
00083 reversedKernel[kernel.size() - ii - 1] =
00084 static_cast<AccumulatorType>(kernel[ii]);
00085 }
00086
00087 ArrayND<Dimension, OutputType> result(signal.getShape());
00088 result = OutputType(0);
00089
00090 if(strategy != DLR_CONVOLVE_PAD_RESULT) {
00091 DLR_THROW(NotImplementedException, "convolve()",
00092 "Only ConvolutionStrategy DLR_CONVOLVE_PAD_RESULT is "
00093 "currently implemented.");
00094 }
00095 if(roi != DLR_CONVOLVE_ROI_SAME) {
00096 DLR_THROW(NotImplementedException, "convolve()",
00097 "Only ConvolutionROI DLR_CONVOLVE_ROI_SAME is "
00098 "currently implemented.");
00099 }
00100
00101
00102
00103
00104
00105
00106
00107 size_t stride = signal.getStride(axis);
00108 size_t resultOffset = (reversedKernel.size() / 2) * stride;
00109
00110
00111
00112
00113 Array1D<size_t> elementPosition(Dimension);
00114 elementPosition = 0;
00115
00116
00117
00118 Array1D<size_t> positionLowerBound = elementPosition.copy();
00119 Array1D<size_t> positionUpperBound = signal.getShape().copy();
00120 positionUpperBound[axis] -= (reversedKernel.size() - 1);
00121
00122 do {
00123 size_t signalIndex = signal.flattenIndex(elementPosition);
00124 size_t resultIndex = signalIndex + resultOffset;
00125
00126 AccumulatorType accumulator = AccumulatorType(0);
00127 for(size_t ii = 0; ii < reversedKernel.size(); ++ii) {
00128 accumulator += (reversedKernel[ii]
00129 * static_cast<AccumulatorType>(signal(signalIndex)));
00130 signalIndex += stride;
00131 }
00132 result(resultIndex) = static_cast<OutputType>(accumulator);
00133 } while(privateCode::convolveNDUpdatePosition(
00134 elementPosition, positionLowerBound, positionUpperBound));
00135 return result;
00136 }
00137
00138
00139 }
00140
00141 }
00142
00143 #endif